layerscape: add patches-5.4
target/linux/layerscape/patches-5.4/701-net-0009-dpa-SDK-DPAA-1.x-Ethernet-driver.patch
1 From f7f94b1e7e9c6044a23bab1c5e773f6259f2d3e0 Mon Sep 17 00:00:00 2001
2 From: Madalin Bucur <madalin.bucur@nxp.com>
3 Date: Wed, 10 May 2017 16:39:42 +0300
4 Subject: [PATCH] dpa: SDK DPAA 1.x Ethernet driver
5
6 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
7 ---
8 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 ++
9 drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
10 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
11 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
12 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
13 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
14 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1210 ++++++++++++
15 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 697 +++++++
16 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
17 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
18 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1991 ++++++++++++++++++++
19 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 236 +++
20 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1812 ++++++++++++++++++
21 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 226 +++
22 .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
23 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1113 +++++++++++
24 .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
25 .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
26 .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++++++
27 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 290 +++
28 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 909 +++++++++
29 drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 +++++
30 drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 ++
31 .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++++++++
32 .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
33 25 files changed, 12835 insertions(+)
34 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
35 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
36 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
37 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
38 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
39 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
40 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
41 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
42 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
43 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
44 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
45 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
46 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
47 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
48 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
49 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
50 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
51 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
52 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
53 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
54 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
55 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
56 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
57 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
58 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
59
60 --- /dev/null
61 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
62 @@ -0,0 +1,173 @@
63 +menuconfig FSL_SDK_DPAA_ETH
64 + tristate "DPAA Ethernet"
65 + depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
66 + select PHYLIB
67 + help
68 + Data Path Acceleration Architecture Ethernet driver,
69 + supporting the Freescale QorIQ chips.
70 + Depends on the Freescale Buffer Manager, Queue Manager
71 + and Frame Manager drivers.
72 +
73 +if FSL_SDK_DPAA_ETH
74 +
75 +config FSL_DPAA_HOOKS
76 + bool "DPAA Ethernet driver hooks"
77 +
78 +config FSL_DPAA_CEETM
79 + bool "DPAA CEETM QoS"
80 + depends on NET_SCHED
81 + default n
82 + help
83 + Enable QoS offloading support through the CEETM hardware block.
84 +
85 +config FSL_DPAA_OFFLINE_PORTS
86 + bool "Offline Ports support"
87 + depends on FSL_SDK_DPAA_ETH
88 + default y
89 + help
90 + The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
91 + most of the functionality of the regular, online ports, except they receive their
92 + frames from a core or an accelerator on the SoC, via QMan frame queues,
93 + rather than directly from the network.
94 + Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
95 + any online FMan port. They deliver the processed frames to frame queues, according
96 + to the applied PCD configurations.
97 +
98 + Choosing this feature does not impact the functionality or performance of the system,
99 + so it is safe to leave it enabled.
100 +
101 +config FSL_DPAA_ADVANCED_DRIVERS
102 + bool "Advanced DPAA Ethernet drivers"
103 + depends on FSL_SDK_DPAA_ETH
104 + default y
105 + help
106 + Besides the standard DPAA Ethernet driver, the DPAA Proxy initialization driver
107 + is needed to support advanced scenarios. Select this to also build the advanced
108 + drivers.
109 +
110 +config FSL_DPAA_ETH_JUMBO_FRAME
111 + bool "Optimize for jumbo frames"
112 + default n
113 + help
114 + Optimize the DPAA Ethernet driver throughput for large frame
115 + termination traffic (e.g. 4K and above).
116 + NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
117 + is set to 9600 bytes.
118 + Using this option in combination with small frames significantly
119 + increases the driver's memory footprint and may even deplete
120 + the system memory. Also, the skb truesize is altered and messages
121 + from the stack that warn against this are bypassed.
122 + This option is not available on LS1043.
123 +
124 +config FSL_DPAA_TS
125 + bool "Linux compliant timestamping"
126 + depends on FSL_SDK_DPAA_ETH
127 + default n
128 + help
129 + Enable Linux API compliant timestamping support.
130 +
131 +config FSL_DPAA_1588
132 + bool "IEEE 1588-compliant timestamping"
133 + depends on FSL_SDK_DPAA_ETH
134 + select FSL_DPAA_TS
135 + default n
136 + help
137 + Enable IEEE1588 support code.
138 +
139 +config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
140 + bool "Use driver's Tx queue selection mechanism"
141 + default y
142 + depends on FSL_SDK_DPAA_ETH
143 + help
144 + The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
145 + of the egress FQ. That will override the XPS support for this netdevice.
146 + If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
147 + or simply don't want to use the driver's ndo_select_queue() callback, then unselect this option
148 + and use the standard XPS support instead.
149 +
150 +config FSL_DPAA_ETH_MAX_BUF_COUNT
151 + int "Maximum nuber of buffers in private bpool"
152 + depends on FSL_SDK_DPAA_ETH
153 + range 64 2048
154 + default "128"
155 + help
156 + The maximum number of buffers allocated by default in the DPAA-Ethernet private port's
157 + buffer pool. One needn't normally modify this, as it has probably been tuned for performance
158 + already. This cannot be lower than FSL_DPAA_ETH_REFILL_THRESHOLD.
159 +
160 +config FSL_DPAA_ETH_REFILL_THRESHOLD
161 + int "Private bpool refill threshold"
162 + depends on FSL_SDK_DPAA_ETH
163 + range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
164 + default "80"
165 + help
166 + The DPAA-Ethernet driver will start replenishing buffer pools whose count
167 + falls below this threshold. It must not exceed FSL_DPAA_ETH_MAX_BUF_COUNT. One needn't normally
168 + modify this value unless one has very specific performance reasons.
169 +
170 +config FSL_DPAA_CS_THRESHOLD_1G
171 + hex "Egress congestion threshold on 1G ports"
172 + depends on FSL_SDK_DPAA_ETH
173 + range 0x1000 0x10000000
174 + default "0x06000000"
175 + help
176 + The size in bytes of the egress Congestion State notification threshold on 1G ports.
177 + The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
178 + (e.g. by sending UDP datagrams at "while(1) speed"),
179 + and the larger the frame size, the more acute the problem.
180 + So we have to find a balance between these factors:
181 + - avoiding the device staying congested for a prolonged time (risking that
182 + the netdev watchdog fires - see also the tx_timeout module param);
183 + - affecting performance of protocols such as TCP, which otherwise
184 + behave well under the congestion notification mechanism;
185 + - preventing the Tx cores from tightly-looping (as if the congestion
186 + threshold was too low to be effective);
187 + - running out of memory if the CS threshold is set too high.
188 +
189 +config FSL_DPAA_CS_THRESHOLD_10G
190 + hex "Egress congestion threshold on 10G ports"
191 + depends on FSL_SDK_DPAA_ETH
192 + range 0x1000 0x20000000
193 + default "0x10000000"
194 + help
195 + The size in bytes of the egress Congestion State notification threshold on 10G ports.
196 +
197 +config FSL_DPAA_INGRESS_CS_THRESHOLD
198 + hex "Ingress congestion threshold on FMan ports"
199 + depends on FSL_SDK_DPAA_ETH
200 + default "0x10000000"
201 + help
202 + The size in bytes of the ingress tail-drop threshold on FMan ports.
203 + Traffic piling up above this value will be rejected by QMan and discarded by FMan.
204 +
205 +config FSL_DPAA_ETH_DEBUGFS
206 + bool "DPAA Ethernet debugfs interface"
207 + depends on DEBUG_FS && FSL_SDK_DPAA_ETH
208 + default y
209 + help
210 + This option compiles debugfs code for the DPAA Ethernet driver.
211 +
212 +config FSL_DPAA_ETH_DEBUG
213 + bool "DPAA Ethernet Debug Support"
214 + depends on FSL_SDK_DPAA_ETH
215 + default n
216 + help
217 + This option compiles debug code for the DPAA Ethernet driver.
218 +
219 +config FSL_DPAA_DBG_LOOP
220 + bool "DPAA Ethernet Debug loopback"
221 + depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
222 + default n
223 + help
224 + This option allows diverting all traffic received on a certain interface A towards a
225 + selected interface B. It is used to benchmark the HW + Ethernet driver in
226 + isolation from the Linux networking stack. The loops are controlled by debugfs entries,
227 + one for each interface. By default all loops are disabled (target value is -1). E.g. to
228 + change the loop setting for interface 4 and divert all its received traffic to interface 5,
229 + write the Tx interface number into the receive interface's debugfs file:
230 + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
231 + 4->-1
232 + # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
233 + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
234 + 4->5
235 +endif # FSL_SDK_DPAA_ETH
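
The defaults above amount to the following baseline configuration fragment, a
minimal sketch using only the symbols and default values quoted in the help
texts (boards may of course override them):

    CONFIG_FSL_SDK_DPAA_ETH=y
    CONFIG_FSL_DPAA_OFFLINE_PORTS=y
    CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
    CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
    CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
    CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
    CONFIG_FSL_DPAA_CS_THRESHOLD_1G=0x06000000
    CONFIG_FSL_DPAA_CS_THRESHOLD_10G=0x10000000
    CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
    CONFIG_FSL_DPAA_ETH_DEBUGFS=y
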
236 --- /dev/null
237 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
238 @@ -0,0 +1,46 @@
239 +#
240 +# Makefile for the Freescale Ethernet controllers
241 +#
242 +ccflags-y += -DVERSION=\"\"
243 +#
244 +# Include netcomm SW specific definitions
245 +include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
246 +
247 +ccflags-y += -I$(NET_DPA)
248 +
249 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
250 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
251 +
252 +fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
253 +ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
254 +fsl_dpa-objs += dpaa_debugfs.o
255 +endif
256 +ifeq ($(CONFIG_FSL_DPAA_1588),y)
257 +fsl_dpa-objs += dpaa_1588.o
258 +endif
259 +ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
260 +ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
261 +fsl_dpa-objs += dpaa_eth_ceetm.o
262 +endif
263 +
264 +fsl_mac-objs += mac.o mac-api.o
265 +
266 +# Advanced drivers
267 +ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
268 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
269 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
270 +
271 +fsl_advanced-objs += dpaa_eth_base.o
272 +# support for multiple drivers per kernel module comes in kernel 3.14
273 +# so we are forced to generate several modules for the advanced drivers
274 +fsl_proxy-objs += dpaa_eth_proxy.o
275 +
276 +ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
277 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
278 +
279 +fsl_oh-objs += offline_port.o
280 +endif
281 +endif
282 +
283 +# Needed by the tracing framework
284 +CFLAGS_dpaa_eth.o := -I$(src)
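+
+# When built as modules, the rules above produce fsl_mac.ko and fsl_dpa.ko,
+# plus fsl_advanced.ko, fsl_proxy.ko and fsl_oh.ko for the advanced drivers,
+# and dpaa_ptp.ko when CONFIG_PTP_1588_CLOCK_DPAA is set.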
285 --- /dev/null
286 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
287 @@ -0,0 +1,580 @@
288 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
289 + * Copyright (C) 2009 IXXAT Automation, GmbH
290 + *
291 + * DPAA Ethernet Driver -- IEEE 1588 interface functionality
292 + *
293 + * This program is free software; you can redistribute it and/or modify
294 + * it under the terms of the GNU General Public License as published by
295 + * the Free Software Foundation; either version 2 of the License, or
296 + * (at your option) any later version.
297 + *
298 + * This program is distributed in the hope that it will be useful,
299 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
300 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
301 + * GNU General Public License for more details.
302 + *
303 + * You should have received a copy of the GNU General Public License along
304 + * with this program; if not, write to the Free Software Foundation, Inc.,
305 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
306 + *
307 + */
308 +#include <linux/io.h>
309 +#include <linux/device.h>
310 +#include <linux/fs.h>
311 +#include <linux/vmalloc.h>
312 +#include <linux/spinlock.h>
313 +#include <linux/ip.h>
314 +#include <linux/ipv6.h>
315 +#include <linux/udp.h>
316 +#include <asm/div64.h>
317 +#include "dpaa_eth.h"
318 +#include "dpaa_eth_common.h"
319 +#include "dpaa_1588.h"
320 +#include "mac.h"
321 +
322 +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
323 +{
324 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
325 +
326 + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
327 + if (!circ_buf->buf)
328 + return 1;
329 +
330 + circ_buf->head = 0;
331 + circ_buf->tail = 0;
332 + ptp_buf->size = size;
333 + spin_lock_init(&ptp_buf->ptp_lock);
334 +
335 + return 0;
336 +}
337 +
338 +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
339 +{
340 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
341 +
342 + circ_buf->head = 0;
343 + circ_buf->tail = 0;
344 + ptp_buf->size = size;
345 +}
346 +
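+/* Store one timestamp entry in the ring. The ring size must be a power of
+ * two (the defaults are 256 entries), since head and tail advance with
+ * (index + 1) & (size - 1); when the ring is full, the tail is pushed
+ * forward first, so the oldest entry gets overwritten.
+ */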
347 +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
348 + struct dpa_ptp_data *data)
349 +{
350 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
351 + int size = ptp_buf->size;
352 + struct dpa_ptp_data *tmp;
353 + unsigned long flags;
354 + int head, tail;
355 +
356 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
357 +
358 + head = circ_buf->head;
359 + tail = circ_buf->tail;
360 +
361 + if (CIRC_SPACE(head, tail, size) <= 0)
362 + circ_buf->tail = (tail + 1) & (size - 1);
363 +
364 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
365 + memcpy(tmp, data, sizeof(struct dpa_ptp_data));
366 +
367 + circ_buf->head = (head + 1) & (size - 1);
368 +
369 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
370 +
371 + return 0;
372 +}
373 +
374 +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
375 + struct dpa_ptp_ident *src)
376 +{
377 + int ret;
378 +
379 + if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
380 + return 0;
381 +
382 + if ((dst->netw_prot == src->netw_prot)
383 + || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
384 + if (dst->seq_id != src->seq_id)
385 + return 0;
386 +
387 + ret = memcmp(dst->snd_port_id, src->snd_port_id,
388 + DPA_PTP_SOURCE_PORT_LENGTH);
389 + if (ret)
390 + return 0;
391 + else
392 + return 1;
393 + }
394 +
395 + return 0;
396 +}
397 +
398 +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
399 + struct dpa_ptp_ident *ident,
400 + struct dpa_ptp_time *ts)
401 +{
402 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
403 + int size = ptp_buf->size;
404 + int head, tail, idx;
405 + unsigned long flags;
406 + struct dpa_ptp_data *tmp, *tmp2;
407 + struct dpa_ptp_ident *tmp_ident;
408 +
409 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
410 +
411 + head = circ_buf->head;
412 + tail = idx = circ_buf->tail;
413 +
414 + if (CIRC_CNT(head, tail, size) == 0) {
415 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
416 + return 1;
417 + }
418 +
419 + while (idx != head) {
420 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
421 + tmp_ident = &tmp->ident;
422 + if (dpa_ptp_is_ident_match(tmp_ident, ident))
423 + break;
424 + idx = (idx + 1) & (size - 1);
425 + }
426 +
427 + if (idx == head) {
428 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
429 + return 1;
430 + }
431 +
432 + ts->sec = tmp->ts.sec;
433 + ts->nsec = tmp->ts.nsec;
434 +
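+ /* Consume the matched entry: older entries between the tail and the
+ * match are shifted one slot towards it to fill the hole; if more than
+ * TS_ACCUMULATION_THRESHOLD older entries piled up, the tail is first
+ * moved forward so that the oldest ones are dropped.
+ */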
435 + if (idx != tail) {
436 + if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
437 + tail = circ_buf->tail =
438 + (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
439 + }
440 +
441 + while (CIRC_CNT(idx, tail, size) > 0) {
442 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
443 + idx = (idx - 1) & (size - 1);
444 + tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
445 + *tmp = *tmp2;
446 + }
447 + }
448 + circ_buf->tail = (tail + 1) & (size - 1);
449 +
450 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
451 +
452 + return 0;
453 +}
454 +
455 +/* Parse the PTP packets
456 + *
457 + * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
458 + * an IEEE 802.3 Ethernet frame. This function returns the position of
459 + * the PTP header, or NULL if none is found.
460 + */
461 +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
462 +{
463 + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
464 + u8 *ptp_loc = NULL;
465 + u8 msg_type;
466 + u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
467 + struct iphdr *iph;
468 + struct udphdr *udph;
469 + struct ipv6hdr *ipv6h;
470 +
471 + /* when we can receive S/G frames we need to check that the data we
472 + * want to access is in the linear part of the skb
473 + */
474 + if (!pskb_may_pull(skb, access_len))
475 + return NULL;
476 +
477 + *eth_type = *((u16 *)pos);
478 +
479 + /* Check if inner tag is here */
480 + if (*eth_type == ETH_P_8021Q) {
481 + access_len += DPA_VLAN_TAG_LEN;
482 +
483 + if (!pskb_may_pull(skb, access_len))
484 + return NULL;
485 +
486 + pos += DPA_VLAN_TAG_LEN;
487 + *eth_type = *((u16 *)pos);
488 + }
489 +
490 + pos += DPA_ETYPE_LEN;
491 +
492 + switch (*eth_type) {
493 + /* Transport of PTP over Ethernet */
494 + case ETH_P_1588:
495 + ptp_loc = pos;
496 +
497 + if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
498 + return NULL;
499 +
500 + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
501 + if ((msg_type == PTP_MSGTYPE_SYNC)
502 + || (msg_type == PTP_MSGTYPE_DELREQ)
503 + || (msg_type == PTP_MSGTYPE_PDELREQ)
504 + || (msg_type == PTP_MSGTYPE_PDELRESP))
505 + return ptp_loc;
506 + break;
507 + /* Transport of PTP over IPv4 */
508 + case ETH_P_IP:
509 + iph = (struct iphdr *)pos;
510 + access_len += sizeof(struct iphdr);
511 +
512 + if (!pskb_may_pull(skb, access_len))
513 + return NULL;
514 +
515 + if (iph->protocol != IPPROTO_UDP)
516 + return NULL;
517 +
518 + access_len += iph->ihl * 4 - sizeof(struct iphdr) +
519 + sizeof(struct udphdr);
520 +
521 + if (!pskb_may_pull(skb, access_len))
522 + return NULL;
523 +
524 + pos += iph->ihl * 4;
525 + udph = (struct udphdr *)pos;
526 + if (ntohs(udph->dest) != 319)
527 + return NULL;
528 + ptp_loc = pos + sizeof(struct udphdr);
529 + break;
530 + /* Transport of PTP over IPv6 */
531 + case ETH_P_IPV6:
532 + ipv6h = (struct ipv6hdr *)pos;
533 +
534 + access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
535 +
+ if (!pskb_may_pull(skb, access_len))
+ return NULL;
+
536 + if (ipv6h->nexthdr != IPPROTO_UDP)
537 + return NULL;
538 +
539 + pos += sizeof(struct ipv6hdr);
540 + udph = (struct udphdr *)pos;
541 + if (ntohs(udph->dest) != 319)
542 + return NULL;
543 + ptp_loc = pos + sizeof(struct udphdr);
544 + break;
545 + default:
546 + break;
547 + }
548 +
549 + return ptp_loc;
550 +}
551 +
552 +static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
553 + struct sk_buff *skb, void *data, enum port_type rx_tx,
554 + struct dpa_ptp_data *ptp_data)
555 +{
556 + u64 nsec;
557 + u32 mod;
558 + u8 *ptp_loc;
559 + u16 eth_type;
560 +
561 + ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
562 + if (!ptp_loc)
563 + return -EINVAL;
564 +
565 + switch (eth_type) {
566 + case ETH_P_IP:
567 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
568 + break;
569 + case ETH_P_IPV6:
570 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
571 + break;
572 + case ETH_P_1588:
573 + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
574 + break;
575 + default:
576 + return -EINVAL;
577 + }
578 +
579 + if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
580 + return -EINVAL;
581 +
582 + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
583 + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
584 + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
585 + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
586 + DPA_PTP_SOURCE_PORT_LENGTH);
587 +
588 + nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
589 + mod = do_div(nsec, NANOSEC_PER_SECOND);
590 + ptp_data->ts.sec = nsec;
591 + ptp_data->ts.nsec = mod;
592 +
593 + return 0;
594 +}
595 +
596 +void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
597 + struct sk_buff *skb, void *data)
598 +{
599 + struct dpa_ptp_tsu *tsu = priv->tsu;
600 + struct dpa_ptp_data ptp_tx_data;
601 +
602 + if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
603 + return;
604 +
605 + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
606 +}
607 +
608 +void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
609 + struct sk_buff *skb, void *data)
610 +{
611 + struct dpa_ptp_tsu *tsu = priv->tsu;
612 + struct dpa_ptp_data ptp_rx_data;
613 +
614 + if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
615 + return;
616 +
617 + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
618 +}
619 +
620 +static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
621 + struct dpa_ptp_ident *ident,
622 + struct dpa_ptp_time *ts)
623 +{
624 + struct dpa_ptp_tsu *tsu = ptp_tsu;
625 + struct dpa_ptp_time tmp;
626 + int flag;
627 +
628 + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
629 + if (!flag) {
630 + ts->sec = tmp.sec;
631 + ts->nsec = tmp.nsec;
632 + return 0;
633 + }
634 +
635 + return -1;
636 +}
637 +
638 +static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
639 + struct dpa_ptp_ident *ident,
640 + struct dpa_ptp_time *ts)
641 +{
642 + struct dpa_ptp_tsu *tsu = ptp_tsu;
643 + struct dpa_ptp_time tmp;
644 + int flag;
645 +
646 + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
647 + if (!flag) {
648 + ts->sec = tmp.sec;
649 + ts->nsec = tmp.nsec;
650 + return 0;
651 + }
652 +
653 + return -1;
654 +}
655 +
656 +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
657 + struct dpa_ptp_time *cnt_time)
658 +{
659 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
660 + u64 tmp, fiper;
661 +
662 + if (mac_dev->fm_rtc_disable)
663 + mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
664 +
665 + /* TMR_FIPER1 will pulse every second after ALARM1 expired */
666 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
667 + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
668 + if (mac_dev->fm_rtc_set_alarm)
669 + mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
670 + 0, tmp);
671 + if (mac_dev->fm_rtc_set_fiper)
672 + mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
673 + 0, fiper);
674 +
675 + if (mac_dev->fm_rtc_enable)
676 + mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
677 +}
678 +
679 +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
680 + struct dpa_ptp_time *curr_time)
681 +{
682 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
683 + u64 tmp;
684 + u32 mod;
685 +
686 + if (mac_dev->fm_rtc_get_cnt)
687 + mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
688 + &tmp);
689 +
690 + mod = do_div(tmp, NANOSEC_PER_SECOND);
691 + curr_time->sec = (u32)tmp;
692 + curr_time->nsec = mod;
693 +}
694 +
695 +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
696 + struct dpa_ptp_time *cnt_time)
697 +{
698 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
699 + u64 tmp;
700 +
701 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
702 +
703 + if (mac_dev->fm_rtc_set_cnt)
704 + mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
705 + tmp);
706 +
707 + /* Restart fiper two seconds later */
708 + cnt_time->sec += 2;
709 + cnt_time->nsec = 0;
710 + dpa_set_fiper_alarm(tsu, cnt_time);
711 +}
712 +
713 +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
714 +{
715 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
716 + u32 drift;
717 +
718 + if (mac_dev->fm_rtc_get_drift)
719 + mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
720 + &drift);
721 +
722 + *addend = drift;
723 +}
724 +
725 +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
726 +{
727 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
728 +
729 + if (mac_dev->fm_rtc_set_drift)
730 + mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
731 + addend);
732 +}
733 +
734 +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
735 +{
736 + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
737 + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
738 +}
739 +
740 +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
741 +{
742 + struct dpa_priv_s *priv = netdev_priv(dev);
743 + struct dpa_ptp_tsu *tsu = priv->tsu;
744 + struct mac_device *mac_dev = priv->mac_dev;
745 + struct dpa_ptp_data ptp_data;
746 + struct dpa_ptp_data *ptp_data_user;
747 + struct dpa_ptp_time act_time;
748 + u32 addend;
749 + int retval = 0;
750 +
751 + if (!tsu || !tsu->valid)
752 + return -ENODEV;
753 +
754 + switch (cmd) {
755 + case PTP_ENBL_TXTS_IOCTL:
756 + tsu->hwts_tx_en_ioctl = 1;
757 + if (mac_dev->fm_rtc_enable)
758 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
759 + if (mac_dev->ptp_enable)
760 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
761 + break;
762 + case PTP_DSBL_TXTS_IOCTL:
763 + tsu->hwts_tx_en_ioctl = 0;
764 + if (mac_dev->fm_rtc_disable)
765 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
766 + if (mac_dev->ptp_disable)
767 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
768 + break;
769 + case PTP_ENBL_RXTS_IOCTL:
770 + tsu->hwts_rx_en_ioctl = 1;
771 + break;
772 + case PTP_DSBL_RXTS_IOCTL:
773 + tsu->hwts_rx_en_ioctl = 0;
774 + break;
775 + case PTP_GET_RX_TIMESTAMP:
776 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
777 + if (copy_from_user(&ptp_data.ident,
778 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
779 + return -EINVAL;
780 +
781 + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
782 + return -EAGAIN;
783 +
784 + if (copy_to_user((void __user *)&ptp_data_user->ts,
785 + &ptp_data.ts, sizeof(ptp_data.ts)))
786 + return -EFAULT;
787 + break;
788 + case PTP_GET_TX_TIMESTAMP:
789 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
790 + if (copy_from_user(&ptp_data.ident,
791 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
792 + return -EINVAL;
793 +
794 + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
795 + return -EAGAIN;
796 +
797 + if (copy_to_user((void __user *)&ptp_data_user->ts,
798 + &ptp_data.ts, sizeof(ptp_data.ts)))
799 + return -EFAULT;
800 + break;
801 + case PTP_GET_TIME:
802 + dpa_get_curr_cnt(tsu, &act_time);
803 + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
804 + return -EFAULT;
805 + break;
806 + case PTP_SET_TIME:
807 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
808 + return -EINVAL;
809 + dpa_set_1588cnt(tsu, &act_time);
810 + break;
811 + case PTP_GET_ADJ:
812 + dpa_get_drift(tsu, &addend);
813 + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
814 + return -EFAULT;
815 + break;
816 + case PTP_SET_ADJ:
817 + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
818 + return -EINVAL;
819 + dpa_set_drift(tsu, addend);
820 + break;
821 + case PTP_SET_FIPER_ALARM:
822 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
823 + return -EINVAL;
824 + dpa_set_fiper_alarm(tsu, &act_time);
825 + break;
826 + case PTP_CLEANUP_TS:
827 + dpa_flush_timestamp(tsu);
828 + break;
829 + default:
830 + return -EINVAL;
831 + }
832 +
833 + return retval;
834 +}
835 +
836 +int dpa_ptp_init(struct dpa_priv_s *priv)
837 +{
838 + struct dpa_ptp_tsu *tsu;
839 +
840 + /* Allocate memory for PTP structure */
841 + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
842 + if (!tsu)
843 + return -ENOMEM;
844 +
845 + tsu->valid = TRUE;
846 + tsu->dpa_priv = priv;
847 +
848 + if (dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ) ||
849 + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ)) {
+ /* free whatever was allocated; vfree(NULL) is a no-op */
+ vfree(tsu->rx_timestamps.circ_buf.buf);
+ vfree(tsu->tx_timestamps.circ_buf.buf);
+ kfree(tsu);
+ return -ENOMEM;
+ }
850 +
851 + priv->tsu = tsu;
852 +
853 + return 0;
854 +}
855 +EXPORT_SYMBOL(dpa_ptp_init);
856 +
857 +void dpa_ptp_cleanup(struct dpa_priv_s *priv)
858 +{
859 + struct dpa_ptp_tsu *tsu = priv->tsu;
860 +
861 + tsu->valid = FALSE;
862 + vfree(tsu->rx_timestamps.circ_buf.buf);
863 + vfree(tsu->tx_timestamps.circ_buf.buf);
864 +
865 + kfree(tsu);
866 +}
867 +EXPORT_SYMBOL(dpa_ptp_cleanup);
868 --- /dev/null
869 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
870 @@ -0,0 +1,138 @@
871 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
872 + *
873 + * This program is free software; you can redistribute it and/or modify
874 + * it under the terms of the GNU General Public License as published by
875 + * the Free Software Foundation; either version 2 of the License, or
876 + * (at your option) any later version.
877 + *
878 + * This program is distributed in the hope that it will be useful,
879 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
880 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
881 + * GNU General Public License for more details.
882 + *
883 + * You should have received a copy of the GNU General Public License along
884 + * with this program; if not, write to the Free Software Foundation, Inc.,
885 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
886 + *
887 + */
888 +#ifndef __DPAA_1588_H__
889 +#define __DPAA_1588_H__
890 +
891 +#include <linux/netdevice.h>
892 +#include <linux/etherdevice.h>
893 +#include <linux/circ_buf.h>
894 +#include <linux/fsl_qman.h>
895 +
896 +#define DEFAULT_PTP_RX_BUF_SZ 256
897 +#define DEFAULT_PTP_TX_BUF_SZ 256
898 +
899 +/* 1588 private ioctl calls */
900 +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
901 +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
902 +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
903 +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
904 +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
905 +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
906 +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
907 +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
908 +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
909 +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
910 +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
911 +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
912 +
913 +/* PTP V2 message type */
914 +enum {
915 + PTP_MSGTYPE_SYNC = 0x0,
916 + PTP_MSGTYPE_DELREQ = 0x1,
917 + PTP_MSGTYPE_PDELREQ = 0x2,
918 + PTP_MSGTYPE_PDELRESP = 0x3,
919 + PTP_MSGTYPE_FLWUP = 0x8,
920 + PTP_MSGTYPE_DELRESP = 0x9,
921 + PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
922 + PTP_MSGTYPE_ANNOUNCE = 0xB,
923 + PTP_MSGTYPE_SGNLNG = 0xC,
924 + PTP_MSGTYPE_MNGMNT = 0xD,
925 +};
926 +
927 +/* Byte offset of data in the PTP V2 headers */
928 +#define PTP_OFFS_MSG_TYPE 0
929 +#define PTP_OFFS_VER_PTP 1
930 +#define PTP_OFFS_MSG_LEN 2
931 +#define PTP_OFFS_DOM_NMB 4
932 +#define PTP_OFFS_FLAGS 6
933 +#define PTP_OFFS_CORFIELD 8
934 +#define PTP_OFFS_SRCPRTID 20
935 +#define PTP_OFFS_SEQ_ID 30
936 +#define PTP_OFFS_CTRL 32
937 +#define PTP_OFFS_LOGMEAN 33
938 +
939 +#define PTP_IP_OFFS 14
940 +#define PTP_UDP_OFFS 34
941 +#define PTP_HEADER_OFFS 42
942 +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
943 +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
944 +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
945 +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
946 +
947 +/* 1588-2008 network protocol enumeration values */
948 +#define DPA_PTP_PROT_IPV4 1
949 +#define DPA_PTP_PROT_IPV6 2
950 +#define DPA_PTP_PROT_802_3 3
951 +#define DPA_PTP_PROT_DONTCARE 0xFFFF
952 +
953 +#define DPA_PTP_SOURCE_PORT_LENGTH 10
954 +#define DPA_PTP_HEADER_SZE 34
955 +#define DPA_ETYPE_LEN 2
956 +#define DPA_VLAN_TAG_LEN 4
957 +#define NANOSEC_PER_SECOND 1000000000
958 +
959 +/* Max number of older entries kept behind the one currently found */
960 +#define TS_ACCUMULATION_THRESHOLD 50
961 +
962 +/* Struct needed to identify a timestamp */
963 +struct dpa_ptp_ident {
964 + u8 version;
965 + u8 msg_type;
966 + u16 netw_prot;
967 + u16 seq_id;
968 + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
969 +};
970 +
971 +/* Timestamp format in 1588-2008 */
972 +struct dpa_ptp_time {
973 + u64 sec; /* only 48 bits used */
974 + u32 nsec;
975 +};
976 +
977 +/* needed for timestamp data over ioctl */
978 +struct dpa_ptp_data {
979 + struct dpa_ptp_ident ident;
980 + struct dpa_ptp_time ts;
981 +};
982 +
983 +struct dpa_ptp_circ_buf {
984 + struct circ_buf circ_buf;
985 + u32 size;
986 + spinlock_t ptp_lock;
987 +};
988 +
989 +/* PTP TSU control structure */
990 +struct dpa_ptp_tsu {
991 + struct dpa_priv_s *dpa_priv;
992 + bool valid;
993 + struct dpa_ptp_circ_buf rx_timestamps;
994 + struct dpa_ptp_circ_buf tx_timestamps;
995 +
996 + /* HW timestamping over ioctl enabled flag */
997 + int hwts_tx_en_ioctl;
998 + int hwts_rx_en_ioctl;
999 +};
1000 +
1001 +extern int dpa_ptp_init(struct dpa_priv_s *priv);
1002 +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
1003 +extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
1004 + struct sk_buff *skb, void *data);
1005 +extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
1006 + struct sk_buff *skb, void *data);
1007 +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
1008 +#endif
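
For orientation, the ioctls above travel over the standard SIOCDEVPRIVATE
range, so a userspace caller passes a struct ifreq whose ifr_data points at
the payload, and dpa_ioctl_1588() copies the result back. A minimal sketch of
such a caller, assuming the definitions above are made available to userspace
(the helper name and "eth0" are illustrative only):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    /* PTP_GET_TIME and struct dpa_ptp_time as defined above */

    /* Read the current 1588 counter of eth0; sock is any open socket. */
    static int dpa_read_1588_time(int sock, struct dpa_ptp_time *t)
    {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)t;    /* the driver writes the time here */

            return ioctl(sock, PTP_GET_TIME, &ifr);    /* SIOCDEVPRIVATE + 7 */
    }
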
1009 --- /dev/null
1010 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
1011 @@ -0,0 +1,180 @@
1012 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1013 + *
1014 + * Redistribution and use in source and binary forms, with or without
1015 + * modification, are permitted provided that the following conditions are met:
1016 + * * Redistributions of source code must retain the above copyright
1017 + * notice, this list of conditions and the following disclaimer.
1018 + * * Redistributions in binary form must reproduce the above copyright
1019 + * notice, this list of conditions and the following disclaimer in the
1020 + * documentation and/or other materials provided with the distribution.
1021 + * * Neither the name of Freescale Semiconductor nor the
1022 + * names of its contributors may be used to endorse or promote products
1023 + * derived from this software without specific prior written permission.
1024 + *
1025 + *
1026 + * ALTERNATIVELY, this software may be distributed under the terms of the
1027 + * GNU General Public License ("GPL") as published by the Free Software
1028 + * Foundation, either version 2 of that License or (at your option) any
1029 + * later version.
1030 + *
1031 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1032 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1033 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1034 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1035 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1036 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1037 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1038 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1039 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1040 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1041 + */
1042 +
1043 +#include <linux/module.h>
1044 +#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
1045 +#include <linux/debugfs.h>
1046 +#include "dpaa_debugfs.h"
1047 +#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
1048 +
1049 +#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
1050 +#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
1051 +
1052 +static struct dentry *dpa_debugfs_root;
1053 +
1054 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
1055 +static ssize_t dpa_loop_write(struct file *f,
1056 + const char __user *buf, size_t count, loff_t *off);
1057 +
1058 +static const struct file_operations dpa_debugfs_lp_fops = {
1059 + .open = dpa_debugfs_loop_open,
1060 + .write = dpa_loop_write,
1061 + .read = seq_read,
1062 + .llseek = seq_lseek,
1063 + .release = single_release,
1064 +};
1065 +
1066 +static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
1067 +{
1068 + struct dpa_priv_s *priv;
1069 +
1070 + BUG_ON(offset == NULL);
1071 +
1072 + priv = netdev_priv((struct net_device *)file->private);
1073 + seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
1074 +
1075 + return 0;
1076 +}
1077 +
1078 +static int user_input_convert(const char __user *user_buf, size_t count,
1079 + long *val)
1080 +{
1081 + char buf[12];
1082 +
1083 + if (count > sizeof(buf) - 1)
1084 + return -EINVAL;
1085 + if (copy_from_user(buf, user_buf, count))
1086 + return -EFAULT;
1087 + buf[count] = '\0';
1088 + if (kstrtol(buf, 0, val))
1089 + return -EINVAL;
1090 + return 0;
1091 +}
1092 +
1093 +static ssize_t dpa_loop_write(struct file *f,
1094 + const char __user *buf, size_t count, loff_t *off)
1095 +{
1096 + struct dpa_priv_s *priv;
1097 + struct net_device *netdev;
1098 + struct seq_file *sf;
1099 + int ret;
1100 + long val;
1101 +
1102 + ret = user_input_convert(buf, count, &val);
1103 + if (ret)
1104 + return ret;
1105 +
1106 + sf = (struct seq_file *)f->private_data;
1107 + netdev = (struct net_device *)sf->private;
1108 + priv = netdev_priv(netdev);
1109 +
1110 + priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
1111 +
1112 + return count;
1113 +}
1114 +
1115 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
1116 +{
1117 + int _errno;
1118 + const struct net_device *net_dev;
1119 +
1120 + _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
1121 + if (unlikely(_errno < 0)) {
1122 + net_dev = (struct net_device *)inode->i_private;
1123 +
1124 + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
1125 + netdev_err(net_dev, "single_open() = %d\n",
1126 + _errno);
1127 + }
1128 +
1129 + return _errno;
1130 +}
1131 +
1132 +
1133 +int dpa_netdev_debugfs_create(struct net_device *net_dev)
1134 +{
1135 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1136 + static int cnt;
1137 + char loop_file_name[100];
1138 +
1139 + if (unlikely(dpa_debugfs_root == NULL)) {
1140 + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
1141 + KBUILD_BASENAME".c", __LINE__, __func__,
1142 + "root debugfs missing, possible module ordering issue");
1143 + return -ENOMEM;
1144 + }
1145 +
1146 + sprintf(loop_file_name, "eth%d_loop", ++cnt);
1147 + priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
1148 + S_IRUGO,
1149 + dpa_debugfs_root,
1150 + net_dev,
1151 + &dpa_debugfs_lp_fops);
1152 + if (unlikely(priv->debugfs_loop_file == NULL)) {
1153 + netdev_err(net_dev, "debugfs_create_file(%s/%s)",
1154 + dpa_debugfs_root->d_iname,
1155 + loop_file_name);
1156 +
1157 + return -ENOMEM;
1158 + }
1159 + return 0;
1160 +}
1161 +
1162 +void dpa_netdev_debugfs_remove(struct net_device *net_dev)
1163 +{
1164 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1165 +
1166 + debugfs_remove(priv->debugfs_loop_file);
1167 +}
1168 +
1169 +int __init dpa_debugfs_module_init(void)
1170 +{
1171 + int _errno = 0;
1172 +
1173 + pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
1174 +
1175 + dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
1176 +
1177 + if (unlikely(dpa_debugfs_root == NULL)) {
1178 + _errno = -ENOMEM;
1179 + pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
1180 + KBUILD_BASENAME".c", __LINE__, __func__);
1181 + pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
1182 + DPA_ETH_DEBUGFS_ROOT, _errno);
1183 + }
1184 +
1185 + return _errno;
1186 +}
1187 +
1188 +void __exit dpa_debugfs_module_exit(void)
1189 +{
1190 + debugfs_remove(dpa_debugfs_root);
1191 +}
1192 --- /dev/null
1193 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
1194 @@ -0,0 +1,43 @@
1195 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1196 + *
1197 + * Redistribution and use in source and binary forms, with or without
1198 + * modification, are permitted provided that the following conditions are met:
1199 + * * Redistributions of source code must retain the above copyright
1200 + * notice, this list of conditions and the following disclaimer.
1201 + * * Redistributions in binary form must reproduce the above copyright
1202 + * notice, this list of conditions and the following disclaimer in the
1203 + * documentation and/or other materials provided with the distribution.
1204 + * * Neither the name of Freescale Semiconductor nor the
1205 + * names of its contributors may be used to endorse or promote products
1206 + * derived from this software without specific prior written permission.
1207 + *
1208 + *
1209 + * ALTERNATIVELY, this software may be distributed under the terms of the
1210 + * GNU General Public License ("GPL") as published by the Free Software
1211 + * Foundation, either version 2 of that License or (at your option) any
1212 + * later version.
1213 + *
1214 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1215 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1216 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1217 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1218 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1219 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1220 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1221 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1222 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1223 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1224 + */
1225 +
1226 +#ifndef DPAA_DEBUGFS_H_
1227 +#define DPAA_DEBUGFS_H_
1228 +
1229 +#include <linux/netdevice.h>
1230 +#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
1231 +
1232 +int dpa_netdev_debugfs_create(struct net_device *net_dev);
1233 +void dpa_netdev_debugfs_remove(struct net_device *net_dev);
1234 +int __init dpa_debugfs_module_init(void);
1235 +void __exit dpa_debugfs_module_exit(void);
1236 +
1237 +#endif /* DPAA_DEBUGFS_H_ */
1238 --- /dev/null
1239 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
1240 @@ -0,0 +1,1210 @@
1241 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1242 + *
1243 + * Redistribution and use in source and binary forms, with or without
1244 + * modification, are permitted provided that the following conditions are met:
1245 + * * Redistributions of source code must retain the above copyright
1246 + * notice, this list of conditions and the following disclaimer.
1247 + * * Redistributions in binary form must reproduce the above copyright
1248 + * notice, this list of conditions and the following disclaimer in the
1249 + * documentation and/or other materials provided with the distribution.
1250 + * * Neither the name of Freescale Semiconductor nor the
1251 + * names of its contributors may be used to endorse or promote products
1252 + * derived from this software without specific prior written permission.
1253 + *
1254 + *
1255 + * ALTERNATIVELY, this software may be distributed under the terms of the
1256 + * GNU General Public License ("GPL") as published by the Free Software
1257 + * Foundation, either version 2 of that License or (at your option) any
1258 + * later version.
1259 + *
1260 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1261 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1262 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1263 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1264 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1265 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1266 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1267 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1268 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1269 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1270 + */
1271 +
1272 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
1273 +#define pr_fmt(fmt) \
1274 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
1275 + KBUILD_BASENAME".c", __LINE__, __func__
1276 +#else
1277 +#define pr_fmt(fmt) \
1278 + KBUILD_MODNAME ": " fmt
1279 +#endif
1280 +
1281 +#include <linux/init.h>
1282 +#include <linux/module.h>
1283 +#include <linux/of_mdio.h>
1284 +#include <linux/of_net.h>
1285 +#include <linux/kthread.h>
1286 +#include <linux/io.h>
1287 +#include <linux/if_arp.h> /* arp_hdr_len() */
1288 +#include <linux/if_vlan.h> /* VLAN_HLEN */
1289 +#include <linux/icmp.h> /* struct icmphdr */
1290 +#include <linux/ip.h> /* struct iphdr */
1291 +#include <linux/ipv6.h> /* struct ipv6hdr */
1292 +#include <linux/udp.h> /* struct udphdr */
1293 +#include <linux/tcp.h> /* struct tcphdr */
1294 +#include <linux/net.h> /* net_ratelimit() */
1295 +#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
1296 +#include <linux/highmem.h>
1297 +#include <linux/percpu.h>
1298 +#include <linux/dma-mapping.h>
1299 +#include <linux/fsl_bman.h>
1300 +#ifdef CONFIG_SOC_BUS
1301 +#include <linux/sys_soc.h> /* soc_device_match */
1302 +#endif
1303 +
1304 +#include "fsl_fman.h"
1305 +#include "fm_ext.h"
1306 +#include "fm_port_ext.h"
1307 +
1308 +#include "mac.h"
1309 +#include "dpaa_eth.h"
1310 +#include "dpaa_eth_common.h"
1311 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1312 +#include "dpaa_debugfs.h"
1313 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
1314 +
1315 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1316 + * using trace events only need to #include <trace/events/sched.h>
1317 + */
1318 +#define CREATE_TRACE_POINTS
1319 +#include "dpaa_eth_trace.h"
1320 +
1321 +#define DPA_NAPI_WEIGHT 64
1322 +
1323 +/* Valid checksum indication */
1324 +#define DPA_CSUM_VALID 0xFFFF
1325 +
1326 +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
1327 +
1328 +MODULE_LICENSE("Dual BSD/GPL");
1329 +
1330 +MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
1331 +
1332 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
1333 +
1334 +static uint8_t debug = -1;
1335 +module_param(debug, byte, S_IRUGO);
1336 +MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
1337 +
1338 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
1339 +static uint16_t tx_timeout = 1000;
1340 +module_param(tx_timeout, ushort, S_IRUGO);
1341 +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
1342 +
1343 +static const char rtx[][3] = {
1344 + [RX] = "RX",
1345 + [TX] = "TX"
1346 +};
1347 +
1348 +#ifndef CONFIG_PPC
1349 +bool dpaa_errata_a010022;
1350 +EXPORT_SYMBOL(dpaa_errata_a010022);
1351 +#endif
1352 +
1353 +/* BM */
1354 +
1355 +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
1356 +
1357 +static uint8_t dpa_priv_common_bpid;
1358 +
1359 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1360 +struct net_device *dpa_loop_netdevs[20];
1361 +#endif
1362 +
1363 +#ifdef CONFIG_PM
1364 +
1365 +static int dpaa_suspend(struct device *dev)
1366 +{
1367 + struct net_device *net_dev;
1368 + struct dpa_priv_s *priv;
1369 + struct mac_device *mac_dev;
1370 + int err = 0;
1371 +
1372 + net_dev = dev_get_drvdata(dev);
1373 +
1374 + if (net_dev->flags & IFF_UP) {
1375 + priv = netdev_priv(net_dev);
1376 + mac_dev = priv->mac_dev;
1377 +
1378 + if (priv->wol & DPAA_WOL_MAGIC) {
1379 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1380 + priv->mac_dev->get_mac_handle(mac_dev), true);
1381 + if (err) {
1382 + netdev_err(net_dev, "set_wol() = %d\n", err);
1383 + goto set_wol_failed;
1384 + }
1385 + }
1386 +
1387 + err = fm_port_suspend(mac_dev->port_dev[RX]);
1388 + if (err) {
1389 + netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
1390 + goto rx_port_suspend_failed;
1391 + }
1392 +
1393 + err = fm_port_suspend(mac_dev->port_dev[TX]);
1394 + if (err) {
1395 + netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
1396 + goto tx_port_suspend_failed;
1397 + }
1398 + }
1399 +
1400 + return 0;
1401 +
1402 +tx_port_suspend_failed:
1403 + fm_port_resume(mac_dev->port_dev[RX]);
1404 +rx_port_suspend_failed:
1405 + if (priv->wol & DPAA_WOL_MAGIC) {
1406 + priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1407 + priv->mac_dev->get_mac_handle(mac_dev), false);
1408 + }
1409 +set_wol_failed:
1410 + return err;
1411 +}
1412 +
1413 +static int dpaa_resume(struct device *dev)
1414 +{
1415 + struct net_device *net_dev;
1416 + struct dpa_priv_s *priv;
1417 + struct mac_device *mac_dev;
1418 + int err = 0;
1419 +
1420 + net_dev = dev_get_drvdata(dev);
1421 +
1422 + if (net_dev->flags & IFF_UP) {
1423 + priv = netdev_priv(net_dev);
1424 + mac_dev = priv->mac_dev;
1425 +
1426 + err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev));
1427 + if (err) {
1428 + netdev_err(net_dev, "fm_mac_resume = %d\n", err);
1429 + goto resume_failed;
1430 + }
1431 +
1432 + err = fm_port_resume(mac_dev->port_dev[TX]);
1433 + if (err) {
1434 + netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
1435 + goto resume_failed;
1436 + }
1437 +
1438 + err = fm_port_resume(mac_dev->port_dev[RX]);
1439 + if (err) {
1440 + netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
1441 + goto resume_failed;
1442 + }
1443 +
1444 + if (priv->wol & DPAA_WOL_MAGIC) {
1445 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1446 + priv->mac_dev->get_mac_handle(mac_dev), false);
1447 + if (err) {
1448 + netdev_err(net_dev, "set_wol() = %d\n", err);
1449 + goto resume_failed;
1450 + }
1451 + }
1452 + }
1453 +
1454 + return 0;
1455 +
1456 +resume_failed:
1457 + return err;
1458 +}
1459 +
1460 +static const struct dev_pm_ops dpaa_pm_ops = {
1461 + .suspend = dpaa_suspend,
1462 + .resume = dpaa_resume,
1463 +};
1464 +
1465 +#define DPAA_PM_OPS (&dpaa_pm_ops)
1466 +
1467 +#else /* CONFIG_PM */
1468 +
1469 +#define DPAA_PM_OPS NULL
1470 +
1471 +#endif /* CONFIG_PM */
1472 +
1473 +/* Checks whether the checksum field in Parse Results array is valid
1474 + * (equals 0xFFFF) and increments the .cse counter otherwise
1475 + */
1476 +static inline void
1477 +dpa_csum_validation(const struct dpa_priv_s *priv,
1478 + struct dpa_percpu_priv_s *percpu_priv,
1479 + const struct qm_fd *fd)
1480 +{
1481 + dma_addr_t addr = qm_fd_addr(fd);
1482 + struct dpa_bp *dpa_bp = priv->dpa_bp;
1483 + void *frm = phys_to_virt(addr);
1484 + fm_prs_result_t *parse_result;
1485 +
1486 + if (unlikely(!frm))
1487 + return;
1488 +
1489 + dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
1490 + DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
1491 +
1492 + parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
1493 +
1494 + if (parse_result->cksum != DPA_CSUM_VALID)
1495 + percpu_priv->rx_errors.cse++;
1496 +}
1497 +
1498 +static void _dpa_rx_error(struct net_device *net_dev,
1499 + const struct dpa_priv_s *priv,
1500 + struct dpa_percpu_priv_s *percpu_priv,
1501 + const struct qm_fd *fd,
1502 + u32 fqid)
1503 +{
1504 + /* limit common, possibly innocuous Rx FIFO Overflow errors'
1505 + * interference with zero-loss convergence benchmark results.
1506 + */
1507 + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
1508 + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
1509 + else if (netif_msg_hw(priv) && net_ratelimit())
1511 + netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
1512 + fd->status & FM_FD_STAT_RX_ERRORS);
1513 +#ifdef CONFIG_FSL_DPAA_HOOKS
1514 + if (dpaa_eth_hooks.rx_error &&
1515 + dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1516 + /* it's up to the hook to perform resource cleanup */
1517 + return;
1518 +#endif
1519 + percpu_priv->stats.rx_errors++;
1520 +
1521 + if (fd->status & FM_PORT_FRM_ERR_DMA)
1522 + percpu_priv->rx_errors.dme++;
1523 + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
1524 + percpu_priv->rx_errors.fpe++;
1525 + if (fd->status & FM_PORT_FRM_ERR_SIZE)
1526 + percpu_priv->rx_errors.fse++;
1527 + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
1528 + percpu_priv->rx_errors.phe++;
1529 + if (fd->status & FM_FD_STAT_L4CV)
1530 + dpa_csum_validation(priv, percpu_priv, fd);
1531 +
1532 + dpa_fd_release(net_dev, fd);
1533 +}
1534 +
1535 +static void _dpa_tx_error(struct net_device *net_dev,
1536 + const struct dpa_priv_s *priv,
1537 + struct dpa_percpu_priv_s *percpu_priv,
1538 + const struct qm_fd *fd,
1539 + u32 fqid)
1540 +{
1541 + struct sk_buff *skb;
1542 +
1543 + if (netif_msg_hw(priv) && net_ratelimit())
1544 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1545 + fd->status & FM_FD_STAT_TX_ERRORS);
1546 +#ifdef CONFIG_FSL_DPAA_HOOKS
1547 + if (dpaa_eth_hooks.tx_error &&
1548 + dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1549 + /* now the hook must ensure proper cleanup */
1550 + return;
1551 +#endif
1552 + percpu_priv->stats.tx_errors++;
1553 +
1554 + /* If we intended the buffers from this frame to go into the bpools
1555 + * when the FMan transmit was done, we need to put them in manually.
1556 + */
1557 + if (fd->bpid != 0xff) {
1558 + dpa_fd_release(net_dev, fd);
1559 + return;
1560 + }
1561 +
1562 + skb = _dpa_cleanup_tx_fd(priv, fd);
1563 + dev_kfree_skb(skb);
1564 +}
1565 +
1566 +/* Helper function to factor out frame validation logic on all Rx paths. Its
1567 + * purpose is to extract from the Parse Results structure information about
1568 + * the integrity of the frame, its checksum, the length of the parsed headers
1569 + * and whether the frame is suitable for GRO.
1570 + *
1571 + * Assumes no parser errors, since any error frame is dropped before this
1572 + * function is called.
1573 + *
1574 + * @skb will have its ip_summed field overwritten;
1575 + * @use_gro will only be written with 0, if the frame is definitely not
1576 + * GRO-able; otherwise, it will be left unchanged;
1577 + * @hdr_size will be written with a safe value, at least the size of the
1578 + * headers' length.
1579 + */
1580 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
1581 + const struct qm_fd *fd,
1582 + struct sk_buff *skb, int *use_gro)
1583 +{
1584 + if (fd->status & FM_FD_STAT_L4CV) {
1585 + /* The parser has run and performed L4 checksum validation.
1586 + * We know there were no parser errors (and implicitly no
1587 + * L4 csum error), otherwise we wouldn't be here.
1588 + */
1589 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1590 +
1591 + /* Don't go through GRO for certain types of traffic that
1592 + * we know are not GRO-able, such as dgram-based protocols.
1593 + * In the worst-case scenarios, such as small-pkt terminating
1594 + * UDP, the extra GRO processing would be overkill.
1595 + *
1596 + * The only protocol the Parser supports that is also GRO-able
1597 + * is currently TCP.
1598 + */
1599 + if (!fm_l4_frame_is_tcp(parse_results))
1600 + *use_gro = 0;
1601 +
1602 + return;
1603 + }
1604 +
1605 + /* We're here because either the parser didn't run or the L4 checksum
1606 + * was not verified. This may include the case of a UDP frame with
1607 +	 * checksum zero or an L4 proto other than TCP/UDP.
1608 + */
1609 + skb->ip_summed = CHECKSUM_NONE;
1610 +
1611 + /* Bypass GRO for unknown traffic or if no PCDs are applied */
1612 + *use_gro = 0;
1613 +}
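+
+/* Illustrative sketch (not part of this driver) of how an Rx consumer is
+ * expected to act on the @use_gro hint computed above; the actual caller
+ * lives on the Rx hot path (see the _dpa_rx() implementation), and the
+ * names below are assumptions for the example only:
+ *
+ *	int use_gro = !!(net_dev->features & NETIF_F_GRO);
+ *
+ *	_dpa_process_parse_results(parse_results, fd, skb, &use_gro);
+ *	if (use_gro)
+ *		napi_gro_receive(&np->napi, skb);
+ *	else
+ *		netif_receive_skb(skb);
+ */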
1614 +
1615 +int dpaa_eth_poll(struct napi_struct *napi, int budget)
1616 +{
1617 + struct dpa_napi_portal *np =
1618 + container_of(napi, struct dpa_napi_portal, napi);
1619 +
1620 + int cleaned = qman_p_poll_dqrr(np->p, budget);
1621 +
1622 + if (cleaned < budget) {
1623 + int tmp;
1624 + napi_complete(napi);
1625 + tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1626 + DPA_BUG_ON(tmp);
1627 + }
1628 +
1629 + return cleaned;
1630 +}
1631 +EXPORT_SYMBOL(dpaa_eth_poll);
1632 +
1633 +static void __hot _dpa_tx_conf(struct net_device *net_dev,
1634 + const struct dpa_priv_s *priv,
1635 + struct dpa_percpu_priv_s *percpu_priv,
1636 + const struct qm_fd *fd,
1637 + u32 fqid)
1638 +{
1639 + struct sk_buff *skb;
1640 +
1641 + /* do we need the timestamp for the error frames? */
1642 +
1643 +	if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) {
1644 + if (netif_msg_hw(priv) && net_ratelimit())
1645 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1646 + fd->status & FM_FD_STAT_TX_ERRORS);
1647 +
1648 + percpu_priv->stats.tx_errors++;
1649 + }
1650 +
1651 + /* hopefully we need not get the timestamp before the hook */
1652 +#ifdef CONFIG_FSL_DPAA_HOOKS
1653 + if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
1654 + fd, fqid) == DPAA_ETH_STOLEN)
1655 + /* it's the hook that must now perform cleanup */
1656 + return;
1657 +#endif
1658 + /* This might not perfectly reflect the reality, if the core dequeuing
1659 + * the Tx confirmation is different from the one that did the enqueue,
1660 + * but at least it'll show up in the total count.
1661 + */
1662 + percpu_priv->tx_confirm++;
1663 +
1664 + skb = _dpa_cleanup_tx_fd(priv, fd);
1665 +
1666 + dev_kfree_skb(skb);
1667 +}
1668 +
1669 +enum qman_cb_dqrr_result
1670 +priv_rx_error_dqrr(struct qman_portal *portal,
1671 + struct qman_fq *fq,
1672 + const struct qm_dqrr_entry *dq)
1673 +{
1674 + struct net_device *net_dev;
1675 + struct dpa_priv_s *priv;
1676 + struct dpa_percpu_priv_s *percpu_priv;
1677 + int *count_ptr;
1678 +
1679 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1680 + priv = netdev_priv(net_dev);
1681 +
1682 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1683 + count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
1684 +
1685 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1686 + return qman_cb_dqrr_stop;
1687 +
1688 + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
1689 + /* Unable to refill the buffer pool due to insufficient
1690 + * system memory. Just release the frame back into the pool,
1691 + * otherwise we'll soon end up with an empty buffer pool.
1692 + */
1693 + dpa_fd_release(net_dev, &dq->fd);
1694 + else
1695 + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1696 +
1697 + return qman_cb_dqrr_consume;
1698 +}
1699 +
1700 +
1701 +enum qman_cb_dqrr_result __hot
1702 +priv_rx_default_dqrr(struct qman_portal *portal,
1703 + struct qman_fq *fq,
1704 + const struct qm_dqrr_entry *dq)
1705 +{
1706 + struct net_device *net_dev;
1707 + struct dpa_priv_s *priv;
1708 + struct dpa_percpu_priv_s *percpu_priv;
1709 + int *count_ptr;
1710 + struct dpa_bp *dpa_bp;
1711 +
1712 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1713 + priv = netdev_priv(net_dev);
1714 + dpa_bp = priv->dpa_bp;
1715 +
1716 + /* Trace the Rx fd */
1717 + trace_dpa_rx_fd(net_dev, fq, &dq->fd);
1718 +
1719 + /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
1720 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1721 + count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
1722 +
1723 + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
1724 + return qman_cb_dqrr_stop;
1725 +
1726 + /* Vale of plenty: make sure we didn't run out of buffers */
1727 +
1728 + if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
1729 + /* Unable to refill the buffer pool due to insufficient
1730 + * system memory. Just release the frame back into the pool,
1731 + * otherwise we'll soon end up with an empty buffer pool.
1732 + */
1733 + dpa_fd_release(net_dev, &dq->fd);
1734 + else
1735 + _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
1736 + count_ptr);
1737 +
1738 + return qman_cb_dqrr_consume;
1739 +}
1740 +
1741 +enum qman_cb_dqrr_result
1742 +priv_tx_conf_error_dqrr(struct qman_portal *portal,
1743 + struct qman_fq *fq,
1744 + const struct qm_dqrr_entry *dq)
1745 +{
1746 + struct net_device *net_dev;
1747 + struct dpa_priv_s *priv;
1748 + struct dpa_percpu_priv_s *percpu_priv;
1749 +
1750 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1751 + priv = netdev_priv(net_dev);
1752 +
1753 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1754 +
1755 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1756 + return qman_cb_dqrr_stop;
1757 +
1758 + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1759 +
1760 + return qman_cb_dqrr_consume;
1761 +}
1762 +
1763 +enum qman_cb_dqrr_result __hot
1764 +priv_tx_conf_default_dqrr(struct qman_portal *portal,
1765 + struct qman_fq *fq,
1766 + const struct qm_dqrr_entry *dq)
1767 +{
1768 + struct net_device *net_dev;
1769 + struct dpa_priv_s *priv;
1770 + struct dpa_percpu_priv_s *percpu_priv;
1771 +
1772 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1773 + priv = netdev_priv(net_dev);
1774 +
1775 + /* Trace the fd */
1776 + trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
1777 +
1778 + /* Non-migratable context, safe to use raw_cpu_ptr */
1779 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1780 +
1781 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1782 + return qman_cb_dqrr_stop;
1783 +
1784 + _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1785 +
1786 + return qman_cb_dqrr_consume;
1787 +}
1788 +
1789 +void priv_ern(struct qman_portal *portal,
1790 + struct qman_fq *fq,
1791 + const struct qm_mr_entry *msg)
1792 +{
1793 + struct net_device *net_dev;
1794 + const struct dpa_priv_s *priv;
1795 + struct sk_buff *skb;
1796 + struct dpa_percpu_priv_s *percpu_priv;
1797 + struct qm_fd fd = msg->ern.fd;
1798 +
1799 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1800 + priv = netdev_priv(net_dev);
1801 + /* Non-migratable context, safe to use raw_cpu_ptr */
1802 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1803 +
1804 + percpu_priv->stats.tx_dropped++;
1805 + percpu_priv->stats.tx_fifo_errors++;
1806 + count_ern(percpu_priv, msg);
1807 +
1808 + /* If we intended this buffer to go into the pool
1809 + * when the FM was done, we need to put it in
1810 + * manually.
1811 + */
1812 + if (msg->ern.fd.bpid != 0xff) {
1813 + dpa_fd_release(net_dev, &fd);
1814 + return;
1815 + }
1816 +
1817 + skb = _dpa_cleanup_tx_fd(priv, &fd);
1818 + dev_kfree_skb_any(skb);
1819 +}
1820 +
1821 +const struct dpa_fq_cbs_t private_fq_cbs = {
1822 + .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
1823 + .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
1824 + .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
1825 + .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
1826 + .egress_ern = { .cb = { .ern = priv_ern } }
1827 +};
1828 +EXPORT_SYMBOL(private_fq_cbs);
1829 +
1830 +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
1831 +{
1832 + struct dpa_percpu_priv_s *percpu_priv;
1833 + int i, j;
1834 +
1835 + for_each_possible_cpu(i) {
1836 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1837 +
1838 + for (j = 0; j < qman_portal_max; j++)
1839 + napi_enable(&percpu_priv->np[j].napi);
1840 + }
1841 +}
1842 +
1843 +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
1844 +{
1845 + struct dpa_percpu_priv_s *percpu_priv;
1846 + int i, j;
1847 +
1848 + for_each_possible_cpu(i) {
1849 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1850 +
1851 + for (j = 0; j < qman_portal_max; j++)
1852 + napi_disable(&percpu_priv->np[j].napi);
1853 + }
1854 +}
1855 +
1856 +static int __cold dpa_eth_priv_start(struct net_device *net_dev)
1857 +{
1858 + int err;
1859 + struct dpa_priv_s *priv;
1860 +
1861 + priv = netdev_priv(net_dev);
1862 +
1863 + dpaa_eth_napi_enable(priv);
1864 +
1865 + err = dpa_start(net_dev);
1866 + if (err < 0)
1867 + dpaa_eth_napi_disable(priv);
1868 +
1869 + return err;
1870 +}
1871 +
1874 +static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
1875 +{
1876 + int _errno;
1877 + struct dpa_priv_s *priv;
1878 +
1879 + _errno = dpa_stop(net_dev);
1880 + /* Allow NAPI to consume any frame still in the Rx/TxConfirm
1881 + * ingress queues. This is to avoid a race between the current
1882 + * context and ksoftirqd which could leave NAPI disabled while
1883 + * in fact there's still Rx traffic to be processed.
1884 + */
1885 + usleep_range(5000, 10000);
1886 +
1887 + priv = netdev_priv(net_dev);
1888 + dpaa_eth_napi_disable(priv);
1889 +
1890 + return _errno;
1891 +}
1892 +
1893 +#ifdef CONFIG_NET_POLL_CONTROLLER
1894 +static void dpaa_eth_poll_controller(struct net_device *net_dev)
1895 +{
1896 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1897 + struct dpa_percpu_priv_s *percpu_priv =
1898 + raw_cpu_ptr(priv->percpu_priv);
1899 + struct qman_portal *p;
1900 + const struct qman_portal_config *pc;
1901 + struct dpa_napi_portal *np;
1902 +
1903 + p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
1904 + pc = qman_p_get_portal_config(p);
1905 + np = &percpu_priv->np[pc->index];
1906 +
1907 + qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
1908 + qman_p_poll_dqrr(np->p, np->napi.weight);
1909 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1910 +}
1911 +#endif
1912 +
1913 +static const struct net_device_ops dpa_private_ops = {
1914 + .ndo_open = dpa_eth_priv_start,
1915 + .ndo_start_xmit = dpa_tx,
1916 + .ndo_stop = dpa_eth_priv_stop,
1917 + .ndo_tx_timeout = dpa_timeout,
1918 + .ndo_get_stats64 = dpa_get_stats64,
1919 + .ndo_set_mac_address = dpa_set_mac_address,
1920 + .ndo_validate_addr = eth_validate_addr,
1921 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
1922 + .ndo_select_queue = dpa_select_queue,
1923 +#endif
1924 + .ndo_change_mtu = dpa_change_mtu,
1925 + .ndo_set_rx_mode = dpa_set_rx_mode,
1926 + .ndo_init = dpa_ndo_init,
1927 + .ndo_set_features = dpa_set_features,
1928 + .ndo_fix_features = dpa_fix_features,
1929 + .ndo_do_ioctl = dpa_ioctl,
1930 +#ifdef CONFIG_NET_POLL_CONTROLLER
1931 + .ndo_poll_controller = dpaa_eth_poll_controller,
1932 +#endif
1933 +};
1934 +
1935 +static int dpa_private_napi_add(struct net_device *net_dev)
1936 +{
1937 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1938 + struct dpa_percpu_priv_s *percpu_priv;
1939 + int i, cpu;
1940 +
1941 + for_each_possible_cpu(cpu) {
1942 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
1943 +
1944 + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
1945 + qman_portal_max * sizeof(struct dpa_napi_portal),
1946 + GFP_KERNEL);
1947 +
1948 + if (unlikely(percpu_priv->np == NULL)) {
1949 + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
1950 + return -ENOMEM;
1951 + }
1952 +
1953 + for (i = 0; i < qman_portal_max; i++)
1954 + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
1955 + dpaa_eth_poll, DPA_NAPI_WEIGHT);
1956 + }
1957 +
1958 + return 0;
1959 +}
1960 +
1961 +void dpa_private_napi_del(struct net_device *net_dev)
1962 +{
1963 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1964 + struct dpa_percpu_priv_s *percpu_priv;
1965 + int i, cpu;
1966 +
1967 + for_each_possible_cpu(cpu) {
1968 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
1969 +
1970 + if (percpu_priv->np) {
1971 + for (i = 0; i < qman_portal_max; i++)
1972 + netif_napi_del(&percpu_priv->np[i].napi);
1973 +
1974 + devm_kfree(net_dev->dev.parent, percpu_priv->np);
1975 + }
1976 + }
1977 +}
1978 +EXPORT_SYMBOL(dpa_private_napi_del);
1979 +
1980 +static int dpa_private_netdev_init(struct net_device *net_dev)
1981 +{
1982 + int i;
1983 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1984 + struct dpa_percpu_priv_s *percpu_priv;
1985 + const uint8_t *mac_addr;
1986 +
1987 + /* Although we access another CPU's private data here
1988 +	 * we do it only at initialization time, so it is safe
1989 + */
1990 + for_each_possible_cpu(i) {
1991 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1992 + percpu_priv->net_dev = net_dev;
1993 + }
1994 +
1995 + net_dev->netdev_ops = &dpa_private_ops;
1996 + mac_addr = priv->mac_dev->addr;
1997 +
1998 + net_dev->mem_start = priv->mac_dev->res->start;
1999 + net_dev->mem_end = priv->mac_dev->res->end;
2000 +
2001 + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2002 + NETIF_F_LLTX);
2003 +
2004 + /* Advertise S/G and HIGHDMA support for private interfaces */
2005 + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
2006 +	/* Recent kernels enable GSO automatically if
2007 + * we declare NETIF_F_SG. For conformity, we'll
2008 + * still declare GSO explicitly.
2009 + */
2010 + net_dev->features |= NETIF_F_GSO;
2011 +
2012 + /* Advertise GRO support */
2013 + net_dev->features |= NETIF_F_GRO;
2014 +
2015 + return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
2016 +}
2017 +
2018 +static struct dpa_bp * __cold
2019 +dpa_priv_bp_probe(struct device *dev)
2020 +{
2021 + struct dpa_bp *dpa_bp;
2022 +
2023 + dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
2024 + if (unlikely(dpa_bp == NULL)) {
2025 + dev_err(dev, "devm_kzalloc() failed\n");
2026 + return ERR_PTR(-ENOMEM);
2027 + }
2028 +
2029 +	dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
+	if (!dpa_bp->percpu_count)
+		return ERR_PTR(-ENOMEM);
2030 + dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
2031 +
2032 + dpa_bp->seed_cb = dpa_bp_priv_seed;
2033 + dpa_bp->free_buf_cb = _dpa_bp_free_pf;
2034 +
2035 + return dpa_bp;
2036 +}
2037 +
2038 +/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
2039 + * We won't be sending congestion notifications to FMan; for now, we just use
2040 + * this CGR to generate enqueue rejections to FMan in order to drop the frames
2041 + * before they reach our ingress queues and eat up memory.
2042 + */
2043 +static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
2044 +{
2045 + struct qm_mcc_initcgr initcgr;
2046 + u32 cs_th;
2047 + int err;
2048 +
2049 + err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2050 + if (err < 0) {
2051 + pr_err("Error %d allocating CGR ID\n", err);
2052 + goto out_error;
2053 + }
2054 +
2055 +	/* Enable CS TD, but disable Congestion State Change Notifications:
+	 * cscn_en is set below but never committed to hardware, since
+	 * QM_CGR_WE_CSCN_EN is not added to we_mask.
+	 */
2056 + initcgr.we_mask = QM_CGR_WE_CS_THRES;
2057 + initcgr.cgr.cscn_en = QM_CGR_EN;
2058 + cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
2059 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2060 +
2061 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2062 + initcgr.cgr.cstd_en = QM_CGR_EN;
2063 +
2064 + /* This is actually a hack, because this CGR will be associated with
2065 + * our affine SWP. However, we'll place our ingress FQs in it.
2066 + */
2067 + err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2068 + &initcgr);
2069 + if (err < 0) {
2070 + pr_err("Error %d creating ingress CGR with ID %d\n", err,
2071 + priv->ingress_cgr.cgrid);
2072 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2073 + goto out_error;
2074 + }
2075 + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2076 + priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2077 +
2078 + /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
2079 + * range), but we have no common initialization path between the
2080 +	 * different variants of the DPAA Eth driver, so we set the flag here
2081 +	 * rather than in every variant other than "private Eth".
2082 + */
2083 + priv->use_ingress_cgr = true;
2084 +
2085 +out_error:
2086 + return err;
2087 +}
2088 +
2089 +static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
2090 + size_t count)
2091 +{
2092 + struct dpa_priv_s *priv = netdev_priv(net_dev);
2093 + int i;
2094 +
2095 + if (netif_msg_probe(priv))
2096 + dev_dbg(net_dev->dev.parent,
2097 + "Using private BM buffer pools\n");
2098 +
2099 + priv->bp_count = count;
2100 +
2101 + for (i = 0; i < count; i++) {
2102 + int err;
2103 + err = dpa_bp_alloc(&dpa_bp[i]);
2104 + if (err < 0) {
2105 + dpa_bp_free(priv);
2106 + priv->dpa_bp = NULL;
2107 + return err;
2108 + }
2109 +
2110 + priv->dpa_bp = &dpa_bp[i];
2111 + }
2112 +
2113 + dpa_priv_common_bpid = priv->dpa_bp->bpid;
2114 + return 0;
2115 +}
2116 +
2117 +static const struct of_device_id dpa_match[];
2118 +
2119 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2120 +static int dpa_new_loop_id(void)
2121 +{
2122 + static int if_id;
2123 +
2124 + return if_id++;
2125 +}
2126 +#endif
2127 +
2128 +static int
2129 +dpaa_eth_priv_probe(struct platform_device *_of_dev)
2130 +{
2131 + int err = 0, i, channel;
2132 + struct device *dev;
2133 + struct device_node *dpa_node;
2134 + struct dpa_bp *dpa_bp;
2135 + size_t count = 1;
2136 + struct net_device *net_dev = NULL;
2137 + struct dpa_priv_s *priv = NULL;
2138 + struct dpa_percpu_priv_s *percpu_priv;
2139 + struct fm_port_fqs port_fqs;
2140 + struct dpa_buffer_layout_s *buf_layout = NULL;
2141 + struct mac_device *mac_dev;
2142 +
2143 + dev = &_of_dev->dev;
2144 +
2145 + dpa_node = dev->of_node;
2146 +
2147 + if (!of_device_is_available(dpa_node))
2148 + return -ENODEV;
2149 +
2150 + /* Get the buffer pools assigned to this interface;
2151 +	 * running the default pool probing code only once
2152 + */
2153 + dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
2154 + dpa_priv_bp_probe(dev);
2155 + if (IS_ERR(dpa_bp))
2156 + return PTR_ERR(dpa_bp);
2157 +
2158 + /* Allocate this early, so we can store relevant information in
2159 + * the private area (needed by 1588 code in dpa_mac_probe)
2160 + */
2161 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
2162 +	if (!net_dev) {
2163 +		dev_err(dev, "alloc_etherdev_mq() failed\n");
+		err = -ENOMEM;
2164 +		goto alloc_etherdev_mq_failed;
2165 + }
2166 +
2167 + /* Do this here, so we can be verbose early */
2168 + SET_NETDEV_DEV(net_dev, dev);
2169 + dev_set_drvdata(dev, net_dev);
2170 +
2171 + priv = netdev_priv(net_dev);
2172 + priv->net_dev = net_dev;
2173 + strcpy(priv->if_type, "private");
2174 +
2175 + priv->msg_enable = netif_msg_init(debug, -1);
2176 +
2177 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2178 + priv->loop_id = dpa_new_loop_id();
2179 + priv->loop_to = -1; /* disabled by default */
2180 + dpa_loop_netdevs[priv->loop_id] = net_dev;
2181 +#endif
2182 +
2183 + mac_dev = dpa_mac_probe(_of_dev);
2184 +	if (IS_ERR_OR_NULL(mac_dev)) {
2185 +		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
2186 + goto mac_probe_failed;
2187 + }
2188 +
2189 + /* We have physical ports, so we need to establish
2190 + * the buffer layout.
2191 + */
2192 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
2193 + GFP_KERNEL);
2194 +	if (!buf_layout) {
2195 +		dev_err(dev, "devm_kzalloc() failed\n");
+		err = -ENOMEM;
2196 +		goto alloc_failed;
2197 + }
2198 + dpa_set_buffers_layout(mac_dev, buf_layout);
2199 +
2200 + /* For private ports, need to compute the size of the default
2201 +	 * buffer pool, based on the FMan port buffer layout; also update
2202 + * the maximum buffer size for private ports if necessary
2203 + */
2204 + dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
2205 +
2206 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2207 + /* We only want to use jumbo frame optimization if we actually have
2208 + * L2 MAX FRM set for jumbo frames as well.
2209 + */
2210 +#ifndef CONFIG_PPC
2211 + if (likely(!dpaa_errata_a010022))
2212 +#endif
2213 +	if (fm_get_max_frm() < 9600)
2214 + dev_warn(dev,
2215 + "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
2216 +#endif
2217 +
2218 + INIT_LIST_HEAD(&priv->dpa_fq_list);
2219 +
2220 + memset(&port_fqs, 0, sizeof(port_fqs));
2221 +
2222 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
2223 + if (!err)
2224 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
2225 + &port_fqs, true, TX);
2226 +
2227 + if (err < 0)
2228 + goto fq_probe_failed;
2229 +
2230 + /* bp init */
2231 +
2232 + err = dpa_priv_bp_create(net_dev, dpa_bp, count);
2233 +
2234 + if (err < 0)
2235 + goto bp_create_failed;
2236 +
2237 + priv->mac_dev = mac_dev;
2238 +
2239 + channel = dpa_get_channel();
2240 +
2241 + if (channel < 0) {
2242 + err = channel;
2243 + goto get_channel_failed;
2244 + }
2245 +
2246 + priv->channel = (uint16_t)channel;
2247 + dpaa_eth_add_channel(priv->channel);
2248 +
2249 + dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
2250 +
2251 + /* Create a congestion group for this netdev, with
2252 + * dynamically-allocated CGR ID.
2253 + * Must be executed after probing the MAC, but before
2254 + * assigning the egress FQs to the CGRs.
2255 + */
2256 + err = dpaa_eth_cgr_init(priv);
2257 + if (err < 0) {
2258 + dev_err(dev, "Error initializing CGR\n");
2259 + goto tx_cgr_init_failed;
2260 + }
2261 + err = dpaa_eth_priv_ingress_cgr_init(priv);
2262 + if (err < 0) {
2263 + dev_err(dev, "Error initializing ingress CGR\n");
2264 + goto rx_cgr_init_failed;
2265 + }
2266 +
2267 + /* Add the FQs to the interface, and make them active */
2268 + err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
2269 + if (err < 0)
2270 + goto fq_alloc_failed;
2271 +
2272 + priv->buf_layout = buf_layout;
2273 + priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
2274 + priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
2275 +
2276 + /* All real interfaces need their ports initialized */
2277 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
2278 + buf_layout, dev);
2279 +
2280 +#ifdef CONFIG_FMAN_PFC
2281 + for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
2282 + err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
2283 + mac_dev->port_dev[TX], i, i);
2284 + if (unlikely(err != 0)) {
2285 + dev_err(dev, "Error maping PFC %u to WQ %u\n", i, i);
2286 + goto pfc_mapping_failed;
2287 + }
2288 + }
2289 +#endif
2290 +
2291 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2292 +
2293 + if (priv->percpu_priv == NULL) {
2294 + dev_err(dev, "devm_alloc_percpu() failed\n");
2295 + err = -ENOMEM;
2296 + goto alloc_percpu_failed;
2297 + }
2298 + for_each_possible_cpu(i) {
2299 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2300 + memset(percpu_priv, 0, sizeof(*percpu_priv));
2301 + }
2302 +
2303 + /* Initialize NAPI */
2304 + err = dpa_private_napi_add(net_dev);
2305 +
2306 + if (err < 0)
2307 + goto napi_add_failed;
2308 +
2309 + err = dpa_private_netdev_init(net_dev);
2310 +
2311 + if (err < 0)
2312 + goto netdev_init_failed;
2313 +
2314 + dpaa_eth_sysfs_init(&net_dev->dev);
2315 +
2316 +#ifdef CONFIG_PM
2317 + device_set_wakeup_capable(dev, true);
2318 +#endif
2319 +
2320 + pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
2321 +
2322 + return 0;
2323 +
2324 +netdev_init_failed:
2325 +napi_add_failed:
2326 + dpa_private_napi_del(net_dev);
2327 +alloc_percpu_failed:
2328 +#ifdef CONFIG_FMAN_PFC
2329 +pfc_mapping_failed:
2330 +#endif
2331 + dpa_fq_free(dev, &priv->dpa_fq_list);
2332 +fq_alloc_failed:
2333 + qman_delete_cgr_safe(&priv->ingress_cgr);
2334 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2335 +rx_cgr_init_failed:
2336 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
2337 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
2338 +tx_cgr_init_failed:
2339 +get_channel_failed:
2340 + dpa_bp_free(priv);
2341 +bp_create_failed:
2342 +fq_probe_failed:
2343 +alloc_failed:
2344 +mac_probe_failed:
2345 + dev_set_drvdata(dev, NULL);
2346 + free_netdev(net_dev);
2347 +alloc_etherdev_mq_failed:
2348 + if (atomic_read(&dpa_bp->refs) == 0)
2349 + devm_kfree(dev, dpa_bp);
2350 +
2351 + return err;
2352 +}
2353 +
2354 +static const struct of_device_id dpa_match[] = {
2355 + {
2356 + .compatible = "fsl,dpa-ethernet"
2357 + },
2358 + {}
2359 +};
2360 +MODULE_DEVICE_TABLE(of, dpa_match);
2361 +
2362 +static struct platform_driver dpa_driver = {
2363 + .driver = {
2364 + .name = KBUILD_MODNAME,
2365 + .of_match_table = dpa_match,
2366 + .owner = THIS_MODULE,
2367 + .pm = DPAA_PM_OPS,
2368 + },
2369 + .probe = dpaa_eth_priv_probe,
2370 + .remove = dpa_remove
2371 +};
2372 +
2373 +#ifndef CONFIG_PPC
2374 +static bool __init __cold soc_has_errata_a010022(void)
2375 +{
2376 +#ifdef CONFIG_SOC_BUS
2377 + const struct soc_device_attribute soc_msi_matches[] = {
2378 + { .family = "QorIQ LS1043A",
2379 + .data = NULL },
2380 + { },
2381 + };
2382 +
2383 + if (soc_device_match(soc_msi_matches))
2384 + return true;
2385 +
2386 + return false;
2387 +#else
2388 + return true; /* cannot identify SoC */
2389 +#endif
2390 +}
2391 +#endif
2392 +
2393 +static int __init __cold dpa_load(void)
2394 +{
2395 + int _errno;
2396 +
2397 + pr_info(DPA_DESCRIPTION "\n");
2398 +
2399 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2400 + dpa_debugfs_module_init();
2401 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2402 +
2403 + /* initialise dpaa_eth mirror values */
2404 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
2405 + dpa_max_frm = fm_get_max_frm();
2406 + dpa_num_cpus = num_possible_cpus();
2407 +
2408 +#ifndef CONFIG_PPC
2409 + /* Detect if the current SoC requires the 4K alignment workaround */
2410 + dpaa_errata_a010022 = soc_has_errata_a010022();
2411 +#endif
2412 +
2413 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2414 + memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
2415 +#endif
2416 +
2417 + _errno = platform_driver_register(&dpa_driver);
2418 + if (unlikely(_errno < 0)) {
2419 + pr_err(KBUILD_MODNAME
2420 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
2421 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
2422 + }
2423 +
2424 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2425 + KBUILD_BASENAME".c", __func__);
2426 +
2427 + return _errno;
2428 +}
2429 +module_init(dpa_load);
2430 +
2431 +static void __exit __cold dpa_unload(void)
2432 +{
2433 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
2434 + KBUILD_BASENAME".c", __func__);
2435 +
2436 + platform_driver_unregister(&dpa_driver);
2437 +
2438 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2439 + dpa_debugfs_module_exit();
2440 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2441 +
2442 +	/* Only one channel is used and needs to be released after all
2443 + * interfaces are removed
2444 + */
2445 + dpa_release_channel();
2446 +
2447 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2448 + KBUILD_BASENAME".c", __func__);
2449 +}
2450 +module_exit(dpa_unload);
2451 --- /dev/null
2452 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
2453 @@ -0,0 +1,697 @@
2454 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
2455 + *
2456 + * Redistribution and use in source and binary forms, with or without
2457 + * modification, are permitted provided that the following conditions are met:
2458 + * * Redistributions of source code must retain the above copyright
2459 + * notice, this list of conditions and the following disclaimer.
2460 + * * Redistributions in binary form must reproduce the above copyright
2461 + * notice, this list of conditions and the following disclaimer in the
2462 + * documentation and/or other materials provided with the distribution.
2463 + * * Neither the name of Freescale Semiconductor nor the
2464 + * names of its contributors may be used to endorse or promote products
2465 + * derived from this software without specific prior written permission.
2466 + *
2467 + *
2468 + * ALTERNATIVELY, this software may be distributed under the terms of the
2469 + * GNU General Public License ("GPL") as published by the Free Software
2470 + * Foundation, either version 2 of that License or (at your option) any
2471 + * later version.
2472 + *
2473 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2474 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2475 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2476 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2477 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2478 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2479 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2480 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2481 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2482 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2483 + */
2484 +
2485 +#ifndef __DPA_H
2486 +#define __DPA_H
2487 +
2488 +#include <linux/netdevice.h>
2489 +#include <linux/fsl_qman.h> /* struct qman_fq */
2490 +
2491 +#include "fm_ext.h"
2492 +#include "dpaa_eth_trace.h"
2493 +
2494 +extern int dpa_rx_extra_headroom;
2495 +extern int dpa_max_frm;
2496 +extern int dpa_num_cpus;
2497 +
2498 +#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
2499 +#define dpa_get_max_frm() dpa_max_frm
2500 +
2501 +#define dpa_get_max_mtu() \
2502 + (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
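+
+/* Worked example: with FSL_FM_MAX_FRAME_SIZE set to 1522,
+ *	dpa_get_max_mtu() = 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN)
+ *	                  = 1522 - (18 + 4) = 1500,
+ * i.e. the standard Ethernet MTU once the VLAN-tagged Ethernet header and
+ * the FCS are subtracted.
+ */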
2503 +
2504 +#define __hot
2505 +
2506 +/* Simple enum of FQ types - used for array indexing */
2507 +enum port_type {RX, TX};
2508 +
2509 +/* TODO: This structure should be renamed & moved to the FMD wrapper */
2510 +struct dpa_buffer_layout_s {
2511 + uint16_t priv_data_size;
2512 + bool parse_results;
2513 + bool time_stamp;
2514 + bool hash_results;
2515 + uint8_t manip_extra_space;
2516 + uint16_t data_align;
2517 +};
2518 +
2519 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
2520 +#define DPA_BUG_ON(cond) BUG_ON(cond)
2521 +#else
2522 +#define DPA_BUG_ON(cond)
2523 +#endif
2524 +
2525 +#define DPA_TX_PRIV_DATA_SIZE 16
2526 +#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
2527 +#define DPA_TIME_STAMP_SIZE 8
2528 +#define DPA_HASH_RESULTS_SIZE 8
2529 +#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
2530 + dpa_get_rx_extra_headroom())
2531 +
2532 +#define FM_FD_STAT_RX_ERRORS \
2533 + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
2534 + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
2535 + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
2536 + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
2537 + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
2538 +
2539 +#define FM_FD_STAT_TX_ERRORS \
2540 + (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
2541 + FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
2542 +
2543 +#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2544 +/* The raw buffer size must be cacheline aligned.
2545 + * Normally we use 2K buffers.
2546 + */
2547 +#define DPA_BP_RAW_SIZE 2048
2548 +#else
2549 +/* For jumbo frame optimizations, use buffers large enough to accommodate
2550 + * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
2551 + * space to account for further alignments.
2552 + */
2553 +#define DPA_MAX_FRM_SIZE 9600
2554 +#ifdef CONFIG_PPC
2555 +#define DPA_BP_RAW_SIZE \
2556 + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
2557 + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
2558 +#else /* CONFIG_PPC */
2559 +#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ? 2048 : \
2560 + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
2561 + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1)))
2562 +#endif /* CONFIG_PPC */
2563 +#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */
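+
+/* Worked example for the jumbo case (illustrative values: a 320-byte
+ * struct skb_shared_info and SMP_CACHE_BYTES = 64; both vary with the
+ * architecture and kernel configuration):
+ *	(9600 + 511 + 320 + 128) & ~63 = 10559 & ~63 = 10496
+ * so each raw buffer would span 10496 bytes.
+ */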
2564 +
2565 +/* This is what FMan is ever allowed to use.
2566 + * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
2567 + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
2568 + * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
2569 + * half-page-aligned buffers (can we?), so we reserve some more space
2570 + * for start-of-buffer alignment.
2571 + */
2572 +#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
2573 + SMP_CACHE_BYTES)
2574 +/* We must ensure that skb_shinfo is always cacheline-aligned. */
2575 +#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
2576 +
2577 +/* Maximum size of a buffer for which recycling is allowed.
2578 + * We need an upper limit such that forwarded skbs that get reallocated on Tx
2579 + * aren't allowed to grow unboundedly. On the other hand, we need to make sure
2580 + * that skbs allocated by us will not fail to be recycled due to their size.
2581 + *
2582 + * For a requested size, the kernel allocator provides the next power of two
2583 + * sized block, which the stack will use as is, regardless of the actual size
2584 + * it required; since we must accommodate at most 9.6K buffers (L2 maximum
2585 + * supported frame size), set the recycling upper limit to 16K.
2586 + */
2587 +#define DPA_RECYCLE_MAX_SIZE 16384
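+
+/* Example: an skb sized for a 9.6K frame needs more than 8K of buffer
+ * space, and the kernel's power-of-two allocator serves that request from
+ * the next size up, i.e. a 16K block; hence the 16K recycling cap.
+ */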
2588 +
2589 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2590 +/*TODO: temporary for fman pcd testing */
2591 +#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
2592 +#endif
2593 +
2594 +#define DPAA_ETH_FQ_DELTA 0x10000
2595 +
2596 +#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
2597 + (((device_addr) & 0x1fffff) >> 6)
2598 +
2599 +#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
2600 + (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
2601 +
2602 +/* Largest value that the FQD's OAL field can hold.
2603 + * This is DPAA-1.x specific.
2604 + * TODO: This rather belongs in fsl_qman.h
2605 + */
2606 +#define FSL_QMAN_MAX_OAL 127
2607 +
2608 +/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
2609 +#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
2610 +
2611 +/* Default alignment for start of data in an Rx FD */
2612 +#define DPA_FD_DATA_ALIGNMENT 16
2613 +
2614 +/* Values for the L3R field of the FM Parse Results
2615 + */
2616 +/* L3 Type field: First IP Present IPv4 */
2617 +#define FM_L3_PARSE_RESULT_IPV4 0x8000
2618 +/* L3 Type field: First IP Present IPv6 */
2619 +#define FM_L3_PARSE_RESULT_IPV6 0x4000
2620 +
2621 +/* Values for the L4R field of the FM Parse Results
2622 + * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
2623 + */
2624 +/* L4 Type field: UDP */
2625 +#define FM_L4_PARSE_RESULT_UDP 0x40
2626 +/* L4 Type field: TCP */
2627 +#define FM_L4_PARSE_RESULT_TCP 0x20
2628 +/* FD status field indicating whether the FM Parser has attempted to validate
2629 + * the L4 csum of the frame.
2630 + * Note that having this bit set doesn't necessarily imply that the checksum
2631 + * is valid. One would have to check the parse results to find that out.
2632 + */
2633 +#define FM_FD_STAT_L4CV 0x00000004
2634 +
2635 +
2636 +#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
2637 +
2638 +/* Check if the parsed frame was found to be a TCP segment.
2639 + *
2640 + * @parse_result_ptr must be of type (fm_prs_result_t *).
2641 + */
2642 +#define fm_l4_frame_is_tcp(parse_result_ptr) \
2643 + ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
2644 +
2645 +/* number of Tx queues to FMan */
2646 +#ifdef CONFIG_FMAN_PFC
2647 +#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
2648 +#else
2649 +#define DPAA_ETH_TX_QUEUES NR_CPUS
2650 +#endif
2651 +
2652 +#define DPAA_ETH_RX_QUEUES 128
2653 +
2654 +/* Convenience macros for storing/retrieving the skb back-pointers. They must
2655 + * accommodate both recycling and confirmation paths - i.e. cases when the buf
2656 + * was allocated by ourselves or by the stack, respectively. In the former case,
2657 + * we could store the skb at negative offset; in the latter case, we can't,
2658 + * so we'll use 0 as offset.
2659 + *
2660 + * NB: @off is an offset from a (struct sk_buff **) pointer!
2661 + */
2662 +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
2663 +{ \
2664 + skbh = (struct sk_buff **)addr; \
2665 + *(skbh + (off)) = skb; \
2666 +}
2667 +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
2668 +{ \
2669 + skbh = (struct sk_buff **)addr; \
2670 + skb = *(skbh + (off)); \
2671 +}
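+
+/* Usage sketch (hypothetical "addr" pointing at the buffer start): on the
+ * confirmation path the skb pointer is stored at offset 0, then read back
+ * when the FD completes:
+ *
+ *	struct sk_buff **skbh;
+ *
+ *	DPA_WRITE_SKB_PTR(skb, skbh, addr, 0);	(Tx path, stack-allocated buf)
+ *	DPA_READ_SKB_PTR(skb, skbh, addr, 0);	(Tx confirmation path)
+ */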
2672 +
2673 +#ifdef CONFIG_PM
2674 +/* Magic Packet wakeup */
2675 +#define DPAA_WOL_MAGIC 0x00000001
2676 +#endif
2677 +
2678 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2679 +struct pcd_range {
2680 + uint32_t base;
2681 + uint32_t count;
2682 +};
2683 +#endif
2684 +
2685 +/* More detailed FQ types - used for fine-grained WQ assignments */
2686 +enum dpa_fq_type {
2687 + FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
2688 + FQ_TYPE_RX_ERROR, /* Rx Error FQs */
2689 + FQ_TYPE_RX_PCD, /* User-defined PCDs */
2690 + FQ_TYPE_TX, /* "Real" Tx FQs */
2691 + FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
2692 + FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
2693 + FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
2694 + FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
2695 +};
2696 +
2697 +struct dpa_fq {
2698 + struct qman_fq fq_base;
2699 + struct list_head list;
2700 + struct net_device *net_dev;
2701 + bool init;
2702 + uint32_t fqid;
2703 + uint32_t flags;
2704 + uint16_t channel;
2705 + uint8_t wq;
2706 + enum dpa_fq_type fq_type;
2707 +};
2708 +
2709 +struct dpa_fq_cbs_t {
2710 + struct qman_fq rx_defq;
2711 + struct qman_fq tx_defq;
2712 + struct qman_fq rx_errq;
2713 + struct qman_fq tx_errq;
2714 + struct qman_fq egress_ern;
2715 +};
2716 +
2717 +struct fqid_cell {
2718 + uint32_t start;
2719 + uint32_t count;
2720 +};
2721 +
2722 +struct dpa_bp {
2723 + struct bman_pool *pool;
2724 + uint8_t bpid;
2725 + struct device *dev;
2726 + union {
2727 + /* The buffer pools used for the private ports are initialized
2728 + * with target_count buffers for each CPU; at runtime the
2729 + * number of buffers per CPU is constantly brought back to this
2730 + * level
2731 + */
2732 + int target_count;
2733 + /* The configured value for the number of buffers in the pool,
2734 + * used for shared port buffer pools
2735 + */
2736 + int config_count;
2737 + };
2738 + size_t size;
2739 + bool seed_pool;
2740 + /* physical address of the contiguous memory used by the pool to store
2741 + * the buffers
2742 + */
2743 + dma_addr_t paddr;
2744 + /* virtual address of the contiguous memory used by the pool to store
2745 + * the buffers
2746 + */
2747 + void __iomem *vaddr;
2748 +	/* current number of buffers in the bpool allotted to this CPU */
2749 + int __percpu *percpu_count;
2750 + atomic_t refs;
2751 + /* some bpools need to be seeded before use by this cb */
2752 + int (*seed_cb)(struct dpa_bp *);
2753 + /* some bpools need to be emptied before freeing; this cb is used
2754 + * for freeing of individual buffers taken from the pool
2755 + */
2756 + void (*free_buf_cb)(void *addr);
2757 +};
2758 +
2759 +struct dpa_rx_errors {
2760 + u64 dme; /* DMA Error */
2761 + u64 fpe; /* Frame Physical Error */
2762 + u64 fse; /* Frame Size Error */
2763 + u64 phe; /* Header Error */
2764 + u64 cse; /* Checksum Validation Error */
2765 +};
2766 +
2767 +/* Counters for QMan ERN frames - one counter per rejection code */
2768 +struct dpa_ern_cnt {
2769 + u64 cg_tdrop; /* Congestion group taildrop */
2770 + u64 wred; /* WRED congestion */
2771 + u64 err_cond; /* Error condition */
2772 + u64 early_window; /* Order restoration, frame too early */
2773 + u64 late_window; /* Order restoration, frame too late */
2774 + u64 fq_tdrop; /* FQ taildrop */
2775 + u64 fq_retired; /* FQ is retired */
2776 + u64 orp_zero; /* ORP disabled */
2777 +};
2778 +
2779 +struct dpa_napi_portal {
2780 + struct napi_struct napi;
2781 + struct qman_portal *p;
2782 +};
2783 +
2784 +struct dpa_percpu_priv_s {
2785 + struct net_device *net_dev;
2786 + struct dpa_napi_portal *np;
2787 + u64 in_interrupt;
2788 + u64 tx_returned;
2789 + u64 tx_confirm;
2790 + /* fragmented (non-linear) skbuffs received from the stack */
2791 + u64 tx_frag_skbuffs;
2792 + /* number of S/G frames received */
2793 + u64 rx_sg;
2794 +
2795 + struct rtnl_link_stats64 stats;
2796 + struct dpa_rx_errors rx_errors;
2797 + struct dpa_ern_cnt ern_cnt;
2798 +};
2799 +
2800 +struct dpa_priv_s {
2801 + struct dpa_percpu_priv_s __percpu *percpu_priv;
2802 + struct dpa_bp *dpa_bp;
2803 + /* Store here the needed Tx headroom for convenience and speed
2804 + * (even though it can be computed based on the fields of buf_layout)
2805 + */
2806 + uint16_t tx_headroom;
2807 + struct net_device *net_dev;
2808 + struct mac_device *mac_dev;
2809 + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
2810 + struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
2811 +
2812 + size_t bp_count;
2813 +
2814 + uint16_t channel; /* "fsl,qman-channel-id" */
2815 + struct list_head dpa_fq_list;
2816 +
2817 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2818 + struct dentry *debugfs_loop_file;
2819 +#endif
2820 +
2821 + uint32_t msg_enable; /* net_device message level */
2822 +#ifdef CONFIG_FSL_DPAA_1588
2823 + struct dpa_ptp_tsu *tsu;
2824 +#endif
2825 +
2826 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2827 +/* TODO: this is temporary until pcd support is implemented in dpaa */
2828 + int priv_pcd_num_ranges;
2829 + struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
2830 +#endif
2831 +
2832 + struct {
2833 + /**
2834 + * All egress queues to a given net device belong to one
2835 + * (and the same) congestion group.
2836 + */
2837 + struct qman_cgr cgr;
2838 + /* If congested, when it began. Used for performance stats. */
2839 + u32 congestion_start_jiffies;
2840 + /* Number of jiffies the Tx port was congested. */
2841 + u32 congested_jiffies;
2842 + /**
2843 + * Counter for the number of times the CGR
2844 + * entered congestion state
2845 + */
2846 + u32 cgr_congested_count;
2847 + } cgr_data;
2848 + /* Use a per-port CGR for ingress traffic. */
2849 + bool use_ingress_cgr;
2850 + struct qman_cgr ingress_cgr;
2851 +
2852 +#ifdef CONFIG_FSL_DPAA_TS
2853 + bool ts_tx_en; /* Tx timestamping enabled */
2854 + bool ts_rx_en; /* Rx timestamping enabled */
2855 +#endif /* CONFIG_FSL_DPAA_TS */
2856 +
2857 + struct dpa_buffer_layout_s *buf_layout;
2858 + uint16_t rx_headroom;
2859 + char if_type[30];
2860 +
2861 + void *peer;
2862 +#ifdef CONFIG_PM
2863 + u32 wol;
2864 +#endif
2865 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2866 + int loop_id;
2867 + int loop_to;
2868 +#endif
2869 +#ifdef CONFIG_FSL_DPAA_CEETM
2870 + bool ceetm_en; /* CEETM QoS enabled */
2871 +#endif
2872 +};
2873 +
2874 +struct fm_port_fqs {
2875 + struct dpa_fq *tx_defq;
2876 + struct dpa_fq *tx_errq;
2877 + struct dpa_fq *rx_defq;
2878 + struct dpa_fq *rx_errq;
2879 +};
2880 +
2881 +
2882 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2883 +extern struct net_device *dpa_loop_netdevs[20];
2884 +#endif
2885 +
2886 +/* functions with different implementation for SG and non-SG: */
2887 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
2888 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
2889 +void __hot _dpa_rx(struct net_device *net_dev,
2890 + struct qman_portal *portal,
2891 + const struct dpa_priv_s *priv,
2892 + struct dpa_percpu_priv_s *percpu_priv,
2893 + const struct qm_fd *fd,
2894 + u32 fqid,
2895 + int *count_ptr);
2896 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
2897 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
2898 + struct qman_fq *egress_fq, struct qman_fq *conf_fq);
2899 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
2900 + const struct qm_fd *fd);
2901 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
2902 + const struct qm_fd *fd,
2903 + struct sk_buff *skb,
2904 + int *use_gro);
2905 +#ifndef CONFIG_FSL_DPAA_TS
2906 +bool dpa_skb_is_recyclable(struct sk_buff *skb);
2907 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
2908 + uint32_t min_size,
2909 + uint16_t min_offset,
2910 + unsigned char **new_buf_start);
2911 +#endif
2912 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
2913 + struct sk_buff *skb, struct qm_fd *fd,
2914 + int *count_ptr, int *offset);
2915 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
2916 + struct sk_buff *skb, struct qm_fd *fd);
2917 +int __cold __attribute__((nonnull))
2918 + _dpa_fq_free(struct device *dev, struct qman_fq *fq);
2919 +
2920 +/* Turn on HW checksum computation for this outgoing frame.
2921 + * If the current protocol is not something we support in this regard
2922 + * (or if the stack has already computed the SW checksum), we do nothing.
2923 + *
2924 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
2925 + * otherwise.
2926 + *
2927 + * Note that this function may modify the fd->cmd field and the skb data buffer
2928 + * (the Parse Results area).
2929 + */
2930 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
2931 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
2932 +
2933 +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
2934 + struct qman_portal *portal)
2935 +{
2936 +	/* With threaded interrupts on an RT-enabled kernel,
2937 +	 * in_irq() does not return an appropriate value, so use
2938 +	 * in_serving_softirq() to distinguish softirq from irq context.
2939 + */
2940 + if (unlikely(in_irq() || !in_serving_softirq())) {
2941 + /* Disable QMan IRQ and invoke NAPI */
2942 + int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2943 + if (likely(!ret)) {
2944 + const struct qman_portal_config *pc =
2945 + qman_p_get_portal_config(portal);
2946 + struct dpa_napi_portal *np =
2947 + &percpu_priv->np[pc->index];
2948 +
2949 + np->p = portal;
2950 + napi_schedule(&np->napi);
2951 + percpu_priv->in_interrupt++;
2952 + return 1;
2953 + }
2954 + }
2955 + return 0;
2956 +}
2957 +
2958 +static inline ssize_t __const __must_check __attribute__((nonnull))
2959 +dpa_fd_length(const struct qm_fd *fd)
2960 +{
2961 + return fd->length20;
2962 +}
2963 +
2964 +static inline ssize_t __const __must_check __attribute__((nonnull))
2965 +dpa_fd_offset(const struct qm_fd *fd)
2966 +{
2967 + return fd->offset;
2968 +}
2969 +
2970 +/* Verifies if the skb length is below the interface MTU */
2971 +static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
2972 +{
2973 + if (unlikely(skb->len > mtu))
2974 + if ((skb->protocol != htons(ETH_P_8021Q))
2975 + || (skb->len > mtu + 4))
2976 + return -1;
2977 +
2978 + return 0;
2979 +}
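+
+/* Example: with mtu = 1500, a 1504-byte 802.1Q-tagged skb is accepted
+ * (covered by the 4-byte VLAN tag allowance), while a 1504-byte untagged
+ * skb, or a 1505-byte tagged one, is rejected.
+ */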
2980 +
2981 +static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
2982 +{
2983 + uint16_t headroom;
2984 + /* The frame headroom must accommodate:
2985 + * - the driver private data area
2986 + * - parse results, hash results, timestamp if selected
2987 + * - manip extra space
2988 + * If either hash results or time stamp are selected, both will
2989 + * be copied to/from the frame headroom, as TS is located between PR and
2990 +	 * HR in the IC and IC copy size has a granularity of 16 bytes
2991 + * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
2992 + *
2993 + * Also make sure the headroom is a multiple of data_align bytes
2994 + */
2995 + headroom = (uint16_t)(bl->priv_data_size +
2996 + (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
2997 + (bl->hash_results || bl->time_stamp ?
2998 + DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
2999 + bl->manip_extra_space);
3000 +
3001 + return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
3002 +}
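+
+/* Worked example (illustrative sizes; the real parse results area is
+ * sizeof(fm_prs_result_t)): priv_data_size = 16, parse_results = true
+ * with a 32-byte parse results area, time_stamp = true,
+ * manip_extra_space = 0, data_align = 16:
+ *
+ *	headroom = 16 + 32 + (8 + 8) + 0 = 64
+ *	ALIGN(64, 16) = 64
+ */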
3003 +
3004 +int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
3005 +int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
3006 +int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
3007 +
3008 +void dpaa_eth_sysfs_remove(struct device *dev);
3009 +void dpaa_eth_sysfs_init(struct device *dev);
3010 +int dpaa_eth_poll(struct napi_struct *napi, int budget);
3011 +
3012 +void dpa_private_napi_del(struct net_device *net_dev);
3013 +
3014 +/* Equivalent to a memset(0), but works faster */
3015 +static inline void clear_fd(struct qm_fd *fd)
3016 +{
3017 + fd->opaque_addr = 0;
3018 + fd->opaque = 0;
3019 + fd->cmd = 0;
3020 +}
3021 +
3022 +static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
3023 + struct qman_fq *tx_fq)
3024 +{
3025 + int i;
3026 +
3027 + for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
3028 + if (priv->egress_fqs[i] == tx_fq)
3029 + return i;
3030 +
3031 + return -EINVAL;
3032 +}
3033 +
3034 +static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
3035 + struct rtnl_link_stats64 *percpu_stats,
3036 + struct qm_fd *fd, struct qman_fq *egress_fq,
3037 + struct qman_fq *conf_fq)
3038 +{
3039 + int err, i;
3040 +
3041 + if (fd->bpid == 0xff)
3042 + fd->cmd |= qman_fq_fqid(conf_fq);
3043 +
3044 + /* Trace this Tx fd */
3045 + trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
3046 +
3047 + for (i = 0; i < 100000; i++) {
3048 + err = qman_enqueue(egress_fq, fd, 0);
3049 + if (err != -EBUSY)
3050 + break;
3051 + }
3052 +
3053 + if (unlikely(err < 0)) {
3054 + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
3055 + percpu_stats->tx_errors++;
3056 + percpu_stats->tx_fifo_errors++;
3057 + return err;
3058 + }
3059 +
3060 + percpu_stats->tx_packets++;
3061 + percpu_stats->tx_bytes += dpa_fd_length(fd);
3062 +
3063 + return 0;
3064 +}
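+
+/* Sketch of the expected call sequence on the Tx hot path (simplified;
+ * the real flow, including S/G handling and error paths, lives in the
+ * dpa_tx() implementation):
+ *
+ *	struct qm_fd fd;
+ *	int offset, queue = dpa_get_queue_mapping(skb);
+ *
+ *	clear_fd(&fd);
+ *	if (skb_to_contig_fd(priv, skb, &fd, count_ptr, &offset) == 0)
+ *		dpa_xmit(priv, &percpu_priv->stats, &fd,
+ *			 priv->egress_fqs[queue], priv->conf_fqs[queue]);
+ */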
3065 +
3066 +/* Use multiple WQs for FQ assignment:
3067 + * - Tx Confirmation queues go to WQ1.
3068 + * - Rx Error, Tx Error and high-priority PCD queues go to WQ2 (giving
3069 + *   them a better chance to be scheduled, in case there are many more
+ *   FQs in WQ3 and WQ5).
+ * - Rx Default and Tx queues go to WQ3 (no differentiation between
+ *   Rx and Tx traffic).
+ * - Regular (user-defined) PCD queues go to WQ5.
+ * This ensures that Tx-confirmed buffers are released in a timely manner.
+ * In particular,
3073 + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
3074 + * are greatly outnumbered by other FQs in the system (usually PCDs), while
3075 + * dequeue scheduling is round-robin.
3076 + */
3077 +static inline void _dpa_assign_wq(struct dpa_fq *fq)
3078 +{
3079 + switch (fq->fq_type) {
3080 + case FQ_TYPE_TX_CONFIRM:
3081 + case FQ_TYPE_TX_CONF_MQ:
3082 + fq->wq = 1;
3083 + break;
3084 + case FQ_TYPE_RX_DEFAULT:
3085 + case FQ_TYPE_TX:
3086 + fq->wq = 3;
3087 + break;
3088 + case FQ_TYPE_RX_ERROR:
3089 + case FQ_TYPE_TX_ERROR:
3090 + case FQ_TYPE_RX_PCD_HI_PRIO:
3091 + fq->wq = 2;
3092 + break;
3093 + case FQ_TYPE_RX_PCD:
3094 + fq->wq = 5;
3095 + break;
3096 + default:
3097 + WARN(1, "Invalid FQ type %d for FQID %d!\n",
3098 + fq->fq_type, fq->fqid);
3099 + }
3100 +}
3101 +
3102 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
3103 +/* Use in lieu of skb_get_queue_mapping() */
3104 +#ifdef CONFIG_FMAN_PFC
3105 +#define dpa_get_queue_mapping(skb) \
3106 + (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
3107 + ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
3108 + ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
3109 +			dpa_num_cpus + smp_processor_id()))
3110 +
3111 +#else
3112 +#define dpa_get_queue_mapping(skb) \
3113 + raw_smp_processor_id()
3114 +#endif
3115 +#else
3116 +/* Use the queue selected by XPS */
3117 +#define dpa_get_queue_mapping(skb) \
3118 + skb_get_queue_mapping(skb)
3119 +#endif
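+
+/* Worked example for the PFC case (illustrative values): with
+ * CONFIG_FMAN_PFC_COS_COUNT = 4 and dpa_num_cpus = 8, an skb of priority 2
+ * sent from CPU 3 maps to Tx queue 2 * 8 + 3 = 19; any priority >= 4 is
+ * clamped to the last class of service, i.e. queue 3 * 8 + CPU.
+ */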
3120 +
3121 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
3122 +struct ptp_priv_s {
3123 + struct device_node *node;
3124 + struct platform_device *of_dev;
3125 + struct mac_device *mac_dev;
3126 +};
3127 +extern struct ptp_priv_s ptp_priv;
3128 +#endif
3129 +
3130 +static inline void _dpa_bp_free_pf(void *addr)
3131 +{
3132 + put_page(virt_to_head_page(addr));
3133 +}
3134 +
3135 +/* TODO: LS1043A SoC has a HW issue regarding FMan DMA transactions; the issue
3136 + * manifests itself at high traffic rates when frames exceed 4K memory
3137 + * boundaries; For the moment, we use a SW workaround to avoid frames larger
3138 + * than 4K or that exceed 4K alignments.
3139 + */
3140 +
3141 +#ifndef CONFIG_PPC
3142 +extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */
3143 +
3144 +#define HAS_DMA_ISSUE(start, size) \
3145 + (((u64)(start) + (size)) > (((u64)(start) + 0x1000) & ~0xFFF))
3146 +#define BOUNDARY_4K(start, size) (((u64)(start) + (u64)(size)) & ~0xFFF)
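+
+/* Example: a 0x200-byte frame starting at 0x0F00 ends at 0x1100 and
+ * crosses the 4K boundary at 0x1000, so
+ *	HAS_DMA_ISSUE(0x0F00, 0x200) evaluates to true;
+ * the same frame starting at 0x0E00 ends exactly at 0x1000 and passes.
+ */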
3147 +
3148 +#endif /* !CONFIG_PPC */
3149 +
3150 +#endif /* __DPA_H */
3151 --- /dev/null
3152 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
3153 @@ -0,0 +1,263 @@
3154 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3155 + *
3156 + * Redistribution and use in source and binary forms, with or without
3157 + * modification, are permitted provided that the following conditions are met:
3158 + * * Redistributions of source code must retain the above copyright
3159 + * notice, this list of conditions and the following disclaimer.
3160 + * * Redistributions in binary form must reproduce the above copyright
3161 + * notice, this list of conditions and the following disclaimer in the
3162 + * documentation and/or other materials provided with the distribution.
3163 + * * Neither the name of Freescale Semiconductor nor the
3164 + * names of its contributors may be used to endorse or promote products
3165 + * derived from this software without specific prior written permission.
3166 + *
3167 + *
3168 + * ALTERNATIVELY, this software may be distributed under the terms of the
3169 + * GNU General Public License ("GPL") as published by the Free Software
3170 + * Foundation, either version 2 of that License or (at your option) any
3171 + * later version.
3172 + *
3173 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3174 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3175 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3176 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3177 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3178 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3179 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3180 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3181 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3182 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3183 + */
3184 +
3185 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
3186 +#define pr_fmt(fmt) \
3187 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
3188 + KBUILD_BASENAME".c", __LINE__, __func__
3189 +#else
3190 +#define pr_fmt(fmt) \
3191 + KBUILD_MODNAME ": " fmt
3192 +#endif
3193 +
3194 +#include <linux/init.h>
3195 +#include <linux/module.h>
3196 +#include <linux/io.h>
3197 +#include <linux/of_platform.h>
3198 +#include <linux/of_net.h>
3199 +#include <linux/etherdevice.h>
3200 +#include <linux/kthread.h>
3201 +#include <linux/percpu.h>
3202 +#include <linux/highmem.h>
3203 +#include <linux/sort.h>
3204 +#include <linux/fsl_qman.h>
3205 +#include "dpaa_eth.h"
3206 +#include "dpaa_eth_common.h"
3207 +#include "dpaa_eth_base.h"
3208 +
3209 +#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
3210 +
3211 +MODULE_LICENSE("Dual BSD/GPL");
3212 +
3213 +uint8_t advanced_debug = -1;
3214 +module_param(advanced_debug, byte, S_IRUGO);
3215 +MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
3216 +EXPORT_SYMBOL(advanced_debug);
3217 +
3218 +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
3219 +{
3220 + return ((struct dpa_bp *)dpa_bp0)->size -
3221 + ((struct dpa_bp *)dpa_bp1)->size;
3222 +}
3223 +
3224 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3225 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
3226 +{
3227 + int i, lenp, na, ns, err;
3228 + struct device *dev;
3229 + struct device_node *dev_node;
3230 + const __be32 *bpool_cfg;
3231 + struct dpa_bp *dpa_bp;
3232 + u32 bpid;
3233 +
3234 + dev = &_of_dev->dev;
3235 +
+	/* of_count_phandle_with_args() may return a negative errno, which
+	 * would be lost if stored directly into the unsigned *count
+	 */
3236 +	err = of_count_phandle_with_args(dev->of_node,
3237 +			"fsl,bman-buffer-pools", NULL);
3238 +	if (err < 1) {
3239 +		dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
3240 +		return ERR_PTR(-EINVAL);
3241 +	}
+	*count = err;
3242 +
3243 + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
3244 + if (dpa_bp == NULL) {
3245 + dev_err(dev, "devm_kzalloc() failed\n");
3246 + return ERR_PTR(-ENOMEM);
3247 + }
3248 +
3249 + dev_node = of_find_node_by_path("/");
3250 + if (unlikely(dev_node == NULL)) {
3251 + dev_err(dev, "of_find_node_by_path(/) failed\n");
3252 + return ERR_PTR(-EINVAL);
3253 + }
3254 +
3255 + na = of_n_addr_cells(dev_node);
3256 + ns = of_n_size_cells(dev_node);
3257 +
3258 + for (i = 0; i < *count; i++) {
3259 + of_node_put(dev_node);
3260 +
3261 + dev_node = of_parse_phandle(dev->of_node,
3262 + "fsl,bman-buffer-pools", i);
3263 + if (dev_node == NULL) {
3264 + dev_err(dev, "of_find_node_by_phandle() failed\n");
3265 + return ERR_PTR(-EFAULT);
3266 + }
3267 +
3268 + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
3269 + dev_err(dev,
3270 + "!of_device_is_compatible(%s, fsl,bpool)\n",
3271 + dev_node->full_name);
3272 + dpa_bp = ERR_PTR(-EINVAL);
3273 + goto _return_of_node_put;
3274 + }
3275 +
3276 + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
3277 + if (err) {
3278 + dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
3279 + dpa_bp = ERR_PTR(-EINVAL);
3280 + goto _return_of_node_put;
3281 + }
3282 + dpa_bp[i].bpid = (uint8_t)bpid;
3283 +
3284 + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
3285 + &lenp);
3286 + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
3287 + const uint32_t *seed_pool;
3288 +
3289 + dpa_bp[i].config_count =
3290 + (int)of_read_number(bpool_cfg, ns);
3291 + dpa_bp[i].size =
3292 + (size_t)of_read_number(bpool_cfg + ns, ns);
3293 + dpa_bp[i].paddr =
3294 + of_read_number(bpool_cfg + 2 * ns, na);
3295 +
3296 + seed_pool = of_get_property(dev_node,
3297 + "fsl,bpool-ethernet-seeds", &lenp);
3298 + dpa_bp[i].seed_pool = !!seed_pool;
3300 + } else {
3301 + dev_err(dev,
3302 + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
3303 + dev_node->full_name);
3304 + dpa_bp = ERR_PTR(-EINVAL);
3305 + goto _return_of_node_put;
3306 + }
3307 + }
3308 +
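+	/* Keep the pools ordered by ascending buffer size, so that callers
+	 * can pick the smallest buffer that fits a given frame (assumed
+	 * convention throughout the driver).
+	 */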
3309 + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
3310 +
3311 + return dpa_bp;
3312 +
3313 +_return_of_node_put:
3314 + if (dev_node)
3315 + of_node_put(dev_node);
3316 +
3317 + return dpa_bp;
3318 +}
3319 +EXPORT_SYMBOL(dpa_bp_probe);
3320 +
3321 +int dpa_bp_shared_port_seed(struct dpa_bp *bp)
3322 +{
3323 + void __iomem **ptr;
3324 +
3325 + /* In MAC-less and Shared-MAC scenarios the physical
3326 + * address of the buffer pool in device tree is set
3327 + * to 0 to specify that another entity (USDPAA) will
3328 + * allocate and seed the buffers
3329 + */
3330 + if (!bp->paddr)
3331 + return 0;
3332 +
3333 +	/* reserve the memory region for the buffers */
3334 +	if (!devm_request_mem_region(bp->dev, bp->paddr,
3335 +			bp->size * bp->config_count, KBUILD_MODNAME)) {
+		pr_err("Could not reserve memory region for pool %d\n",
+		       bp->bpid);
+		return -EBUSY;
+	}
3336 + /* managed ioremap unmapping */
3337 + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3338 + if (!ptr)
3339 + return -EIO;
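+	/* Map the whole region so the buffers can be seeded from the CPU
+	 * below; the mapping primitive differs between the ARM (non-PPC)
+	 * and PowerPC targets.
+	 */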
3340 +#ifndef CONFIG_PPC
3341 + bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
3342 +#else
3343 + bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
3344 +#endif
3345 + if (bp->vaddr == NULL) {
3346 + pr_err("Could not map memory for pool %d\n", bp->bpid);
3347 + devres_free(ptr);
3348 + return -EIO;
3349 + }
3350 + *ptr = bp->vaddr;
3351 + devres_add(bp->dev, ptr);
3352 +
3353 + /* seed pool with buffers from that memory region */
3354 + if (bp->seed_pool) {
3355 + int count = bp->target_count;
3356 + dma_addr_t addr = bp->paddr;
3357 +
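+		/* BMan accepts at most eight buffers per release command,
+		 * so the pool is seeded in batches of up to eight.
+		 */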
3358 + while (count) {
3359 + struct bm_buffer bufs[8];
3360 + uint8_t num_bufs = 0;
3361 +
3362 + do {
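+				/* buffer addresses must fit in the 48 bits
+				 * of a BMan buffer descriptor
+				 */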
3363 + BUG_ON(addr > 0xffffffffffffull);
3364 + bufs[num_bufs].bpid = bp->bpid;
3365 + bm_buffer_set64(&bufs[num_bufs++], addr);
3366 + addr += bp->size;
3367 +
3368 + } while (--count && (num_bufs < 8));
3369 +
3370 + while (bman_release(bp->pool, bufs, num_bufs, 0))
3371 + cpu_relax();
3372 + }
3373 + }
3374 +
3375 + return 0;
3376 +}
3377 +EXPORT_SYMBOL(dpa_bp_shared_port_seed);
3378 +
3379 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3380 + size_t count)
3381 +{
3382 + struct dpa_priv_s *priv = netdev_priv(net_dev);
3383 + int i;
3384 +
3385 + priv->dpa_bp = dpa_bp;
3386 + priv->bp_count = count;
3387 +
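+	/* Create each BMan pool; unwind all of them if any allocation fails */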
3388 + for (i = 0; i < count; i++) {
3389 + int err;
3390 + err = dpa_bp_alloc(&dpa_bp[i]);
3391 + if (err < 0) {
3392 + dpa_bp_free(priv);
3393 + priv->dpa_bp = NULL;
3394 + return err;
3395 + }
3396 + }
3397 +
3398 + return 0;
3399 +}
3400 +EXPORT_SYMBOL(dpa_bp_create);
3401 +
3402 +static int __init __cold dpa_advanced_load(void)
3403 +{
3404 + pr_info(DPA_DESCRIPTION "\n");
3405 +
3406 + return 0;
3407 +}
3408 +module_init(dpa_advanced_load);
3409 +
3410 +static void __exit __cold dpa_advanced_unload(void)
3411 +{
3412 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
3413 + KBUILD_BASENAME".c", __func__);
3415 +}
3416 +module_exit(dpa_advanced_unload);
3417 --- /dev/null
3418 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
3419 @@ -0,0 +1,50 @@
3420 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3421 + *
3422 + * Redistribution and use in source and binary forms, with or without
3423 + * modification, are permitted provided that the following conditions are met:
3424 + * * Redistributions of source code must retain the above copyright
3425 + * notice, this list of conditions and the following disclaimer.
3426 + * * Redistributions in binary form must reproduce the above copyright
3427 + * notice, this list of conditions and the following disclaimer in the
3428 + * documentation and/or other materials provided with the distribution.
3429 + * * Neither the name of Freescale Semiconductor nor the
3430 + * names of its contributors may be used to endorse or promote products
3431 + * derived from this software without specific prior written permission.
3432 + *
3433 + *
3434 + * ALTERNATIVELY, this software may be distributed under the terms of the
3435 + * GNU General Public License ("GPL") as published by the Free Software
3436 + * Foundation, either version 2 of that License or (at your option) any
3437 + * later version.
3438 + *
3439 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3440 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3441 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3442 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3443 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3444 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3445 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3446 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3447 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3448 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3449 + */
3450 +
3451 +#ifndef __DPAA_ETH_BASE_H
3452 +#define __DPAA_ETH_BASE_H
3453 +
3454 +#include <linux/etherdevice.h> /* struct net_device */
3455 +#include <linux/fsl_bman.h> /* struct bm_buffer */
3456 +#include <linux/of_platform.h> /* struct platform_device */
3457 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
3458 +
3459 +extern uint8_t advanced_debug;
3460 +extern const struct dpa_fq_cbs_t shared_fq_cbs;
3461 +extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
3462 +
3463 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3464 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
3465 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3466 + size_t count);
3467 +int dpa_bp_shared_port_seed(struct dpa_bp *bp);
3468 +
3469 +#endif /* __DPAA_ETH_BASE_H */
3470 --- /dev/null
3471 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
3472 @@ -0,0 +1,1991 @@
3473 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
3474 + *
3475 + * Redistribution and use in source and binary forms, with or without
3476 + * modification, are permitted provided that the following conditions are met:
3477 + * * Redistributions of source code must retain the above copyright
3478 + * notice, this list of conditions and the following disclaimer.
3479 + * * Redistributions in binary form must reproduce the above copyright
3480 + * notice, this list of conditions and the following disclaimer in the
3481 + * documentation and/or other materials provided with the distribution.
3482 + * * Neither the name of Freescale Semiconductor nor the
3483 + * names of its contributors may be used to endorse or promote products
3484 + * derived from this software without specific prior written permission.
3485 + *
3486 + *
3487 + * ALTERNATIVELY, this software may be distributed under the terms of the
3488 + * GNU General Public License ("GPL") as published by the Free Software
3489 + * Foundation, either version 2 of that License or (at your option) any
3490 + * later version.
3491 + *
3492 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3493 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3494 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3495 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3496 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3497 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3498 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3499 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3500 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3501 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3502 + */
3503 +
3504 +#include <linux/init.h>
3505 +#include "dpaa_eth_ceetm.h"
3506 +
3507 +#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
3508 +
3509 +const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
3510 + [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
3511 + [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
3512 +};
3513 +
3514 +struct Qdisc_ops ceetm_qdisc_ops;
3515 +
3516 +/* Obtain the DCP and the SP ids from the FMan port */
3517 +static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
3518 + unsigned int *sp_id)
3519 +{
3520 + uint32_t channel;
3521 + t_LnxWrpFmPortDev *port_dev;
3522 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3523 + struct mac_device *mac_dev = dpa_priv->mac_dev;
3524 +
3525 + port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
3526 + channel = port_dev->txCh;
3527 +
3528 + *sp_id = channel & CHANNEL_SP_MASK;
3529 + pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
3530 +
3531 + if (channel < DCP0_MAX_CHANNEL) {
3532 + *dcp_id = qm_dc_portal_fman0;
3533 + pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
3534 + } else {
3535 + *dcp_id = qm_dc_portal_fman1;
3536 + pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
3537 + }
3538 +}
3539 +
3540 +/* Enqueue Rejection Notification callback */
3541 +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
3542 + const struct qm_mr_entry *msg)
3543 +{
3544 + struct net_device *net_dev;
3545 + struct ceetm_class *cls;
3546 + struct ceetm_class_stats *cstats = NULL;
3547 + const struct dpa_priv_s *dpa_priv;
3548 + struct dpa_percpu_priv_s *dpa_percpu_priv;
3549 + struct sk_buff *skb;
3550 + struct qm_fd fd = msg->ern.fd;
3551 +
3552 + net_dev = ((struct ceetm_fq *)fq)->net_dev;
3553 + dpa_priv = netdev_priv(net_dev);
3554 + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
3555 +
3556 + /* Increment DPA counters */
3557 + dpa_percpu_priv->stats.tx_dropped++;
3558 + dpa_percpu_priv->stats.tx_fifo_errors++;
3559 +
3560 + /* Increment CEETM counters */
3561 + cls = ((struct ceetm_fq *)fq)->ceetm_cls;
3562 + switch (cls->type) {
3563 + case CEETM_PRIO:
3564 + cstats = this_cpu_ptr(cls->prio.cstats);
3565 + break;
3566 + case CEETM_WBFS:
3567 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3568 + break;
3569 + }
3570 +
3571 + if (cstats)
3572 + cstats->ern_drop_count++;
3573 +
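+	/* Frames drawn from a buffer pool (valid bpid) are returned to BMan;
+	 * otherwise recover the skb from the FD and free it ourselves.
+	 */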
3574 + if (fd.bpid != 0xff) {
3575 + dpa_fd_release(net_dev, &fd);
3576 + return;
3577 + }
3578 +
3579 + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
3580 + dev_kfree_skb_any(skb);
3581 +}
3582 +
3583 +/* Congestion State Change Notification callback */
3584 +static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
3585 +{
3586 + struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
3587 + struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
3588 + struct ceetm_class *cls = ceetm_fq->ceetm_cls;
3589 + struct ceetm_class_stats *cstats = NULL;
3590 +
3591 + switch (cls->type) {
3592 + case CEETM_PRIO:
3593 + cstats = this_cpu_ptr(cls->prio.cstats);
3594 + break;
3595 + case CEETM_WBFS:
3596 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3597 + break;
3598 + }
3599 +
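+	/* Entering congestion pauses all TX queues and starts timing the
+	 * congested interval; leaving it accumulates that time and resumes
+	 * transmission.
+	 */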
3600 + if (congested) {
3601 + dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
3602 + netif_tx_stop_all_queues(dpa_priv->net_dev);
3603 + dpa_priv->cgr_data.cgr_congested_count++;
3604 + if (cstats)
3605 + cstats->congested_count++;
3606 + } else {
3607 + dpa_priv->cgr_data.congested_jiffies +=
3608 + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
3609 + netif_tx_wake_all_queues(dpa_priv->net_dev);
3610 + }
3611 +}
3612 +
3613 +/* Allocate a ceetm fq */
3614 +static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
3615 + struct ceetm_class *cls)
3616 +{
3617 + *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
3618 + if (!*fq)
3619 + return -ENOMEM;
3620 +
3621 + (*fq)->net_dev = dev;
3622 + (*fq)->ceetm_cls = cls;
3623 + return 0;
3624 +}
3625 +
3626 +/* Configure a ceetm Class Congestion Group */
3627 +static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
3628 + struct qm_ceetm_channel *channel, unsigned int id,
3629 + struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv)
3630 +{
3631 + int err;
3632 + u32 cs_th;
3633 + u16 ccg_mask;
3634 + struct qm_ceetm_ccg_params ccg_params;
3635 +
3636 + err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
3637 + if (err)
3638 + return err;
3639 +
3640 + /* Configure the count mode (frames/bytes), enable congestion state
3641 + * notifications, configure the congestion entry and exit thresholds,
3642 + * enable tail-drop, configure the tail-drop mode, and set the
3643 + * overhead accounting limit
3644 + */
3645 + ccg_mask = QM_CCGR_WE_MODE |
3646 + QM_CCGR_WE_CSCN_EN |
3647 + QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT |
3648 + QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
3649 + QM_CCGR_WE_OAL;
3650 +
3651 + ccg_params.mode = 0; /* count bytes */
3652 + ccg_params.cscn_en = 1; /* generate notifications */
3653 + ccg_params.td_en = 1; /* enable tail-drop */
3654 + ccg_params.td_mode = 0; /* tail-drop on congestion state */
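+	/* Approximate the per-frame overhead as the skb metadata plus the
+	 * TX headroom, capped at the largest OAL the hardware accepts.
+	 */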
3655 + ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) +
3656 + dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
3657 +
3658 + /* Set the congestion state thresholds according to the link speed */
3659 + if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
3660 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
3661 + else
3662 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
3663 +
3664 + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
3665 + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
3666 + cs_th * CEETM_CCGR_RATIO, 1);
3667 +
3668 + err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
3669 + if (err)
3670 + return err;
3671 +
3672 + return 0;
3673 +}
3674 +
3675 +/* Configure a ceetm Logical Frame Queue */
3676 +static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
3677 + struct qm_ceetm_lfq **lfq)
3678 +{
3679 + int err;
3680 + u64 context_a;
3681 + u32 context_b;
3682 +
3683 + err = qman_ceetm_lfq_claim(lfq, cq);
3684 + if (err)
3685 + return err;
3686 +
3687 + /* Get the former contexts in order to preserve context B */
3688 + err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
3689 + if (err)
3690 + return err;
3691 +
3692 + context_a = CEETM_CONTEXT_A;
3693 + err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
3694 + if (err)
3695 + return err;
3696 +
3697 + (*lfq)->ern = ceetm_ern;
3698 +
3699 + err = qman_ceetm_create_fq(*lfq, &fq->fq);
3700 + if (err)
3701 + return err;
3702 +
3703 + return 0;
3704 +}
3705 +
3706 +/* Configure a prio ceetm class */
3707 +static int ceetm_config_prio_cls(struct ceetm_class *cls,
3708 + struct net_device *dev,
3709 + struct qm_ceetm_channel *channel,
3710 + unsigned int id)
3711 +{
3712 + int err;
3713 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3714 +
3715 + err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
3716 + if (err)
3717 + return err;
3718 +
3719 + /* Claim and configure the CCG */
3720 + err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
3721 + dpa_priv);
3722 + if (err)
3723 + return err;
3724 +
3725 + /* Claim and configure the CQ */
3726 + err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
3727 + if (err)
3728 + return err;
3729 +
3730 + if (cls->shaped) {
3731 + err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
3732 + if (err)
3733 + return err;
3734 +
3735 + err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
3736 + if (err)
3737 + return err;
3738 + }
3739 +
3740 +	/* Claim and configure an LFQ */
3741 + err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
3742 + if (err)
3743 + return err;
3744 +
3745 + return 0;
3746 +}
3747 +
3748 +/* Configure a wbfs ceetm class */
3749 +static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
3750 + struct net_device *dev,
3751 + struct qm_ceetm_channel *channel,
3752 + unsigned int id, int type)
3753 +{
3754 + int err;
3755 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3756 +
3757 + err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
3758 + if (err)
3759 + return err;
3760 +
3761 + /* Claim and configure the CCG */
3762 + err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
3763 + dpa_priv);
3764 + if (err)
3765 + return err;
3766 +
3767 + /* Claim and configure the CQ */
3768 + if (type == WBFS_GRP_B)
3769 + err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
3770 + cls->wbfs.ccg);
3771 + else
3772 + err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
3773 + cls->wbfs.ccg);
3774 + if (err)
3775 + return err;
3776 +
3777 +	/* Configure the CQ weight: real number multiplied by 100 to get rid
3778 +	 * of the fraction (e.g. a ratio of 2.5 is conveyed as 250)
3779 +	 */
3780 + err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
3781 + cls->wbfs.weight * 100);
3782 + if (err)
3783 + return err;
3784 +
3785 +	/* Claim and configure an LFQ */
3786 + err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
3787 + if (err)
3788 + return err;
3789 +
3790 + return 0;
3791 +}
3792 +
3793 +/* Find class in qdisc hash table using given handle */
3794 +static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
3795 +{
3796 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3797 + struct Qdisc_class_common *clc;
3798 +
3799 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
3800 + __func__, handle, sch->handle);
3801 +
3802 + clc = qdisc_class_find(&priv->clhash, handle);
3803 + return clc ? container_of(clc, struct ceetm_class, common) : NULL;
3804 +}
3805 +
3806 +/* Insert a class in the qdisc's class hash */
3807 +static void ceetm_link_class(struct Qdisc *sch,
3808 + struct Qdisc_class_hash *clhash,
3809 + struct Qdisc_class_common *common)
3810 +{
3811 + sch_tree_lock(sch);
3812 + qdisc_class_hash_insert(clhash, common);
3813 + sch_tree_unlock(sch);
3814 + qdisc_class_hash_grow(sch, clhash);
3815 +}
3816 +
3817 +/* Destroy a ceetm class */
3818 +static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
3819 +{
3820 + if (!cl)
3821 + return;
3822 +
3823 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
3824 + __func__, cl->common.classid, sch->handle);
3825 +
3826 + switch (cl->type) {
3827 + case CEETM_ROOT:
3828 + if (cl->root.child) {
3829 + qdisc_destroy(cl->root.child);
3830 + cl->root.child = NULL;
3831 + }
3832 +
3833 + if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
3834 + pr_err(KBUILD_BASENAME
3835 + " : %s : error releasing the channel %d\n",
3836 + __func__, cl->root.ch->idx);
3837 +
3838 + break;
3839 +
3840 + case CEETM_PRIO:
3841 + if (cl->prio.child) {
3842 + qdisc_destroy(cl->prio.child);
3843 + cl->prio.child = NULL;
3844 + }
3845 +
3846 + if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
3847 + pr_err(KBUILD_BASENAME
3848 + " : %s : error releasing the LFQ %d\n",
3849 + __func__, cl->prio.lfq->idx);
3850 +
3851 + if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
3852 + pr_err(KBUILD_BASENAME
3853 + " : %s : error releasing the CQ %d\n",
3854 + __func__, cl->prio.cq->idx);
3855 +
3856 + if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
3857 + pr_err(KBUILD_BASENAME
3858 + " : %s : error releasing the CCG %d\n",
3859 + __func__, cl->prio.ccg->idx);
3860 +
3861 + kfree(cl->prio.fq);
3862 +
3863 + if (cl->prio.cstats)
3864 + free_percpu(cl->prio.cstats);
3865 +
3866 + break;
3867 +
3868 + case CEETM_WBFS:
3869 + if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
3870 + pr_err(KBUILD_BASENAME
3871 + " : %s : error releasing the LFQ %d\n",
3872 + __func__, cl->wbfs.lfq->idx);
3873 +
3874 + if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
3875 + pr_err(KBUILD_BASENAME
3876 + " : %s : error releasing the CQ %d\n",
3877 + __func__, cl->wbfs.cq->idx);
3878 +
3879 + if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
3880 + pr_err(KBUILD_BASENAME
3881 + " : %s : error releasing the CCG %d\n",
3882 + __func__, cl->wbfs.ccg->idx);
3883 +
3884 + kfree(cl->wbfs.fq);
3885 +
3886 + if (cl->wbfs.cstats)
3887 + free_percpu(cl->wbfs.cstats);
3888 + }
3889 +
3890 + tcf_destroy_chain(&cl->filter_list);
3891 + kfree(cl);
3892 +}
3893 +
3894 +/* Destroy a ceetm qdisc */
3895 +static void ceetm_destroy(struct Qdisc *sch)
3896 +{
3897 + unsigned int ntx, i;
3898 + struct hlist_node *next;
3899 + struct ceetm_class *cl;
3900 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3901 + struct net_device *dev = qdisc_dev(sch);
3902 +
3903 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
3904 + __func__, sch->handle);
3905 +
3906 + /* All filters need to be removed before destroying the classes */
3907 + tcf_destroy_chain(&priv->filter_list);
3908 +
3909 + for (i = 0; i < priv->clhash.hashsize; i++) {
3910 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
3911 + tcf_destroy_chain(&cl->filter_list);
3912 + }
3913 +
3914 + for (i = 0; i < priv->clhash.hashsize; i++) {
3915 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
3916 + common.hnode)
3917 + ceetm_cls_destroy(sch, cl);
3918 + }
3919 +
3920 + qdisc_class_hash_destroy(&priv->clhash);
3921 +
3922 + switch (priv->type) {
3923 + case CEETM_ROOT:
3924 + dpa_disable_ceetm(dev);
3925 +
3926 + if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
3927 + pr_err(KBUILD_BASENAME
3928 + " : %s : error releasing the LNI %d\n",
3929 + __func__, priv->root.lni->idx);
3930 +
3931 + if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
3932 + pr_err(KBUILD_BASENAME
3933 + " : %s : error releasing the SP %d\n",
3934 + __func__, priv->root.sp->idx);
3935 +
3936 + if (priv->root.qstats)
3937 + free_percpu(priv->root.qstats);
3938 +
3939 + if (!priv->root.qdiscs)
3940 + break;
3941 +
3942 + /* Remove the pfifo qdiscs */
3943 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
3944 + if (priv->root.qdiscs[ntx])
3945 + qdisc_destroy(priv->root.qdiscs[ntx]);
3946 +
3947 + kfree(priv->root.qdiscs);
3948 + break;
3949 +
3950 + case CEETM_PRIO:
3951 + if (priv->prio.parent)
3952 + priv->prio.parent->root.child = NULL;
3953 + break;
3954 +
3955 + case CEETM_WBFS:
3956 + if (priv->wbfs.parent)
3957 + priv->wbfs.parent->prio.child = NULL;
3958 + break;
3959 + }
3960 +}
3961 +
3962 +static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
3963 +{
3964 + struct Qdisc *qdisc;
3965 + unsigned int ntx, i;
3966 + struct nlattr *nest;
3967 + struct tc_ceetm_qopt qopt;
3968 + struct ceetm_qdisc_stats *qstats;
3969 + struct net_device *dev = qdisc_dev(sch);
3970 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3971 +
3972 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
3973 +
3974 + sch_tree_lock(sch);
3975 + memset(&qopt, 0, sizeof(qopt));
3976 + qopt.type = priv->type;
3977 + qopt.shaped = priv->shaped;
3978 +
3979 + switch (priv->type) {
3980 + case CEETM_ROOT:
3981 + /* Gather statistics from the underlying pfifo qdiscs */
3982 + sch->q.qlen = 0;
3983 + memset(&sch->bstats, 0, sizeof(sch->bstats));
3984 + memset(&sch->qstats, 0, sizeof(sch->qstats));
3985 +
3986 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
3987 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
3988 + sch->q.qlen += qdisc->q.qlen;
3989 + sch->bstats.bytes += qdisc->bstats.bytes;
3990 + sch->bstats.packets += qdisc->bstats.packets;
3991 + sch->qstats.qlen += qdisc->qstats.qlen;
3992 + sch->qstats.backlog += qdisc->qstats.backlog;
3993 + sch->qstats.drops += qdisc->qstats.drops;
3994 + sch->qstats.requeues += qdisc->qstats.requeues;
3995 + sch->qstats.overlimits += qdisc->qstats.overlimits;
3996 + }
3997 +
3998 + for_each_online_cpu(i) {
3999 + qstats = per_cpu_ptr(priv->root.qstats, i);
4000 + sch->qstats.drops += qstats->drops;
4001 + }
4002 +
4003 + qopt.rate = priv->root.rate;
4004 + qopt.ceil = priv->root.ceil;
4005 + qopt.overhead = priv->root.overhead;
4006 + break;
4007 +
4008 + case CEETM_PRIO:
4009 + qopt.qcount = priv->prio.qcount;
4010 + break;
4011 +
4012 + case CEETM_WBFS:
4013 + qopt.qcount = priv->wbfs.qcount;
4014 + qopt.cr = priv->wbfs.cr;
4015 + qopt.er = priv->wbfs.er;
4016 + break;
4017 +
4018 + default:
4019 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4020 + sch_tree_unlock(sch);
4021 + return -EINVAL;
4022 + }
4023 +
4024 + nest = nla_nest_start(skb, TCA_OPTIONS);
4025 + if (!nest)
4026 + goto nla_put_failure;
4027 + if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
4028 + goto nla_put_failure;
4029 + nla_nest_end(skb, nest);
4030 +
4031 + sch_tree_unlock(sch);
4032 + return skb->len;
4033 +
4034 +nla_put_failure:
4035 + sch_tree_unlock(sch);
4036 + nla_nest_cancel(skb, nest);
4037 + return -EMSGSIZE;
4038 +}
4039 +
4040 +/* Configure a root ceetm qdisc */
4041 +static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
4042 + struct tc_ceetm_qopt *qopt)
4043 +{
4044 + struct netdev_queue *dev_queue;
4045 + struct Qdisc *qdisc;
4046 + enum qm_dc_portal dcp_id;
4047 + unsigned int i, sp_id, parent_id;
4048 + int err;
4049 + u64 bps;
4050 + struct qm_ceetm_sp *sp;
4051 + struct qm_ceetm_lni *lni;
4052 + struct net_device *dev = qdisc_dev(sch);
4053 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
4054 + struct mac_device *mac_dev = dpa_priv->mac_dev;
4055 +
4056 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4057 +
4058 + /* Validate inputs */
4059 + if (sch->parent != TC_H_ROOT) {
4060 + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
4061 + tcf_destroy_chain(&priv->filter_list);
4062 + qdisc_class_hash_destroy(&priv->clhash);
4063 + return -EINVAL;
4064 + }
4065 +
4066 + if (!mac_dev) {
4067 +		pr_err("CEETM: the interface lacks a MAC device\n");
4068 + err = -EINVAL;
4069 + goto err_init_root;
4070 + }
4071 +
4072 + /* pre-allocate underlying pfifo qdiscs */
4073 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
4074 + sizeof(priv->root.qdiscs[0]),
4075 + GFP_KERNEL);
4076 + if (!priv->root.qdiscs) {
4077 + err = -ENOMEM;
4078 + goto err_init_root;
4079 + }
4080 +
4081 + for (i = 0; i < dev->num_tx_queues; i++) {
4082 + dev_queue = netdev_get_tx_queue(dev, i);
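+		/* The low minors are reserved for root ceetm classes; the
+		 * pfifo handles start at PFIFO_MIN_OFFSET to avoid clashes.
+		 */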
4083 + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
4084 + TC_H_MIN(i + PFIFO_MIN_OFFSET));
4085 +
4086 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
4087 + parent_id);
4088 + if (!qdisc) {
4089 + err = -ENOMEM;
4090 + goto err_init_root;
4091 + }
4092 +
4093 + priv->root.qdiscs[i] = qdisc;
4094 + qdisc->flags |= TCQ_F_ONETXQUEUE;
4095 + }
4096 +
4097 + sch->flags |= TCQ_F_MQROOT;
4098 +
4099 + priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
4100 + if (!priv->root.qstats) {
4101 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4102 + __func__);
4103 + err = -ENOMEM;
4104 + goto err_init_root;
4105 + }
4106 +
4107 + priv->shaped = qopt->shaped;
4108 + priv->root.rate = qopt->rate;
4109 + priv->root.ceil = qopt->ceil;
4110 + priv->root.overhead = qopt->overhead;
4111 +
4112 + /* Claim the SP */
4113 + get_dcp_and_sp(dev, &dcp_id, &sp_id);
4114 + err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
4115 + if (err) {
4116 + pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
4117 + __func__);
4118 + goto err_init_root;
4119 + }
4120 +
4121 + priv->root.sp = sp;
4122 +
4123 + /* Claim the LNI - will use the same id as the SP id since SPs 0-7
4124 + * are connected to the TX FMan ports
4125 + */
4126 + err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
4127 + if (err) {
4128 + pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
4129 + __func__);
4130 + goto err_init_root;
4131 + }
4132 +
4133 + priv->root.lni = lni;
4134 +
4135 + err = qman_ceetm_sp_set_lni(sp, lni);
4136 + if (err) {
4137 + pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
4138 + __func__);
4139 + goto err_init_root;
4140 + }
4141 +
4142 + lni->sp = sp;
4143 +
4144 + /* Configure the LNI shaper */
4145 + if (priv->shaped) {
4146 + err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
4147 + if (err) {
4148 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4149 + __func__);
4150 + goto err_init_root;
4151 + }
4152 +
4153 + bps = priv->root.rate << 3; /* Bps -> bps */
4154 + err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
4155 + if (err) {
4156 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4157 + __func__);
4158 + goto err_init_root;
4159 + }
4160 +
4161 + bps = priv->root.ceil << 3; /* Bps -> bps */
4162 + err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
4163 + if (err) {
4164 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4165 + __func__);
4166 + goto err_init_root;
4167 + }
4168 + }
4169 +
4170 + /* TODO default configuration */
4171 +
4172 + dpa_enable_ceetm(dev);
4173 + return 0;
4174 +
4175 +err_init_root:
4176 + ceetm_destroy(sch);
4177 + return err;
4178 +}
4179 +
4180 +/* Configure a prio ceetm qdisc */
4181 +static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
4182 + struct tc_ceetm_qopt *qopt)
4183 +{
4184 + int err;
4185 + unsigned int i;
4186 + struct ceetm_class *parent_cl, *child_cl;
4187 + struct Qdisc *parent_qdisc;
4188 + struct net_device *dev = qdisc_dev(sch);
4189 +
4190 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4191 +
4192 + if (sch->parent == TC_H_ROOT) {
4193 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
4194 + err = -EINVAL;
4195 + goto err_init_prio;
4196 + }
4197 +
4198 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4199 +	if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4200 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
4201 + err = -EINVAL;
4202 + goto err_init_prio;
4203 + }
4204 +
4205 + /* Obtain the parent root ceetm_class */
4206 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4207 +
4208 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
4209 +		pr_err("CEETM: a prio ceetm qdisc can be added only under a root ceetm class\n");
4210 + err = -EINVAL;
4211 + goto err_init_prio;
4212 + }
4213 +
4214 + priv->prio.parent = parent_cl;
4215 + parent_cl->root.child = sch;
4216 +
4217 + priv->shaped = parent_cl->shaped;
4218 + priv->prio.qcount = qopt->qcount;
4219 +
4220 + /* Create and configure qcount child classes */
4221 + for (i = 0; i < priv->prio.qcount; i++) {
4222 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4223 + if (!child_cl) {
4224 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4225 + __func__);
4226 + err = -ENOMEM;
4227 + goto err_init_prio;
4228 + }
4229 +
4230 + child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
4231 + if (!child_cl->prio.cstats) {
4232 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4233 + __func__);
4234 + err = -ENOMEM;
4235 + goto err_init_prio_cls;
4236 + }
4237 +
4238 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4239 + child_cl->refcnt = 1;
4240 + child_cl->parent = sch;
4241 + child_cl->type = CEETM_PRIO;
4242 + child_cl->shaped = priv->shaped;
4243 + child_cl->prio.child = NULL;
4244 +
4245 + /* All shaped CQs have CR and ER enabled by default */
4246 + child_cl->prio.cr = child_cl->shaped;
4247 + child_cl->prio.er = child_cl->shaped;
4248 + child_cl->prio.fq = NULL;
4249 + child_cl->prio.cq = NULL;
4250 +
4251 + /* Configure the corresponding hardware CQ */
4252 + err = ceetm_config_prio_cls(child_cl, dev,
4253 + parent_cl->root.ch, i);
4254 + if (err) {
4255 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
4256 + __func__, child_cl->common.classid);
4257 + goto err_init_prio_cls;
4258 + }
4259 +
4260 + /* Add class handle in Qdisc */
4261 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4262 + pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
4263 + __func__, child_cl->common.classid,
4264 + child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
4265 + }
4266 +
4267 + return 0;
4268 +
4269 +err_init_prio_cls:
4270 + ceetm_cls_destroy(sch, child_cl);
4271 +err_init_prio:
4272 + ceetm_destroy(sch);
4273 + return err;
4274 +}
4275 +
4276 +/* Configure a wbfs ceetm qdisc */
4277 +static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
4278 + struct tc_ceetm_qopt *qopt)
4279 +{
4280 + int err, group_b, small_group;
4281 + unsigned int i, id, prio_a, prio_b;
4282 + struct ceetm_class *parent_cl, *child_cl, *root_cl;
4283 + struct Qdisc *parent_qdisc;
4284 + struct ceetm_qdisc *parent_priv;
4285 + struct qm_ceetm_channel *channel;
4286 + struct net_device *dev = qdisc_dev(sch);
4287 +
4288 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4289 +
4290 + /* Validate inputs */
4291 + if (sch->parent == TC_H_ROOT) {
4292 +		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
4293 + err = -EINVAL;
4294 + goto err_init_wbfs;
4295 + }
4296 +
4297 + /* Obtain the parent prio ceetm qdisc */
4298 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4299 +	if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4300 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
4301 + err = -EINVAL;
4302 + goto err_init_wbfs;
4303 + }
4304 +
4305 + /* Obtain the parent prio ceetm class */
4306 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4307 + parent_priv = qdisc_priv(parent_qdisc);
4308 +
4309 + if (!parent_cl || parent_cl->type != CEETM_PRIO) {
4310 +		pr_err("CEETM: a wbfs ceetm qdisc can be added only under a prio ceetm class\n");
4311 + err = -EINVAL;
4312 + goto err_init_wbfs;
4313 + }
4314 +
4315 + if (!qopt->qcount || !qopt->qweight[0]) {
4316 + pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
4317 + err = -EINVAL;
4318 + goto err_init_wbfs;
4319 + }
4320 +
4321 + priv->shaped = parent_cl->shaped;
4322 +
4323 + if (!priv->shaped && (qopt->cr || qopt->er)) {
4324 + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
4325 + err = -EINVAL;
4326 + goto err_init_wbfs;
4327 + }
4328 +
4329 + if (priv->shaped && !(qopt->cr || qopt->er)) {
4330 + pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
4331 + err = -EINVAL;
4332 + goto err_init_wbfs;
4333 + }
4334 +
4335 + /* Obtain the parent root ceetm class */
4336 + root_cl = parent_priv->prio.parent;
4337 + if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
4338 + root_cl->root.wbfs_grp_large) {
4339 + pr_err("CEETM: no more wbfs classes are available\n");
4340 + err = -EINVAL;
4341 + goto err_init_wbfs;
4342 + }
4343 +
4344 + if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
4345 + qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
4346 + pr_err("CEETM: only %d wbfs classes are available\n",
4347 + CEETM_MIN_WBFS_QCOUNT);
4348 + err = -EINVAL;
4349 + goto err_init_wbfs;
4350 + }
4351 +
4352 + priv->wbfs.parent = parent_cl;
4353 + parent_cl->prio.child = sch;
4354 +
4355 + priv->wbfs.qcount = qopt->qcount;
4356 + priv->wbfs.cr = qopt->cr;
4357 + priv->wbfs.er = qopt->er;
4358 +
4359 + channel = root_cl->root.ch;
4360 +
4361 + /* Configure the hardware wbfs channel groups */
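+	/* A channel provides either one large group of eight weighted CQs
+	 * or two small groups (A and B) of four CQs each.
+	 */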
4362 + if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
4363 + /* Configure the large group A */
4364 + priv->wbfs.group_type = WBFS_GRP_LARGE;
4365 + small_group = false;
4366 + group_b = false;
4367 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4368 + prio_b = prio_a;
4369 +
4370 + } else if (root_cl->root.wbfs_grp_a) {
4371 + /* Configure the group B */
4372 + priv->wbfs.group_type = WBFS_GRP_B;
4373 +
4374 + err = qman_ceetm_channel_get_group(channel, &small_group,
4375 + &prio_a, &prio_b);
4376 + if (err) {
4377 + pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
4378 + __func__);
4379 + goto err_init_wbfs;
4380 + }
4381 +
4382 + small_group = true;
4383 + group_b = true;
4384 + prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
4385 + /* If group A isn't configured, configure it as group B */
4386 + prio_a = prio_a ? : prio_b;
4387 +
4388 + } else {
4389 + /* Configure the small group A */
4390 + priv->wbfs.group_type = WBFS_GRP_A;
4391 +
4392 + err = qman_ceetm_channel_get_group(channel, &small_group,
4393 + &prio_a, &prio_b);
4394 + if (err) {
4395 + pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
4396 + __func__);
4397 + goto err_init_wbfs;
4398 + }
4399 +
4400 + small_group = true;
4401 + group_b = false;
4402 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4403 + /* If group B isn't configured, configure it as group A */
4404 + prio_b = prio_b ? : prio_a;
4405 + }
4406 +
4407 + err = qman_ceetm_channel_set_group(channel, small_group, prio_a,
4408 + prio_b);
4409 + if (err)
4410 + goto err_init_wbfs;
4411 +
4412 + if (priv->shaped) {
4413 + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
4414 + group_b,
4415 + priv->wbfs.cr);
4416 + if (err) {
4417 + pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
4418 + __func__);
4419 + goto err_init_wbfs;
4420 + }
4421 +
4422 + err = qman_ceetm_channel_set_group_er_eligibility(channel,
4423 + group_b,
4424 + priv->wbfs.er);
4425 + if (err) {
4426 + pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
4427 + __func__);
4428 + goto err_init_wbfs;
4429 + }
4430 + }
4431 +
4432 + /* Create qcount child classes */
4433 + for (i = 0; i < priv->wbfs.qcount; i++) {
4434 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4435 + if (!child_cl) {
4436 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4437 + __func__);
4438 + err = -ENOMEM;
4439 + goto err_init_wbfs;
4440 + }
4441 +
4442 + child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
4443 + if (!child_cl->wbfs.cstats) {
4444 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4445 + __func__);
4446 + err = -ENOMEM;
4447 + goto err_init_wbfs_cls;
4448 + }
4449 +
4450 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4451 + child_cl->refcnt = 1;
4452 + child_cl->parent = sch;
4453 + child_cl->type = CEETM_WBFS;
4454 + child_cl->shaped = priv->shaped;
4455 + child_cl->wbfs.fq = NULL;
4456 + child_cl->wbfs.cq = NULL;
4457 + child_cl->wbfs.weight = qopt->qweight[i];
4458 +
4459 + if (priv->wbfs.group_type == WBFS_GRP_B)
4460 + id = WBFS_GRP_B_OFFSET + i;
4461 + else
4462 + id = WBFS_GRP_A_OFFSET + i;
4463 +
4464 + err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
4465 + priv->wbfs.group_type);
4466 + if (err) {
4467 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
4468 + __func__, child_cl->common.classid);
4469 + goto err_init_wbfs_cls;
4470 + }
4471 +
4472 + /* Add class handle in Qdisc */
4473 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4474 + pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n",
4475 + __func__, child_cl->common.classid,
4476 + child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx);
4477 + }
4478 +
4479 + /* Signal the root class that a group has been configured */
4480 + switch (priv->wbfs.group_type) {
4481 + case WBFS_GRP_LARGE:
4482 + root_cl->root.wbfs_grp_large = true;
4483 + break;
4484 + case WBFS_GRP_A:
4485 + root_cl->root.wbfs_grp_a = true;
4486 + break;
4487 + case WBFS_GRP_B:
4488 + root_cl->root.wbfs_grp_b = true;
4489 + break;
4490 + }
4491 +
4492 + return 0;
4493 +
4494 +err_init_wbfs_cls:
4495 + ceetm_cls_destroy(sch, child_cl);
4496 +err_init_wbfs:
4497 + ceetm_destroy(sch);
4498 + return err;
4499 +}
4500 +
4501 +/* Configure a generic ceetm qdisc */
4502 +static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
4503 +{
4504 + struct tc_ceetm_qopt *qopt;
4505 + struct nlattr *tb[TCA_CEETM_QOPS + 1];
4506 + int ret;
4507 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4508 + struct net_device *dev = qdisc_dev(sch);
4509 +
4510 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4511 +
4512 + if (!netif_is_multiqueue(dev))
4513 + return -EOPNOTSUPP;
4514 +
4515 + if (!opt) {
4516 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4517 + return -EINVAL;
4518 + }
4519 +
4520 + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
4521 + if (ret < 0) {
4522 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4523 + return ret;
4524 + }
4525 +
4526 + if (!tb[TCA_CEETM_QOPS]) {
4527 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4528 + return -EINVAL;
4529 + }
4530 +
4531 + if (TC_H_MIN(sch->handle)) {
4532 + pr_err("CEETM: a qdisc should not have a minor\n");
4533 + return -EINVAL;
4534 + }
4535 +
4536 + qopt = nla_data(tb[TCA_CEETM_QOPS]);
4537 +
4538 + /* Initialize the class hash list. Each qdisc has its own class hash */
4539 + ret = qdisc_class_hash_init(&priv->clhash);
4540 + if (ret < 0) {
4541 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
4542 + __func__);
4543 + return ret;
4544 + }
4545 +
4546 + priv->type = qopt->type;
4547 +
4548 + switch (priv->type) {
4549 + case CEETM_ROOT:
4550 + ret = ceetm_init_root(sch, priv, qopt);
4551 + break;
4552 + case CEETM_PRIO:
4553 + ret = ceetm_init_prio(sch, priv, qopt);
4554 + break;
4555 + case CEETM_WBFS:
4556 + ret = ceetm_init_wbfs(sch, priv, qopt);
4557 + break;
4558 + default:
4559 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4560 + ceetm_destroy(sch);
4561 + ret = -EINVAL;
4562 + }
4563 +
4564 + return ret;
4565 +}
4566 +
4567 +/* Edit a root ceetm qdisc */
4568 +static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
4569 + struct net_device *dev,
4570 + struct tc_ceetm_qopt *qopt)
4571 +{
4572 + int err = 0;
4573 + u64 bps;
4574 +
4575 + if (priv->shaped != (bool)qopt->shaped) {
4576 + pr_err("CEETM: qdisc %X is %s\n", sch->handle,
4577 + priv->shaped ? "shaped" : "unshaped");
4578 + return -EINVAL;
4579 + }
4580 +
4581 + /* Nothing to modify for unshaped qdiscs */
4582 + if (!priv->shaped)
4583 + return 0;
4584 +
4585 + /* Configure the LNI shaper */
4586 + if (priv->root.overhead != qopt->overhead) {
4587 + err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1,
4588 + qopt->overhead);
4589 + if (err)
4590 + goto change_err;
4591 + priv->root.overhead = qopt->overhead;
4592 + }
4593 +
4594 + if (priv->root.rate != qopt->rate) {
4595 + bps = qopt->rate << 3; /* Bps -> bps */
4596 + err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps,
4597 + dev->mtu);
4598 + if (err)
4599 + goto change_err;
4600 + priv->root.rate = qopt->rate;
4601 + }
4602 +
4603 + if (priv->root.ceil != qopt->ceil) {
4604 + bps = qopt->ceil << 3; /* Bps -> bps */
4605 + err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps,
4606 + dev->mtu);
4607 + if (err)
4608 + goto change_err;
4609 + priv->root.ceil = qopt->ceil;
4610 + }
4611 +
4612 + return 0;
4613 +
4614 +change_err:
4615 + pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n",
4616 + __func__, sch->handle);
4617 + return err;
4618 +}
4619 +
4620 +/* Edit a wbfs ceetm qdisc */
4621 +static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
4622 + struct tc_ceetm_qopt *qopt)
4623 +{
4624 + int err;
4625 + bool group_b;
4626 + struct qm_ceetm_channel *channel;
4627 + struct ceetm_class *prio_class, *root_class;
4628 + struct ceetm_qdisc *prio_qdisc;
4629 +
4630 + if (qopt->qcount) {
4631 + pr_err("CEETM: the qcount can not be modified\n");
4632 + return -EINVAL;
4633 + }
4634 +
4635 + if (qopt->qweight[0]) {
4636 + pr_err("CEETM: the qweight can be modified through the wbfs classes\n");
4637 + return -EINVAL;
4638 + }
4639 +
4640 + if (!priv->shaped && (qopt->cr || qopt->er)) {
4641 + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
4642 + return -EINVAL;
4643 + }
4644 +
4645 + if (priv->shaped && !(qopt->cr || qopt->er)) {
4646 + pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
4647 + return -EINVAL;
4648 + }
4649 +
4650 + /* Nothing to modify for unshaped qdiscs */
4651 + if (!priv->shaped)
4652 + return 0;
4653 +
4654 + prio_class = priv->wbfs.parent;
4655 + prio_qdisc = qdisc_priv(prio_class->parent);
4656 + root_class = prio_qdisc->prio.parent;
4657 + channel = root_class->root.ch;
4658 + group_b = priv->wbfs.group_type == WBFS_GRP_B;
4659 +
4660 + if (qopt->cr != priv->wbfs.cr) {
4661 + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
4662 + group_b,
4663 + qopt->cr);
4664 + if (err)
4665 + goto change_err;
4666 + priv->wbfs.cr = qopt->cr;
4667 + }
4668 +
4669 + if (qopt->er != priv->wbfs.er) {
4670 + err = qman_ceetm_channel_set_group_er_eligibility(channel,
4671 + group_b,
4672 + qopt->er);
4673 + if (err)
4674 + goto change_err;
4675 + priv->wbfs.er = qopt->er;
4676 + }
4677 +
4678 + return 0;
4679 +
4680 +change_err:
4681 + pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n",
4682 + __func__, sch->handle);
4683 + return err;
4684 +}
4685 +
4686 +/* Edit a ceetm qdisc */
4687 +static int ceetm_change(struct Qdisc *sch, struct nlattr *opt)
4688 +{
4689 + struct tc_ceetm_qopt *qopt;
4690 + struct nlattr *tb[TCA_CEETM_QOPS + 1];
4691 + int ret;
4692 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4693 + struct net_device *dev = qdisc_dev(sch);
4694 +
4695 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4696 +
4697 + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
4698 + if (ret < 0) {
4699 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4700 + return ret;
4701 + }
4702 +
4703 + if (!tb[TCA_CEETM_QOPS]) {
4704 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4705 + return -EINVAL;
4706 + }
4707 +
4708 + if (TC_H_MIN(sch->handle)) {
4709 + pr_err("CEETM: a qdisc should not have a minor\n");
4710 + return -EINVAL;
4711 + }
4712 +
4713 + qopt = nla_data(tb[TCA_CEETM_QOPS]);
4714 +
4715 + if (priv->type != qopt->type) {
4716 + pr_err("CEETM: qdisc %X is not of the provided type\n",
4717 + sch->handle);
4718 + return -EINVAL;
4719 + }
4720 +
4721 + switch (priv->type) {
4722 + case CEETM_ROOT:
4723 + ret = ceetm_change_root(sch, priv, dev, qopt);
4724 + break;
4725 + case CEETM_PRIO:
4726 + pr_err("CEETM: prio qdiscs can not be modified\n");
4727 + ret = -EINVAL;
4728 + break;
4729 + case CEETM_WBFS:
4730 + ret = ceetm_change_wbfs(sch, priv, qopt);
4731 + break;
4732 + default:
4733 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4734 + ret = -EINVAL;
4735 + }
4736 +
4737 + return ret;
4738 +}
4739 +
4740 +/* Attach the underlying pfifo qdiscs */
4741 +static void ceetm_attach(struct Qdisc *sch)
4742 +{
4743 + struct net_device *dev = qdisc_dev(sch);
4744 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4745 + struct Qdisc *qdisc, *old_qdisc;
4746 + unsigned int i;
4747 +
4748 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4749 +
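+	/* Graft the pre-allocated pfifos onto every TX queue, destroying
+	 * whatever qdisc was installed there before.
+	 */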
4750 + for (i = 0; i < dev->num_tx_queues; i++) {
4751 + qdisc = priv->root.qdiscs[i];
4752 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
4753 + if (old_qdisc)
4754 + qdisc_destroy(old_qdisc);
4755 + }
4756 +}
4757 +
4758 +static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
4759 +{
4760 + struct ceetm_class *cl;
4761 +
4762 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4763 + __func__, classid, sch->handle);
4764 + cl = ceetm_find(classid, sch);
4765 +
4766 + if (cl)
4767 + cl->refcnt++; /* Will decrement in put() */
4768 + return (unsigned long)cl;
4769 +}
4770 +
4771 +static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
4772 +{
4773 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4774 +
4775 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4776 + __func__, cl->common.classid, sch->handle);
4777 + cl->refcnt--;
4778 +
4779 + if (cl->refcnt == 0)
4780 + ceetm_cls_destroy(sch, cl);
4781 +}
4782 +
4783 +static int ceetm_cls_change_root(struct ceetm_class *cl,
4784 + struct tc_ceetm_copt *copt,
4785 + struct net_device *dev)
4786 +{
4787 + int err;
4788 + u64 bps;
4789 +
4790 + if ((bool)copt->shaped != cl->shaped) {
4791 + pr_err("CEETM: class %X is %s\n", cl->common.classid,
4792 + cl->shaped ? "shaped" : "unshaped");
4793 + return -EINVAL;
4794 + }
4795 +
4796 + if (cl->shaped && cl->root.rate != copt->rate) {
4797 + bps = copt->rate << 3; /* Bps -> bps */
4798 + err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps,
4799 + dev->mtu);
4800 + if (err)
4801 + goto change_cls_err;
4802 + cl->root.rate = copt->rate;
4803 + }
4804 +
4805 + if (cl->shaped && cl->root.ceil != copt->ceil) {
4806 + bps = copt->ceil << 3; /* Bps -> bps */
4807 + err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps,
4808 + dev->mtu);
4809 + if (err)
4810 + goto change_cls_err;
4811 + cl->root.ceil = copt->ceil;
4812 + }
4813 +
4814 + if (!cl->shaped && cl->root.tbl != copt->tbl) {
4815 + err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl);
4816 + if (err)
4817 + goto change_cls_err;
4818 + cl->root.tbl = copt->tbl;
4819 + }
4820 +
4821 + return 0;
4822 +
4823 +change_cls_err:
4824 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n",
4825 + __func__, cl->common.classid);
4826 + return err;
4827 +}
4828 +
4829 +static int ceetm_cls_change_prio(struct ceetm_class *cl,
4830 + struct tc_ceetm_copt *copt)
4831 +{
4832 + int err;
4833 +
4834 + if (!cl->shaped && (copt->cr || copt->er)) {
4835 + pr_err("CEETM: only shaped classes can have CR and ER enabled\n");
4836 + return -EINVAL;
4837 + }
4838 +
4839 + if (cl->prio.cr != (bool)copt->cr) {
4840 + err = qman_ceetm_channel_set_cq_cr_eligibility(
4841 + cl->prio.cq->parent,
4842 + cl->prio.cq->idx,
4843 + copt->cr);
4844 + if (err)
4845 + goto change_cls_err;
4846 + cl->prio.cr = copt->cr;
4847 + }
4848 +
4849 + if (cl->prio.er != (bool)copt->er) {
4850 + err = qman_ceetm_channel_set_cq_er_eligibility(
4851 + cl->prio.cq->parent,
4852 + cl->prio.cq->idx,
4853 + copt->er);
4854 + if (err)
4855 + goto change_cls_err;
4856 + cl->prio.er = copt->er;
4857 + }
4858 +
4859 + return 0;
4860 +
4861 +change_cls_err:
4862 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
4863 + __func__, cl->common.classid);
4864 + return err;
4865 +}
4866 +
4867 +static int ceetm_cls_change_wbfs(struct ceetm_class *cl,
4868 + struct tc_ceetm_copt *copt)
4869 +{
4870 + int err;
4871 +
4872 + if (copt->weight != cl->wbfs.weight) {
4873 + /* Configure the CQ weight: real number multiplied by 100 to
4874 + * get rid of the fraction
4875 + */
4876 + err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq,
4877 + copt->weight * 100);
4878 +
4879 + if (err) {
4880 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
4881 + __func__, cl->common.classid);
4882 + return err;
4883 + }
4884 +
4885 + cl->wbfs.weight = copt->weight;
4886 + }
4887 +
4888 + return 0;
4889 +}
4890 +
4891 +/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */
4892 +static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
4893 + struct nlattr **tca, unsigned long *arg)
4894 +{
4895 + int err;
4896 + u64 bps;
4897 + struct ceetm_qdisc *priv;
4898 + struct ceetm_class *cl = (struct ceetm_class *)*arg;
4899 + struct nlattr *opt = tca[TCA_OPTIONS];
4900 + struct nlattr *tb[__TCA_CEETM_MAX];
4901 + struct tc_ceetm_copt *copt;
4902 + struct qm_ceetm_channel *channel;
4903 + struct net_device *dev = qdisc_dev(sch);
4904 +
4905 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
4906 + __func__, classid, sch->handle);
4907 +
4908 + if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
4909 + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
4910 + return -EINVAL;
4911 + }
4912 +
4913 + priv = qdisc_priv(sch);
4914 +
4915 + if (!opt) {
4916 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4917 + return -EINVAL;
4918 + }
4919 +
4920 + if (!cl && sch->handle != parentid) {
4921 + pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n");
4922 + return -EINVAL;
4923 + }
4924 +
4925 + if (!cl && priv->type != CEETM_ROOT) {
4926 + pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
4927 + return -EINVAL;
4928 + }
4929 +
4930 + err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
4931 + if (err < 0) {
4932 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4933 + return -EINVAL;
4934 + }
4935 +
4936 + if (!tb[TCA_CEETM_COPT]) {
4937 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4938 + return -EINVAL;
4939 + }
4940 +
4941 + if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
4942 + pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n");
4943 + return -EINVAL;
4944 + }
4945 +
4946 + copt = nla_data(tb[TCA_CEETM_COPT]);
4947 +
4948 + /* Configure an existing ceetm class */
4949 + if (cl) {
4950 + if (copt->type != cl->type) {
4951 + pr_err("CEETM: class %X is not of the provided type\n",
4952 + cl->common.classid);
4953 + return -EINVAL;
4954 + }
4955 +
4956 + switch (copt->type) {
4957 + case CEETM_ROOT:
4958 + return ceetm_cls_change_root(cl, copt, dev);
4959 +
4960 + case CEETM_PRIO:
4961 + return ceetm_cls_change_prio(cl, copt);
4962 +
4963 + case CEETM_WBFS:
4964 + return ceetm_cls_change_wbfs(cl, copt);
4965 +
4966 + default:
4967 + pr_err(KBUILD_BASENAME " : %s : invalid class\n",
4968 + __func__);
4969 + return -EINVAL;
4970 + }
4971 + }
4972 +
4973 + /* Add a new root ceetm class */
4974 + if (copt->type != CEETM_ROOT) {
4975 + pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
4976 + return -EINVAL;
4977 + }
4978 +
4979 + if (copt->shaped && !priv->shaped) {
4980 + pr_err("CEETM: can not add a shaped ceetm root class under an unshaped ceetm root qdisc\n");
4981 + return -EINVAL;
4982 + }
4983 +
4984 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
4985 + if (!cl)
4986 + return -ENOMEM;
4987 +
4988 + cl->type = copt->type;
4989 + cl->shaped = copt->shaped;
4990 + cl->root.rate = copt->rate;
4991 + cl->root.ceil = copt->ceil;
4992 + cl->root.tbl = copt->tbl;
4993 +
4994 + cl->common.classid = classid;
4995 + cl->refcnt = 1;
4996 + cl->parent = sch;
4997 + cl->root.child = NULL;
4998 + cl->root.wbfs_grp_a = false;
4999 + cl->root.wbfs_grp_b = false;
5000 + cl->root.wbfs_grp_large = false;
5001 +
5002 + /* Claim a CEETM channel */
5003 + err = qman_ceetm_channel_claim(&channel, priv->root.lni);
5004 + if (err) {
5005 + pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
5006 + __func__);
5007 + goto claim_err;
5008 + }
5009 +
5010 + cl->root.ch = channel;
5011 +
5012 + if (cl->shaped) {
5013 + /* Configure the channel shaper */
5014 + err = qman_ceetm_channel_enable_shaper(channel, 1);
5015 + if (err)
5016 + goto channel_err;
5017 +
5018 + bps = cl->root.rate << 3; /* Bps -> bps */
5019 + err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
5020 + dev->mtu);
5021 + if (err)
5022 + goto channel_err;
5023 +
5024 + bps = cl->root.ceil << 3; /* Bps -> bps */
5025 + err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
5026 + dev->mtu);
5027 + if (err)
5028 + goto channel_err;
5029 +
5030 + } else {
5031 + /* Configure the uFQ algorithm */
5032 + err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
5033 + if (err)
5034 + goto channel_err;
5035 + }
5036 +
5037 + /* Add class handle in Qdisc */
5038 + ceetm_link_class(sch, &priv->clhash, &cl->common);
5039 +
5040 + pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n",
5041 + __func__, classid, channel->idx);
5042 + *arg = (unsigned long)cl;
5043 + return 0;
5044 +
5045 +channel_err:
5046 + pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
5047 + __func__, channel->idx);
5048 + if (qman_ceetm_channel_release(channel))
5049 + pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n",
5050 + __func__, channel->idx);
5051 +claim_err:
5052 + kfree(cl);
5053 + return err;
5054 +}
5055 +
5056 +static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
5057 +{
5058 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5059 + struct ceetm_class *cl;
5060 + unsigned int i;
5061 +
5062 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
5063 +
5064 + if (arg->stop)
5065 + return;
5066 +
5067 + for (i = 0; i < priv->clhash.hashsize; i++) {
5068 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
5069 + if (arg->count < arg->skip) {
5070 + arg->count++;
5071 + continue;
5072 + }
5073 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
5074 + arg->stop = 1;
5075 + return;
5076 + }
5077 + arg->count++;
5078 + }
5079 + }
5080 +}
5081 +
5082 +static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
5083 + struct sk_buff *skb, struct tcmsg *tcm)
5084 +{
5085 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5086 + struct nlattr *nest;
5087 + struct tc_ceetm_copt copt;
5088 +
5089 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5090 + __func__, cl->common.classid, sch->handle);
5091 +
5092 + sch_tree_lock(sch);
5093 +
5094 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
5095 + tcm->tcm_handle = cl->common.classid;
5096 +
5097 + memset(&copt, 0, sizeof(copt));
5098 +
5099 + copt.shaped = cl->shaped;
5100 + copt.type = cl->type;
5101 +
5102 + switch (cl->type) {
5103 + case CEETM_ROOT:
5104 + if (cl->root.child)
5105 + tcm->tcm_info = cl->root.child->handle;
5106 +
5107 + copt.rate = cl->root.rate;
5108 + copt.ceil = cl->root.ceil;
5109 + copt.tbl = cl->root.tbl;
5110 + break;
5111 +
5112 + case CEETM_PRIO:
5113 + if (cl->prio.child)
5114 + tcm->tcm_info = cl->prio.child->handle;
5115 +
5116 + copt.cr = cl->prio.cr;
5117 + copt.er = cl->prio.er;
5118 + break;
5119 +
5120 + case CEETM_WBFS:
5121 + copt.weight = cl->wbfs.weight;
5122 + break;
5123 + }
5124 +
5125 + nest = nla_nest_start(skb, TCA_OPTIONS);
5126 + if (!nest)
5127 + goto nla_put_failure;
5128 + if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
5129 + goto nla_put_failure;
5130 + nla_nest_end(skb, nest);
5131 + sch_tree_unlock(sch);
5132 + return skb->len;
5133 +
5134 +nla_put_failure:
5135 + sch_tree_unlock(sch);
5136 + nla_nest_cancel(skb, nest);
5137 + return -EMSGSIZE;
5138 +}
5139 +
5140 +static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
5141 +{
5142 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5143 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5144 +
5145 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5146 + __func__, cl->common.classid, sch->handle);
5147 +
5148 + sch_tree_lock(sch);
5149 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
5150 + cl->refcnt--;
5151 +
5152 +	/* The refcnt should be at least 1 since we have incremented it in
5153 +	 * get(). It will be decremented again in put(), which calls destroy()
5154 +	 * to actually free the memory once it reaches 0.
5155 +	 */
5156 + WARN_ON(cl->refcnt == 0);
5157 +
5158 + sch_tree_unlock(sch);
5159 + return 0;
5160 +}
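ceetm_cls_delete() only unlinks the class and drops the hash-table reference; actual freeing is left to the get()/put() pairing described in the comment. A minimal sketch of the put() side under that contract (example_cls_put() and ceetm_cls_destroy() are assumed names for illustration; the real callback is the ceetm_cls_put wired into ceetm_cls_ops below):

	static void example_cls_put(struct Qdisc *sch, unsigned long arg)
	{
		struct ceetm_class *cl = (struct ceetm_class *)arg;

		/* free the class only when the last reference is dropped */
		if (--cl->refcnt == 0)
			ceetm_cls_destroy(sch, cl);
	}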
5161 +
5162 +/* Get the class' child qdisc, if any */
5163 +static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
5164 +{
5165 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5166 +
5167 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5168 + __func__, cl->common.classid, sch->handle);
5169 +
5170 + switch (cl->type) {
5171 + case CEETM_ROOT:
5172 + return cl->root.child;
5173 +
5174 + case CEETM_PRIO:
5175 + return cl->prio.child;
5176 + }
5177 +
5178 + return NULL;
5179 +}
5180 +
5181 +static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
5182 + struct Qdisc *new, struct Qdisc **old)
5183 +{
5184 + if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
5185 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
5186 + return -EOPNOTSUPP;
5187 + }
5188 +
5189 + return 0;
5190 +}
5191 +
5192 +static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
5193 + struct gnet_dump *d)
5194 +{
5195 + unsigned int i;
5196 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5197 + struct gnet_stats_basic_packed tmp_bstats;
5198 + struct ceetm_class_stats *cstats = NULL;
5199 + struct qm_ceetm_cq *cq = NULL;
5200 + struct tc_ceetm_xstats xstats;
5201 +
5202 + memset(&xstats, 0, sizeof(xstats));
5203 + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
5204 +
5205 + switch (cl->type) {
5206 + case CEETM_ROOT:
5207 + return 0;
5208 + case CEETM_PRIO:
5209 + cq = cl->prio.cq;
5210 + break;
5211 + case CEETM_WBFS:
5212 + cq = cl->wbfs.cq;
5213 + break;
5214 + }
5215 +
5216 + for_each_online_cpu(i) {
5217 + switch (cl->type) {
5218 + case CEETM_PRIO:
5219 + cstats = per_cpu_ptr(cl->prio.cstats, i);
5220 + break;
5221 + case CEETM_WBFS:
5222 + cstats = per_cpu_ptr(cl->wbfs.cstats, i);
5223 + break;
5224 + }
5225 +
5226 + if (cstats) {
5227 + xstats.ern_drop_count += cstats->ern_drop_count;
5228 + xstats.congested_count += cstats->congested_count;
5229 + tmp_bstats.bytes += cstats->bstats.bytes;
5230 + tmp_bstats.packets += cstats->bstats.packets;
5231 + }
5232 + }
5233 +
5234 + if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
5235 + return -1;
5236 +
5237 + if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
5238 + &xstats.frame_count,
5239 + &xstats.byte_count))
5240 + return -1;
5241 +
5242 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
5243 +}
5244 +
5245 +static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
5246 +{
5247 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5248 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5249 + struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
5250 +
5251 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5252 + cl ? cl->common.classid : 0, sch->handle);
5253 + return fl;
5254 +}
5255 +
5256 +static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
5257 + u32 classid)
5258 +{
5259 + struct ceetm_class *cl = ceetm_find(classid, sch);
5260 +
5261 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5262 + cl ? cl->common.classid : 0, sch->handle);
5263 + return (unsigned long)cl;
5264 +}
5265 +
5266 +static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
5267 +{
5268 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5269 +
5270 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5271 + cl ? cl->common.classid : 0, sch->handle);
5272 +}
5273 +
5274 +const struct Qdisc_class_ops ceetm_cls_ops = {
5275 + .graft = ceetm_cls_graft,
5276 + .leaf = ceetm_cls_leaf,
5277 + .get = ceetm_cls_get,
5278 + .put = ceetm_cls_put,
5279 + .change = ceetm_cls_change,
5280 + .delete = ceetm_cls_delete,
5281 + .walk = ceetm_cls_walk,
5282 + .tcf_chain = ceetm_tcf_chain,
5283 + .bind_tcf = ceetm_tcf_bind,
5284 + .unbind_tcf = ceetm_tcf_unbind,
5285 + .dump = ceetm_cls_dump,
5286 + .dump_stats = ceetm_cls_dump_stats,
5287 +};
5288 +
5289 +struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
5290 + .id = "ceetm",
5291 + .priv_size = sizeof(struct ceetm_qdisc),
5292 + .cl_ops = &ceetm_cls_ops,
5293 + .init = ceetm_init,
5294 + .destroy = ceetm_destroy,
5295 + .change = ceetm_change,
5296 + .dump = ceetm_dump,
5297 + .attach = ceetm_attach,
5298 + .owner = THIS_MODULE,
5299 +};
5300 +
5301 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
5302 +static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
5303 + struct Qdisc *sch, int *qerr,
5304 + bool *act_drop)
5305 +{
5306 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5307 + struct ceetm_class *cl = NULL, *wbfs_cl;
5308 + struct tcf_result res;
5309 + struct tcf_proto *tcf;
5310 + int result;
5311 +
5312 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
5313 + tcf = priv->filter_list;
5314 + while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
5315 +#ifdef CONFIG_NET_CLS_ACT
5316 + switch (result) {
5317 + case TC_ACT_QUEUED:
5318 + case TC_ACT_STOLEN:
5319 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
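+			/* fall through */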
5320 + case TC_ACT_SHOT:
5321 + /* No valid class found due to action */
5322 + *act_drop = true;
5323 + return NULL;
5324 + }
5325 +#endif
5326 + cl = (void *)res.class;
5327 + if (!cl) {
5328 + if (res.classid == sch->handle) {
5329 + /* The filter leads to the qdisc */
5330 + /* TODO default qdisc */
5331 + return NULL;
5332 + }
5333 +
5334 + cl = ceetm_find(res.classid, sch);
5335 + if (!cl)
5336 + /* The filter leads to an invalid class */
5337 + break;
5338 + }
5339 +
5340 + /* The class might have its own filters attached */
5341 + tcf = cl->filter_list;
5342 + }
5343 +
5344 + if (!cl) {
5345 + /* No valid class found */
5346 + /* TODO default qdisc */
5347 + return NULL;
5348 + }
5349 +
5350 + switch (cl->type) {
5351 + case CEETM_ROOT:
5352 + if (cl->root.child) {
5353 + /* Run the prio qdisc classifiers */
5354 + return ceetm_classify(skb, cl->root.child, qerr,
5355 + act_drop);
5356 + } else {
5357 + /* The root class does not have a child prio qdisc */
5358 + /* TODO default qdisc */
5359 + return NULL;
5360 + }
5361 + case CEETM_PRIO:
5362 + if (cl->prio.child) {
5363 + /* If filters lead to a wbfs class, return it.
5364 + * Otherwise, return the prio class
5365 + */
5366 + wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
5367 + act_drop);
5368 + /* A NULL result might indicate either an erroneous
5369 + * filter, or no filters at all. We will assume the
5370 + * latter
5371 + */
5372 + return wbfs_cl ? : cl;
5373 + }
5374 + }
5375 +
5376 + /* For wbfs and childless prio classes, return the class directly */
5377 + return cl;
5378 +}
5379 +
5380 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
5381 +{
5382 + int ret;
5383 + bool act_drop = false;
5384 + struct Qdisc *sch = net_dev->qdisc;
5385 + struct ceetm_class *cl;
5386 + struct dpa_priv_s *priv_dpa;
5387 + struct qman_fq *egress_fq, *conf_fq;
5388 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5389 + struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
5390 + struct ceetm_class_stats *cstats;
5391 + const int queue_mapping = dpa_get_queue_mapping(skb);
5392 + spinlock_t *root_lock = qdisc_lock(sch);
5393 +
5394 + spin_lock(root_lock);
5395 + cl = ceetm_classify(skb, sch, &ret, &act_drop);
5396 + spin_unlock(root_lock);
5397 +
5398 +#ifdef CONFIG_NET_CLS_ACT
5399 + if (act_drop) {
5400 + if (ret & __NET_XMIT_BYPASS)
5401 + qstats->drops++;
5402 + goto drop;
5403 + }
5404 +#endif
5405 + /* TODO default class */
5406 + if (unlikely(!cl)) {
5407 + qstats->drops++;
5408 + goto drop;
5409 + }
5410 +
5411 + priv_dpa = netdev_priv(net_dev);
5412 + conf_fq = priv_dpa->conf_fqs[queue_mapping];
5413 +
5414 + /* Choose the proper tx fq and update the basic stats (bytes and
5415 + * packets sent by the class)
5416 + */
5417 + switch (cl->type) {
5418 + case CEETM_PRIO:
5419 + egress_fq = &cl->prio.fq->fq;
5420 + cstats = this_cpu_ptr(cl->prio.cstats);
5421 + break;
5422 + case CEETM_WBFS:
5423 + egress_fq = &cl->wbfs.fq->fq;
5424 + cstats = this_cpu_ptr(cl->wbfs.cstats);
5425 + break;
5426 + default:
5427 + qstats->drops++;
5428 + goto drop;
5429 + }
5430 +
5431 + bstats_update(&cstats->bstats, skb);
5432 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
5433 +
5434 +drop:
5435 + dev_kfree_skb_any(skb);
5436 + return NET_XMIT_SUCCESS;
5437 +}
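ceetm_tx() replaces the generic qdisc dequeue path on egress: classification picks a hardware class queue and the frame is enqueued to it directly via dpa_tx_extended(). A minimal sketch of how a DPAA transmit routine might dispatch to it (the check and the dpa_tx() fallback are assumptions for illustration; the actual wiring lives in the dpaa Tx path, outside this file):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *net_dev)
	{
		/* hand off to CEETM only when its qdisc is the netdev root */
		if (net_dev->qdisc && !strcmp(net_dev->qdisc->ops->id, "ceetm"))
			return ceetm_tx(skb, net_dev);

		return dpa_tx(skb, net_dev);
	}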
5438 +
5439 +static int __init ceetm_register(void)
5440 +{
5441 + int _errno = 0;
5442 +
5443 + pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
5444 +
5445 + _errno = register_qdisc(&ceetm_qdisc_ops);
5446 + if (unlikely(_errno))
5447 + pr_err(KBUILD_MODNAME
5448 + ": %s:%hu:%s(): register_qdisc() = %d\n",
5449 + KBUILD_BASENAME ".c", __LINE__, __func__, _errno);
5450 +
5451 + return _errno;
5452 +}
5453 +
5454 +static void __exit ceetm_unregister(void)
5455 +{
5456 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
5457 + KBUILD_BASENAME ".c", __func__);
5458 +
5459 + unregister_qdisc(&ceetm_qdisc_ops);
5460 +}
5461 +
5462 +module_init(ceetm_register);
5463 +module_exit(ceetm_unregister);
5464 --- /dev/null
5465 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
5466 @@ -0,0 +1,236 @@
5467 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
5468 + *
5469 + * Redistribution and use in source and binary forms, with or without
5470 + * modification, are permitted provided that the following conditions are met:
5471 + * * Redistributions of source code must retain the above copyright
5472 + * notice, this list of conditions and the following disclaimer.
5473 + * * Redistributions in binary form must reproduce the above copyright
5474 + * notice, this list of conditions and the following disclaimer in the
5475 + * documentation and/or other materials provided with the distribution.
5476 + * * Neither the name of Freescale Semiconductor nor the
5477 + * names of its contributors may be used to endorse or promote products
5478 + * derived from this software without specific prior written permission.
5479 + *
5480 + *
5481 + * ALTERNATIVELY, this software may be distributed under the terms of the
5482 + * GNU General Public License ("GPL") as published by the Free Software
5483 + * Foundation, either version 2 of that License or (at your option) any
5484 + * later version.
5485 + *
5486 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5487 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5488 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5489 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5490 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5491 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5492 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5493 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5494 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5495 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5496 + */
5497 +
5498 +#ifndef __DPAA_ETH_CEETM_H
5499 +#define __DPAA_ETH_CEETM_H
5500 +
5501 +#include <net/pkt_sched.h>
5502 +#include <net/netlink.h>
5503 +#include <lnxwrp_fm.h>
5504 +
5505 +#include "mac.h"
5506 +#include "dpaa_eth_common.h"
5507 +
5508 +/* Mask to determine the sub-portal id from a channel number */
5509 +#define CHANNEL_SP_MASK 0x1f
5510 +/* The number of the last channel that services DCP0, connected to FMan 0.
5511 + * Value validated for B4 and T series platforms.
5512 + */
5513 +#define DCP0_MAX_CHANNEL 0x80f
5514 +/* A2V=1 - field A2 is valid
5515 + * A0V=1 - field A0 is valid - enables frame confirmation
5516 + * OVOM=1 - override operation mode bits with values from A2
5517 + * EBD=1 - external buffers are deallocated at the end of the FMan flow
5518 + * NL=0 - the BMI releases all the internal buffers
5519 + */
5520 +#define CEETM_CONTEXT_A 0x1a00000080000000
5521 +/* The ratio between the superior and inferior congestion state thresholds. The
5522 + * lower threshold is set to 7/8 of the superior one (as the default for WQ
5523 + * scheduling).
5524 + */
5525 +#define CEETM_CCGR_RATIO 0.875
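For example, a superior (congestion entry) threshold of 65536 bytes yields an inferior (congestion exit) threshold of 65536 * 0.875 = 57344 bytes, which in integer arithmetic is `thres - (thres >> 3)`.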
5526 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
5527 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
5528 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
5529 + * hex).
5530 + */
5531 +#define PFIFO_MIN_OFFSET 0x21
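Mirroring the check at the top of ceetm_cls_change(), minor 0x20 (classid 1:20) is therefore the last usable root-class minor, while 0x21 (1:21) is the first pfifo handle. A minimal sketch of that predicate (the helper name is hypothetical):

	static bool is_ceetm_root_minor(u32 classid)
	{
		/* valid root-class minors are 0x01..0x20 */
		return TC_H_MIN(classid) && TC_H_MIN(classid) < PFIFO_MIN_OFFSET;
	}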
5532 +
5533 +/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
5534 +#define CEETM_MAX_PRIO_QCOUNT 8
5535 +#define CEETM_MAX_WBFS_QCOUNT 8
5536 +#define CEETM_MIN_WBFS_QCOUNT 4
5537 +
5538 +/* The id offsets of the CQs belonging to WBFS groups: ids 8-11 for group A
5539 + * (8-15 when a single large group is used) and 12-15 for group B.
5540 + */
5541 +#define WBFS_GRP_A_OFFSET 8
5542 +#define WBFS_GRP_B_OFFSET 12
5543 +
5544 +#define WBFS_GRP_A 1
5545 +#define WBFS_GRP_B 2
5546 +#define WBFS_GRP_LARGE 3
5547 +
5548 +enum {
5549 + TCA_CEETM_UNSPEC,
5550 + TCA_CEETM_COPT,
5551 + TCA_CEETM_QOPS,
5552 + __TCA_CEETM_MAX,
5553 +};
5554 +
5555 +/* CEETM configuration types */
5556 +enum {
5557 + CEETM_ROOT = 1,
5558 + CEETM_PRIO,
5559 + CEETM_WBFS
5560 +};
5561 +
5562 +#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
5563 +extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
5564 +
5565 +struct ceetm_class;
5566 +struct ceetm_qdisc_stats;
5567 +struct ceetm_class_stats;
5568 +
5569 +struct ceetm_fq {
5570 + struct qman_fq fq;
5571 + struct net_device *net_dev;
5572 + struct ceetm_class *ceetm_cls;
5573 +};
5574 +
5575 +struct root_q {
5576 + struct Qdisc **qdiscs;
5577 + __u16 overhead;
5578 + __u32 rate;
5579 + __u32 ceil;
5580 + struct qm_ceetm_sp *sp;
5581 + struct qm_ceetm_lni *lni;
5582 + struct ceetm_qdisc_stats __percpu *qstats;
5583 +};
5584 +
5585 +struct prio_q {
5586 + __u16 qcount;
5587 + struct ceetm_class *parent;
5588 +};
5589 +
5590 +struct wbfs_q {
5591 + __u16 qcount;
5592 + int group_type;
5593 + struct ceetm_class *parent;
5594 + __u16 cr;
5595 + __u16 er;
5596 +};
5597 +
5598 +struct ceetm_qdisc {
5599 + int type; /* LNI/CHNL/WBFS */
5600 + bool shaped;
5601 + union {
5602 + struct root_q root;
5603 + struct prio_q prio;
5604 + struct wbfs_q wbfs;
5605 + };
5606 + struct Qdisc_class_hash clhash;
5607 + struct tcf_proto *filter_list; /* qdisc attached filters */
5608 +};
5609 +
5610 +/* CEETM Qdisc configuration parameters */
5611 +struct tc_ceetm_qopt {
5612 + __u32 type;
5613 + __u16 shaped;
5614 + __u16 qcount;
5615 + __u16 overhead;
5616 + __u32 rate;
5617 + __u32 ceil;
5618 + __u16 cr;
5619 + __u16 er;
5620 + __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
5621 +};
5622 +
5623 +struct root_c {
5624 + unsigned int rate;
5625 + unsigned int ceil;
5626 + unsigned int tbl;
5627 + bool wbfs_grp_a;
5628 + bool wbfs_grp_b;
5629 + bool wbfs_grp_large;
5630 + struct Qdisc *child;
5631 + struct qm_ceetm_channel *ch;
5632 +};
5633 +
5634 +struct prio_c {
5635 + bool cr;
5636 + bool er;
5637 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5638 + struct qm_ceetm_lfq *lfq;
5639 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5640 + struct qm_ceetm_ccg *ccg;
5641 + /* only one wbfs can be linked to one priority CQ */
5642 + struct Qdisc *child;
5643 + struct ceetm_class_stats __percpu *cstats;
5644 +};
5645 +
5646 +struct wbfs_c {
5647 + __u8 weight; /* The weight of the class between 1 and 248 */
5648 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5649 + struct qm_ceetm_lfq *lfq;
5650 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5651 + struct qm_ceetm_ccg *ccg;
5652 + struct ceetm_class_stats __percpu *cstats;
5653 +};
5654 +
5655 +struct ceetm_class {
5656 + struct Qdisc_class_common common;
5657 + int refcnt; /* usage count of this class */
5658 + struct tcf_proto *filter_list; /* class attached filters */
5659 + struct Qdisc *parent;
5660 + bool shaped;
5661 + int type; /* ROOT/PRIO/WBFS */
5662 + union {
5663 + struct root_c root;
5664 + struct prio_c prio;
5665 + struct wbfs_c wbfs;
5666 + };
5667 +};
5668 +
5669 +/* CEETM Class configuration parameters */
5670 +struct tc_ceetm_copt {
5671 + __u32 type;
5672 + __u16 shaped;
5673 + __u32 rate;
5674 + __u32 ceil;
5675 + __u16 tbl;
5676 + __u16 cr;
5677 + __u16 er;
5678 + __u8 weight;
5679 +};
5680 +
5681 +/* CEETM stats */
5682 +struct ceetm_qdisc_stats {
5683 + __u32 drops;
5684 +};
5685 +
5686 +struct ceetm_class_stats {
5687 + /* Software counters */
5688 + struct gnet_stats_basic_packed bstats;
5689 + __u32 ern_drop_count;
5690 + __u32 congested_count;
5691 +};
5692 +
5693 +struct tc_ceetm_xstats {
5694 + __u32 ern_drop_count;
5695 + __u32 congested_count;
5696 + /* Hardware counters */
5697 + __u64 frame_count;
5698 + __u64 byte_count;
5699 +};
5700 +
5701 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
5702 +#endif
5703 --- /dev/null
5704 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
5705 @@ -0,0 +1,1812 @@
5706 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
5707 + *
5708 + * Redistribution and use in source and binary forms, with or without
5709 + * modification, are permitted provided that the following conditions are met:
5710 + * * Redistributions of source code must retain the above copyright
5711 + * notice, this list of conditions and the following disclaimer.
5712 + * * Redistributions in binary form must reproduce the above copyright
5713 + * notice, this list of conditions and the following disclaimer in the
5714 + * documentation and/or other materials provided with the distribution.
5715 + * * Neither the name of Freescale Semiconductor nor the
5716 + * names of its contributors may be used to endorse or promote products
5717 + * derived from this software without specific prior written permission.
5718 + *
5719 + *
5720 + * ALTERNATIVELY, this software may be distributed under the terms of the
5721 + * GNU General Public License ("GPL") as published by the Free Software
5722 + * Foundation, either version 2 of that License or (at your option) any
5723 + * later version.
5724 + *
5725 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5726 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5727 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5728 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5729 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5730 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5731 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5732 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5733 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5734 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5735 + */
5736 +
5737 +#include <linux/init.h>
5738 +#include <linux/module.h>
5739 +#include <linux/of_platform.h>
5740 +#include <linux/of_net.h>
5741 +#include <linux/etherdevice.h>
5742 +#include <linux/kthread.h>
5743 +#include <linux/percpu.h>
5744 +#include <linux/highmem.h>
5745 +#include <linux/sort.h>
5746 +#include <linux/fsl_qman.h>
5747 +#include <linux/ip.h>
5748 +#include <linux/ipv6.h>
5749 +#include <linux/if_vlan.h> /* vlan_eth_hdr */
5750 +#include "dpaa_eth.h"
5751 +#include "dpaa_eth_common.h"
5752 +#ifdef CONFIG_FSL_DPAA_1588
5753 +#include "dpaa_1588.h"
5754 +#endif
5755 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5756 +#include "dpaa_debugfs.h"
5757 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5758 +#include "mac.h"
5759 +
5760 +/* Size in bytes of the FQ taildrop threshold */
5761 +#define DPA_FQ_TD 0x200000
5762 +
5763 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
5764 +struct ptp_priv_s ptp_priv;
5765 +#endif
5766 +
5767 +static struct dpa_bp *dpa_bp_array[64];
5768 +
5769 +int dpa_max_frm;
5770 +EXPORT_SYMBOL(dpa_max_frm);
5771 +
5772 +int dpa_rx_extra_headroom;
5773 +EXPORT_SYMBOL(dpa_rx_extra_headroom);
5774 +
5775 +int dpa_num_cpus = NR_CPUS;
5776 +
5777 +static const struct fqid_cell tx_confirm_fqids[] = {
5778 + {0, DPAA_ETH_TX_QUEUES}
5779 +};
5780 +
5781 +static struct fqid_cell default_fqids[][3] = {
5782 + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
5783 + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
5784 +};
5785 +
5786 +static const char fsl_qman_frame_queues[][25] = {
5787 + [RX] = "fsl,qman-frame-queues-rx",
5788 + [TX] = "fsl,qman-frame-queues-tx"
5789 +};
5790 +#ifdef CONFIG_FSL_DPAA_HOOKS
5791 +/* A set of callbacks for hooking into the fastpath at different points. */
5792 +struct dpaa_eth_hooks_s dpaa_eth_hooks;
5793 +EXPORT_SYMBOL(dpaa_eth_hooks);
5794 +/* This function should only be called on the probe paths, since it makes no
5795 + * effort to guarantee consistency of the destination hooks structure.
5796 + */
5797 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
5798 +{
5799 + if (hooks)
5800 + dpaa_eth_hooks = *hooks;
5801 + else
5802 + pr_err("NULL pointer to hooks!\n");
5803 +}
5804 +EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
5805 +#endif
5806 +
5807 +int dpa_netdev_init(struct net_device *net_dev,
5808 + const uint8_t *mac_addr,
5809 + uint16_t tx_timeout)
5810 +{
5811 + int err;
5812 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5813 + struct device *dev = net_dev->dev.parent;
5814 +
5815 + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5816 +
5817 + net_dev->features |= net_dev->hw_features;
5818 + net_dev->vlan_features = net_dev->features;
5819 +
5820 + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
5821 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
5822 +
5823 + net_dev->ethtool_ops = &dpa_ethtool_ops;
5824 +
5825 + net_dev->needed_headroom = priv->tx_headroom;
5826 + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
5827 +
5828 + err = register_netdev(net_dev);
5829 + if (err < 0) {
5830 + dev_err(dev, "register_netdev() = %d\n", err);
5831 + return err;
5832 + }
5833 +
5834 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5835 + /* create debugfs entry for this net_device */
5836 + err = dpa_netdev_debugfs_create(net_dev);
5837 + if (err) {
5838 + unregister_netdev(net_dev);
5839 + return err;
5840 + }
5841 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5842 +
5843 + return 0;
5844 +}
5845 +EXPORT_SYMBOL(dpa_netdev_init);
5846 +
5847 +int __cold dpa_start(struct net_device *net_dev)
5848 +{
5849 + int err, i;
5850 + struct dpa_priv_s *priv;
5851 + struct mac_device *mac_dev;
5852 +
5853 + priv = netdev_priv(net_dev);
5854 + mac_dev = priv->mac_dev;
5855 +
5856 + err = mac_dev->init_phy(net_dev, priv->mac_dev);
5857 + if (err < 0) {
5858 + if (netif_msg_ifup(priv))
5859 + netdev_err(net_dev, "init_phy() = %d\n", err);
5860 + return err;
5861 + }
5862 +
5863 + for_each_port_device(i, mac_dev->port_dev) {
5864 + err = fm_port_enable(mac_dev->port_dev[i]);
5865 + if (err)
5866 + goto mac_start_failed;
5867 + }
5868 +
5869 + err = priv->mac_dev->start(mac_dev);
5870 + if (err < 0) {
5871 + if (netif_msg_ifup(priv))
5872 + netdev_err(net_dev, "mac_dev->start() = %d\n", err);
5873 + goto mac_start_failed;
5874 + }
5875 +
5876 + netif_tx_start_all_queues(net_dev);
5877 +
5878 + return 0;
5879 +
5880 +mac_start_failed:
5881 + for_each_port_device(i, mac_dev->port_dev)
5882 + fm_port_disable(mac_dev->port_dev[i]);
5883 +
5884 + return err;
5885 +}
5886 +EXPORT_SYMBOL(dpa_start);
5887 +
5888 +int __cold dpa_stop(struct net_device *net_dev)
5889 +{
5890 + int _errno, i, err;
5891 + struct dpa_priv_s *priv;
5892 + struct mac_device *mac_dev;
5893 +
5894 + priv = netdev_priv(net_dev);
5895 + mac_dev = priv->mac_dev;
5896 +
5897 + netif_tx_stop_all_queues(net_dev);
5898 + /* Allow the Fman (Tx) port to process in-flight frames before we
5899 + * try switching it off.
5900 + */
5901 + usleep_range(5000, 10000);
5902 +
5903 + _errno = mac_dev->stop(mac_dev);
5904 + if (unlikely(_errno < 0))
5905 + if (netif_msg_ifdown(priv))
5906 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
5907 + _errno);
5908 +
5909 + for_each_port_device(i, mac_dev->port_dev) {
5910 + err = fm_port_disable(mac_dev->port_dev[i]);
5911 + _errno = err ? err : _errno;
5912 + }
5913 +
5914 + if (mac_dev->phy_dev)
5915 + phy_disconnect(mac_dev->phy_dev);
5916 + mac_dev->phy_dev = NULL;
5917 +
5918 + return _errno;
5919 +}
5920 +EXPORT_SYMBOL(dpa_stop);
5921 +
5922 +void __cold dpa_timeout(struct net_device *net_dev)
5923 +{
5924 + const struct dpa_priv_s *priv;
5925 + struct dpa_percpu_priv_s *percpu_priv;
5926 +
5927 + priv = netdev_priv(net_dev);
5928 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
5929 +
5930 + if (netif_msg_timer(priv))
5931 + netdev_crit(net_dev, "Transmit timeout!\n");
5932 +
5933 + percpu_priv->stats.tx_errors++;
5934 +}
5935 +EXPORT_SYMBOL(dpa_timeout);
5936 +
5937 +/* net_device */
5938 +
5939 +/**
5940 + * @param net_dev the device for which statistics are calculated
5941 + * @param stats the function fills this structure with the device's statistics
5942 + * @return the address of the structure containing the statistics
5943 + *
5944 + * Calculates the statistics for the given device by adding the statistics
5945 + * collected by each CPU.
5946 + */
5947 +void __cold
5948 +dpa_get_stats64(struct net_device *net_dev,
5949 + struct rtnl_link_stats64 *stats)
5950 +{
5951 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5952 + u64 *cpustats;
5953 + u64 *netstats = (u64 *)stats;
5954 + int i, j;
5955 + struct dpa_percpu_priv_s *percpu_priv;
5956 + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
5957 +
5958 + for_each_possible_cpu(i) {
5959 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
5960 +
5961 + cpustats = (u64 *)&percpu_priv->stats;
5962 +
5963 + for (j = 0; j < numstats; j++)
5964 + netstats[j] += cpustats[j];
5965 + }
5966 +}
5967 +EXPORT_SYMBOL(dpa_get_stats64);
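The loop above sums the per-CPU counters by viewing both structures as flat arrays of u64 fields, which is only safe while every member really is u64-sized. A compile-time guard making that assumption explicit could look like this (a sketch; it would have to live inside a function body, e.g. at the top of dpa_get_stats64()):

	BUILD_BUG_ON(sizeof(struct rtnl_link_stats64) % sizeof(u64) != 0);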
5968 +
5969 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
5970 +{
5971 + const int max_mtu = dpa_get_max_mtu();
5972 +
5973 + /* Make sure we don't exceed the Ethernet controller's MAXFRM */
5974 + if (new_mtu < 68 || new_mtu > max_mtu) {
5975 + netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
5976 + new_mtu, 68, max_mtu);
5977 + return -EINVAL;
5978 + }
5979 + net_dev->mtu = new_mtu;
5980 +
5981 + return 0;
5982 +}
5983 +EXPORT_SYMBOL(dpa_change_mtu);
5984 +
5985 +/* .ndo_init callback */
5986 +int dpa_ndo_init(struct net_device *net_dev)
5987 +{
5988 +	/* If fsl_fm_max_frm is set to a value higher than the common 1500,
5989 + * we choose conservatively and let the user explicitly set a higher
5990 + * MTU via ifconfig. Otherwise, the user may end up with different MTUs
5991 + * in the same LAN.
5992 + * If on the other hand fsl_fm_max_frm has been chosen below 1500,
5993 + * start with the maximum allowed.
5994 + */
5995 + int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
5996 +
5997 + pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
5998 + net_dev->mtu = init_mtu;
5999 +
6000 + return 0;
6001 +}
6002 +EXPORT_SYMBOL(dpa_ndo_init);
6003 +
6004 +int dpa_set_features(struct net_device *dev, netdev_features_t features)
6005 +{
6006 + /* Not much to do here for now */
6007 + dev->features = features;
6008 + return 0;
6009 +}
6010 +EXPORT_SYMBOL(dpa_set_features);
6011 +
6012 +netdev_features_t dpa_fix_features(struct net_device *dev,
6013 + netdev_features_t features)
6014 +{
6015 + netdev_features_t unsupported_features = 0;
6016 +
6017 + /* In theory we should never be requested to enable features that
6018 + * we didn't set in netdev->features and netdev->hw_features at probe
6019 + * time, but double check just to be on the safe side.
6020 + * We don't support enabling Rx csum through ethtool yet
6021 + */
6022 + unsupported_features |= NETIF_F_RXCSUM;
6023 +
6024 + features &= ~unsupported_features;
6025 +
6026 + return features;
6027 +}
6028 +EXPORT_SYMBOL(dpa_fix_features);
6029 +
6030 +#ifdef CONFIG_FSL_DPAA_TS
6031 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
6032 + const void *data)
6033 +{
6034 + u64 *ts, ns;
6035 +
6036 + ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
6037 + data);
6038 +
6039 + if (!ts || *ts == 0)
6040 + return 0;
6041 +
6042 + be64_to_cpus(ts);
6043 +
6044 +	/* multiply by the nominal period; the shift assumes
6045 +	 * DPA_PTP_NOMINAL_FREQ_PERIOD_NS is a power of 2
6046 +	 */
6045 + ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
6046 +
6047 + return ns;
6048 +}
6049 +
6050 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
6051 + struct skb_shared_hwtstamps *shhwtstamps, const void *data)
6052 +{
6053 + u64 ns;
6054 +
6055 + ns = dpa_get_timestamp_ns(priv, rx_tx, data);
6056 +
6057 + if (ns == 0)
6058 + return -EINVAL;
6059 +
6060 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
6061 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
6062 +
6063 + return 0;
6064 +}
6065 +
6066 +static void dpa_ts_tx_enable(struct net_device *dev)
6067 +{
6068 + struct dpa_priv_s *priv = netdev_priv(dev);
6069 + struct mac_device *mac_dev = priv->mac_dev;
6070 +
6071 + if (mac_dev->fm_rtc_enable)
6072 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
6073 + if (mac_dev->ptp_enable)
6074 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
6075 +
6076 + priv->ts_tx_en = true;
6077 +}
6078 +
6079 +static void dpa_ts_tx_disable(struct net_device *dev)
6080 +{
6081 + struct dpa_priv_s *priv = netdev_priv(dev);
6082 +
6083 +#if 0
6084 +/* the RTC might be needed by the Rx Ts, cannot disable here
6085 + * no separate ptp_disable API for Rx/Tx, cannot disable here
6086 + */
6087 + struct mac_device *mac_dev = priv->mac_dev;
6088 +
6089 + if (mac_dev->fm_rtc_disable)
6090 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
6091 +
6092 + if (mac_dev->ptp_disable)
6093 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
6094 +#endif
6095 +
6096 + priv->ts_tx_en = false;
6097 +}
6098 +
6099 +static void dpa_ts_rx_enable(struct net_device *dev)
6100 +{
6101 + struct dpa_priv_s *priv = netdev_priv(dev);
6102 + struct mac_device *mac_dev = priv->mac_dev;
6103 +
6104 + if (mac_dev->fm_rtc_enable)
6105 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
6106 + if (mac_dev->ptp_enable)
6107 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
6108 +
6109 + priv->ts_rx_en = true;
6110 +}
6111 +
6112 +static void dpa_ts_rx_disable(struct net_device *dev)
6113 +{
6114 + struct dpa_priv_s *priv = netdev_priv(dev);
6115 +
6116 +#if 0
6117 +/* the RTC might be needed by the Tx Ts, cannot disable here
6118 + * no separate ptp_disable API for Rx/Tx, cannot disable here
6119 + */
6120 + struct mac_device *mac_dev = priv->mac_dev;
6121 +
6122 + if (mac_dev->fm_rtc_disable)
6123 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
6124 +
6125 + if (mac_dev->ptp_disable)
6126 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
6127 +#endif
6128 +
6129 + priv->ts_rx_en = false;
6130 +}
6131 +
6132 +static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6133 +{
6134 + struct hwtstamp_config config;
6135 +
6136 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
6137 + return -EFAULT;
6138 +
6139 + switch (config.tx_type) {
6140 + case HWTSTAMP_TX_OFF:
6141 + dpa_ts_tx_disable(dev);
6142 + break;
6143 + case HWTSTAMP_TX_ON:
6144 + dpa_ts_tx_enable(dev);
6145 + break;
6146 + default:
6147 + return -ERANGE;
6148 + }
6149 +
6150 + if (config.rx_filter == HWTSTAMP_FILTER_NONE)
6151 + dpa_ts_rx_disable(dev);
6152 + else {
6153 + dpa_ts_rx_enable(dev);
6154 + /* TS is set for all frame types, not only those requested */
6155 + config.rx_filter = HWTSTAMP_FILTER_ALL;
6156 + }
6157 +
6158 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
6159 + -EFAULT : 0;
6160 +}
6161 +#endif /* CONFIG_FSL_DPAA_TS */
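dpa_ts_ioctl() follows the standard SIOCSHWTSTAMP contract, so it can be driven from userspace in the usual way. A minimal sketch (error handling omitted; fd is assumed to be any open AF_INET datagram socket):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hw_timestamping(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL, /* the driver forces ALL anyway */
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}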
6162 +
6163 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6164 +{
6165 +#ifdef CONFIG_FSL_DPAA_1588
6166 + struct dpa_priv_s *priv = netdev_priv(dev);
6167 +#endif
6168 + int ret = 0;
6169 +
6170 + /* at least one timestamping feature must be enabled */
6171 +#ifdef CONFIG_FSL_DPAA_TS
6172 + if (!netif_running(dev))
6173 +#endif
6174 + return -EINVAL;
6175 +
6176 +#ifdef CONFIG_FSL_DPAA_TS
6177 + if (cmd == SIOCSHWTSTAMP)
6178 + return dpa_ts_ioctl(dev, rq, cmd);
6179 +#endif /* CONFIG_FSL_DPAA_TS */
6180 +
6181 +#ifdef CONFIG_FSL_DPAA_1588
6182 + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
6183 + if (priv->tsu && priv->tsu->valid)
6184 + ret = dpa_ioctl_1588(dev, rq, cmd);
6185 + else
6186 + ret = -ENODEV;
6187 + }
6188 +#endif
6189 +
6190 + return ret;
6191 +}
6192 +EXPORT_SYMBOL(dpa_ioctl);
6193 +
6194 +int __cold dpa_remove(struct platform_device *of_dev)
6195 +{
6196 + int err;
6197 + struct device *dev;
6198 + struct net_device *net_dev;
6199 + struct dpa_priv_s *priv;
6200 +
6201 + dev = &of_dev->dev;
6202 + net_dev = dev_get_drvdata(dev);
6203 +
6204 + priv = netdev_priv(net_dev);
6205 +
6206 + dpaa_eth_sysfs_remove(dev);
6207 +
6208 + dev_set_drvdata(dev, NULL);
6209 + unregister_netdev(net_dev);
6210 +
6211 + err = dpa_fq_free(dev, &priv->dpa_fq_list);
6212 +
6213 + qman_delete_cgr_safe(&priv->ingress_cgr);
6214 + qman_release_cgrid(priv->ingress_cgr.cgrid);
6215 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
6216 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
6217 +
6218 + dpa_private_napi_del(net_dev);
6219 +
6220 + dpa_bp_free(priv);
6221 +
6222 + if (priv->buf_layout)
6223 + devm_kfree(dev, priv->buf_layout);
6224 +
6225 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
6226 + /* remove debugfs entry for this net_device */
6227 + dpa_netdev_debugfs_remove(net_dev);
6228 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
6229 +
6230 +#ifdef CONFIG_FSL_DPAA_1588
6231 + if (priv->tsu && priv->tsu->valid)
6232 + dpa_ptp_cleanup(priv);
6233 +#endif
6234 +
6235 + free_netdev(net_dev);
6236 +
6237 + return err;
6238 +}
6239 +EXPORT_SYMBOL(dpa_remove);
6240 +
6241 +struct mac_device * __cold __must_check
6242 +__attribute__((nonnull))
6243 +dpa_mac_probe(struct platform_device *_of_dev)
6244 +{
6245 + struct device *dpa_dev, *dev;
6246 + struct device_node *mac_node;
6247 + struct platform_device *of_dev;
6248 + struct mac_device *mac_dev;
6249 +#ifdef CONFIG_FSL_DPAA_1588
6250 + int lenp;
6251 + const phandle *phandle_prop;
6252 + struct net_device *net_dev = NULL;
6253 + struct dpa_priv_s *priv = NULL;
6254 + struct device_node *timer_node;
6255 +#endif
6256 + dpa_dev = &_of_dev->dev;
6257 +
6258 + mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
6259 + if (unlikely(mac_node == NULL)) {
6260 + dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
6261 + return ERR_PTR(-EFAULT);
6262 + }
6263 +
6264 + of_dev = of_find_device_by_node(mac_node);
6265 + if (unlikely(of_dev == NULL)) {
6266 + dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
6267 + mac_node->full_name);
6268 + of_node_put(mac_node);
6269 + return ERR_PTR(-EINVAL);
6270 + }
6271 + of_node_put(mac_node);
6272 +
6273 + dev = &of_dev->dev;
6274 +
6275 + mac_dev = dev_get_drvdata(dev);
6276 + if (unlikely(mac_dev == NULL)) {
6277 + dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
6278 + dev_name(dev));
6279 + return ERR_PTR(-EINVAL);
6280 + }
6281 +
6282 +#ifdef CONFIG_FSL_DPAA_1588
6283 + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
6284 + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6285 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6286 + (mac_dev->speed == SPEED_1000)))) {
6287 + timer_node = of_find_node_by_phandle(*phandle_prop);
6288 + if (timer_node)
6289 + net_dev = dev_get_drvdata(dpa_dev);
6290 + if (timer_node && net_dev) {
6291 + priv = netdev_priv(net_dev);
6292 + if (!dpa_ptp_init(priv))
6293 + dev_info(dev, "%s: ptp 1588 is initialized.\n",
6294 + mac_node->full_name);
6295 + }
6296 + }
6297 +#endif
6298 +
6299 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
6300 + if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6301 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6302 + (mac_dev->speed == SPEED_1000))) {
6303 + ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
6304 + if (ptp_priv.node) {
6305 + ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
6306 + if (unlikely(ptp_priv.of_dev == NULL)) {
6307 + dev_err(dpa_dev,
6308 + "Cannot find device represented by timer_node\n");
6309 + of_node_put(ptp_priv.node);
6310 + return ERR_PTR(-EINVAL);
6311 + }
6312 + ptp_priv.mac_dev = mac_dev;
6313 + }
6314 + }
6315 +#endif
6316 + return mac_dev;
6317 +}
6318 +EXPORT_SYMBOL(dpa_mac_probe);
6319 +
6320 +int dpa_set_mac_address(struct net_device *net_dev, void *addr)
6321 +{
6322 + const struct dpa_priv_s *priv;
6323 + int _errno;
6324 + struct mac_device *mac_dev;
6325 +
6326 + priv = netdev_priv(net_dev);
6327 +
6328 + _errno = eth_mac_addr(net_dev, addr);
6329 + if (_errno < 0) {
6330 + if (netif_msg_drv(priv))
6331 + netdev_err(net_dev,
6332 + "eth_mac_addr() = %d\n",
6333 + _errno);
6334 + return _errno;
6335 + }
6336 +
6337 + mac_dev = priv->mac_dev;
6338 +
6339 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
6340 + net_dev->dev_addr);
6341 + if (_errno < 0) {
6342 + if (netif_msg_drv(priv))
6343 + netdev_err(net_dev,
6344 + "mac_dev->change_addr() = %d\n",
6345 + _errno);
6346 + return _errno;
6347 + }
6348 +
6349 + return 0;
6350 +}
6351 +EXPORT_SYMBOL(dpa_set_mac_address);
6352 +
6353 +void dpa_set_rx_mode(struct net_device *net_dev)
6354 +{
6355 + int _errno;
6356 + const struct dpa_priv_s *priv;
6357 +
6358 + priv = netdev_priv(net_dev);
6359 +
6360 + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
6361 + priv->mac_dev->promisc = !priv->mac_dev->promisc;
6362 + _errno = priv->mac_dev->set_promisc(
6363 + priv->mac_dev->get_mac_handle(priv->mac_dev),
6364 + priv->mac_dev->promisc);
6365 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6366 + netdev_err(net_dev,
6367 + "mac_dev->set_promisc() = %d\n",
6368 + _errno);
6369 + }
6370 +
6371 + _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
6372 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6373 + netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
6374 +}
6375 +EXPORT_SYMBOL(dpa_set_rx_mode);
6376 +
6377 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
6378 + struct dpa_buffer_layout_s *layout)
6379 +{
6380 + struct fm_port_params params;
6381 +
6382 + /* Rx */
6383 + layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
6384 + layout[RX].parse_results = true;
6385 + layout[RX].hash_results = true;
6386 +#ifdef CONFIG_FSL_DPAA_TS
6387 + layout[RX].time_stamp = true;
6388 +#endif
6389 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
6390 + layout[RX].manip_extra_space = params.manip_extra_space;
6391 + /* a value of zero for data alignment means "don't care", so align to
6392 + * a non-zero value to prevent FMD from using its own default
6393 + */
6394 + layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6395 +
6396 + /* Tx */
6397 + layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
6398 + layout[TX].parse_results = true;
6399 + layout[TX].hash_results = true;
6400 +#ifdef CONFIG_FSL_DPAA_TS
6401 + layout[TX].time_stamp = true;
6402 +#endif
6403 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
6404 + layout[TX].manip_extra_space = params.manip_extra_space;
6405 + layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6406 +}
6407 +EXPORT_SYMBOL(dpa_set_buffers_layout);
6408 +
6409 +int __attribute__((nonnull))
6410 +dpa_bp_alloc(struct dpa_bp *dpa_bp)
6411 +{
6412 + int err;
6413 + struct bman_pool_params bp_params;
6414 + struct platform_device *pdev;
6415 +
6416 + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
6417 + pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
6418 + return -EINVAL;
6419 + }
6420 +
6421 + memset(&bp_params, 0, sizeof(struct bman_pool_params));
6422 +#ifdef CONFIG_FMAN_PFC
6423 + bp_params.flags = BMAN_POOL_FLAG_THRESH;
6424 + bp_params.thresholds[0] = bp_params.thresholds[2] =
6425 + CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
6426 + bp_params.thresholds[1] = bp_params.thresholds[3] =
6427 + CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
6428 +#endif
6429 +
6430 + /* If the pool is already specified, we only create one per bpid */
6431 + if (dpa_bpid2pool_use(dpa_bp->bpid))
6432 + return 0;
6433 +
6434 + if (dpa_bp->bpid == 0)
6435 + bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
6436 + else
6437 + bp_params.bpid = dpa_bp->bpid;
6438 +
6439 + dpa_bp->pool = bman_new_pool(&bp_params);
6440 + if (unlikely(dpa_bp->pool == NULL)) {
6441 + pr_err("bman_new_pool() failed\n");
6442 + return -ENODEV;
6443 + }
6444 +
6445 + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
6446 +
6447 + pdev = platform_device_register_simple("dpaa_eth_bpool",
6448 + dpa_bp->bpid, NULL, 0);
6449 + if (IS_ERR(pdev)) {
6450 + pr_err("platform_device_register_simple() failed\n");
6451 + err = PTR_ERR(pdev);
6452 + goto pdev_register_failed;
6453 + }
6454 + {
6455 + struct dma_map_ops *ops = get_dma_ops(&pdev->dev);
6456 + ops->dma_supported = NULL;
6457 + }
6458 + err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
6459 + if (err) {
6460 + pr_err("dma_coerce_mask_and_coherent() failed\n");
6461 + goto pdev_mask_failed;
6462 + }
6463 +#ifdef CONFIG_FMAN_ARM
6464 + /* force coherency */
6465 + pdev->dev.archdata.dma_coherent = true;
6466 + arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
6467 +#endif
6468 +
6469 + dpa_bp->dev = &pdev->dev;
6470 +
6471 + if (dpa_bp->seed_cb) {
6472 + err = dpa_bp->seed_cb(dpa_bp);
6473 + if (err)
6474 + goto pool_seed_failed;
6475 + }
6476 +
6477 + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
6478 +
6479 + return 0;
6480 +
6481 +pool_seed_failed:
6482 +pdev_mask_failed:
6483 + platform_device_unregister(pdev);
6484 +pdev_register_failed:
6485 + bman_free_pool(dpa_bp->pool);
6486 +
6487 + return err;
6488 +}
6489 +EXPORT_SYMBOL(dpa_bp_alloc);
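dpa_bp_alloc() creates at most one bman pool per bpid: a second call with the same non-zero bpid just takes a reference through dpa_bpid2pool_use() and returns early, and the pool is only torn down when the last user reaches _dpa_bp_free(). A sketch of two ports sharing a pool (illustrative values, as it might appear in a probe path; only the fields this function checks are shown):

	struct dpa_bp a = { .bpid = 7, .size = 2048, .config_count = 64 };
	struct dpa_bp b = { .bpid = 7, .size = 2048, .config_count = 64 };

	dpa_bp_alloc(&a);	/* creates the bman pool for bpid 7 */
	dpa_bp_alloc(&b);	/* only increments the bpid 7 refcount */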
6490 +
6491 +void dpa_bp_drain(struct dpa_bp *bp)
6492 +{
6493 + int ret, num = 8;
6494 +
6495 + do {
6496 + struct bm_buffer bmb[8];
6497 + int i;
6498 +
6499 + ret = bman_acquire(bp->pool, bmb, num, 0);
6500 + if (ret < 0) {
6501 + if (num == 8) {
6502 + /* we have less than 8 buffers left;
6503 + * drain them one by one
6504 + */
6505 + num = 1;
6506 + ret = 1;
6507 + continue;
6508 + } else {
6509 + /* Pool is fully drained */
6510 + break;
6511 + }
6512 + }
6513 +
6514 + for (i = 0; i < num; i++) {
6515 + dma_addr_t addr = bm_buf_addr(&bmb[i]);
6516 +
6517 + dma_unmap_single(bp->dev, addr, bp->size,
6518 + DMA_BIDIRECTIONAL);
6519 +
6520 + bp->free_buf_cb(phys_to_virt(addr));
6521 + }
6522 + } while (ret > 0);
6523 +}
6524 +EXPORT_SYMBOL(dpa_bp_drain);
6525 +
6526 +static void __cold __attribute__((nonnull))
6527 +_dpa_bp_free(struct dpa_bp *dpa_bp)
6528 +{
6529 + struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
6530 +
6531 +	/* the mapping between bpid and dpa_bp is done very late in the
6532 +	 * allocation procedure; if something failed before the mapping, the bp
6533 +	 * was not configured and there is nothing to clean up below
6534 +	 */
6535 + if (!bp)
6536 + return;
6537 +
6538 + if (!atomic_dec_and_test(&bp->refs))
6539 + return;
6540 +
6541 + if (bp->free_buf_cb)
6542 + dpa_bp_drain(bp);
6543 +
6544 + dpa_bp_array[bp->bpid] = NULL;
6545 + bman_free_pool(bp->pool);
6546 +
6547 + if (bp->dev)
6548 + platform_device_unregister(to_platform_device(bp->dev));
6549 +}
6550 +
6551 +void __cold __attribute__((nonnull))
6552 +dpa_bp_free(struct dpa_priv_s *priv)
6553 +{
6554 + int i;
6555 +
6556 + if (priv->dpa_bp)
6557 + for (i = 0; i < priv->bp_count; i++)
6558 + _dpa_bp_free(&priv->dpa_bp[i]);
6559 +}
6560 +EXPORT_SYMBOL(dpa_bp_free);
6561 +
6562 +struct dpa_bp *dpa_bpid2pool(int bpid)
6563 +{
6564 + return dpa_bp_array[bpid];
6565 +}
6566 +EXPORT_SYMBOL(dpa_bpid2pool);
6567 +
6568 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
6569 +{
6570 + dpa_bp_array[bpid] = dpa_bp;
6571 + atomic_set(&dpa_bp->refs, 1);
6572 +}
6573 +
6574 +bool dpa_bpid2pool_use(int bpid)
6575 +{
6576 + if (dpa_bpid2pool(bpid)) {
6577 + atomic_inc(&dpa_bp_array[bpid]->refs);
6578 + return true;
6579 + }
6580 +
6581 + return false;
6582 +}
6583 +
6584 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
6585 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
6586 + struct net_device *sb_dev,
6587 + select_queue_fallback_t fallback)
6588 +{
6589 + return dpa_get_queue_mapping(skb);
6590 +}
6591 +EXPORT_SYMBOL(dpa_select_queue);
6592 +#endif
6593 +
6594 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
6595 + u32 fq_start,
6596 + u32 fq_count,
6597 + struct list_head *list,
6598 + enum dpa_fq_type fq_type)
6599 +{
6600 + int i;
6601 + struct dpa_fq *dpa_fq;
6602 +
6603 + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
6604 + if (dpa_fq == NULL)
6605 + return NULL;
6606 +
6607 + for (i = 0; i < fq_count; i++) {
6608 + dpa_fq[i].fq_type = fq_type;
6609 + if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
6610 + dpa_fq[i].fqid = fq_start ?
6611 + DPAA_ETH_FQ_DELTA + fq_start + i : 0;
6612 + else
6613 + dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
6614 +
6615 + list_add_tail(&dpa_fq[i].list, list);
6616 + }
6617 +
6618 +#ifdef CONFIG_FMAN_PFC
6619 + if (fq_type == FQ_TYPE_TX)
6620 + for (i = 0; i < fq_count; i++)
6621 + dpa_fq[i].wq = i / dpa_num_cpus;
6622 + else
6623 +#endif
6624 + for (i = 0; i < fq_count; i++)
6625 + _dpa_assign_wq(dpa_fq + i);
6626 +
6627 + return dpa_fq;
6628 +}
6629 +EXPORT_SYMBOL(dpa_fq_alloc);
6630 +
6631 +/* Probing of FQs for MACful ports */
6632 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
6633 + struct fm_port_fqs *port_fqs,
6634 + bool alloc_tx_conf_fqs,
6635 + enum port_type ptype)
6636 +{
6637 + struct fqid_cell *fqids = NULL;
6638 + const void *fqids_off = NULL;
6639 + struct dpa_fq *dpa_fq = NULL;
6640 + struct device_node *np = dev->of_node;
6641 + int num_ranges;
6642 + int i, lenp;
6643 +
6644 + if (ptype == TX && alloc_tx_conf_fqs) {
6645 + if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
6646 + tx_confirm_fqids->count, list,
6647 + FQ_TYPE_TX_CONF_MQ))
6648 + goto fq_alloc_failed;
6649 + }
6650 +
6651 + fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
6652 + if (fqids_off == NULL) {
6653 + /* No dts definition, so use the defaults. */
6654 + fqids = default_fqids[ptype];
6655 + num_ranges = 3;
6656 + } else {
6657 + num_ranges = lenp / sizeof(*fqids);
6658 +
6659 + fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
6660 + GFP_KERNEL);
6661 + if (fqids == NULL)
6662 + goto fqids_alloc_failed;
6663 +
6664 +		/* convert to CPU endianness */
6665 + for (i = 0; i < num_ranges; i++) {
6666 + fqids[i].start = be32_to_cpup(fqids_off +
6667 + i * sizeof(*fqids));
6668 + fqids[i].count = be32_to_cpup(fqids_off +
6669 + i * sizeof(*fqids) + sizeof(__be32));
6670 + }
6671 + }
6672 +
6673 + for (i = 0; i < num_ranges; i++) {
6674 + switch (i) {
6675 + case 0:
6676 + /* The first queue is the error queue */
6677 + if (fqids[i].count != 1)
6678 + goto invalid_error_queue;
6679 +
6680 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6681 + fqids[i].count, list,
6682 + ptype == RX ?
6683 + FQ_TYPE_RX_ERROR :
6684 + FQ_TYPE_TX_ERROR);
6685 + if (dpa_fq == NULL)
6686 + goto fq_alloc_failed;
6687 +
6688 + if (ptype == RX)
6689 + port_fqs->rx_errq = &dpa_fq[0];
6690 + else
6691 + port_fqs->tx_errq = &dpa_fq[0];
6692 + break;
6693 + case 1:
6694 + /* the second queue is the default queue */
6695 + if (fqids[i].count != 1)
6696 + goto invalid_default_queue;
6697 +
6698 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6699 + fqids[i].count, list,
6700 + ptype == RX ?
6701 + FQ_TYPE_RX_DEFAULT :
6702 + FQ_TYPE_TX_CONFIRM);
6703 + if (dpa_fq == NULL)
6704 + goto fq_alloc_failed;
6705 +
6706 + if (ptype == RX)
6707 + port_fqs->rx_defq = &dpa_fq[0];
6708 + else
6709 + port_fqs->tx_defq = &dpa_fq[0];
6710 + break;
6711 + default:
6712 +			/* all subsequent queues are either Rx PCD (normal or
6713 +			 * high priority) or Tx
6714 +			 */
6713 + if (ptype == RX) {
6714 + if (!dpa_fq_alloc(dev, fqids[i].start,
6715 + fqids[i].count, list,
6716 + FQ_TYPE_RX_PCD) ||
6717 + !dpa_fq_alloc(dev, fqids[i].start,
6718 + fqids[i].count, list,
6719 + FQ_TYPE_RX_PCD_HI_PRIO))
6720 + goto fq_alloc_failed;
6721 + } else {
6722 + if (!dpa_fq_alloc(dev, fqids[i].start,
6723 + fqids[i].count, list,
6724 + FQ_TYPE_TX))
6725 + goto fq_alloc_failed;
6726 + }
6727 + break;
6728 + }
6729 + }
6730 +
6731 + return 0;
6732 +
6733 +fq_alloc_failed:
6734 +fqids_alloc_failed:
6735 + dev_err(dev, "Cannot allocate memory for frame queues\n");
6736 + return -ENOMEM;
6737 +
6738 +invalid_default_queue:
6739 +invalid_error_queue:
6740 + dev_err(dev, "Too many default or error queues\n");
6741 + return -EINVAL;
6742 +}
6743 +EXPORT_SYMBOL(dpa_fq_probe_mac);
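The expected device tree layout follows from the loop above: each `fsl,qman-frame-queues-*` property is a list of (start, count) cell pairs, where the first range must describe exactly one error queue, the second exactly one default (or Tx confirmation) queue, and any further ranges the PCD or Tx queues. For instance, a hypothetical `fsl,qman-frame-queues-rx = <0x400 1 0x401 1 0x440 32>;` would declare the error FQ at 0x400, the default FQ at 0x401 and 32 PCD FQs starting at 0x440 (illustrative FQ ids only).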
6744 +
6745 +static u32 rx_pool_channel;
6746 +static DEFINE_SPINLOCK(rx_pool_channel_init);
6747 +
6748 +int dpa_get_channel(void)
6749 +{
6750 + spin_lock(&rx_pool_channel_init);
6751 + if (!rx_pool_channel) {
6752 + u32 pool;
6753 + int ret = qman_alloc_pool(&pool);
6754 + if (!ret)
6755 + rx_pool_channel = pool;
6756 + }
6757 + spin_unlock(&rx_pool_channel_init);
6758 + if (!rx_pool_channel)
6759 + return -ENOMEM;
6760 + return rx_pool_channel;
6761 +}
6762 +EXPORT_SYMBOL(dpa_get_channel);
6763 +
6764 +void dpa_release_channel(void)
6765 +{
6766 + qman_release_pool(rx_pool_channel);
6767 +}
6768 +EXPORT_SYMBOL(dpa_release_channel);
6769 +
6770 +void dpaa_eth_add_channel(u16 channel)
6771 +{
6772 + const cpumask_t *cpus = qman_affine_cpus();
6773 + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
6774 + int cpu;
6775 + struct qman_portal *portal;
6776 +
6777 + for_each_cpu(cpu, cpus) {
6778 + portal = (struct qman_portal *)qman_get_affine_portal(cpu);
6779 + qman_p_static_dequeue_add(portal, pool);
6780 + }
6781 +}
6782 +EXPORT_SYMBOL(dpaa_eth_add_channel);
6783 +
6784 +/**
6785 + * Congestion group state change notification callback.
6786 + * Stops the device's egress queues while they are congested and
6787 + * wakes them upon exiting congested state.
6788 + * Also updates some CGR-related stats.
6789 + */
5790 +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
5791 +			   int congested)
6793 +{
6794 + struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
6795 + struct dpa_priv_s, cgr_data.cgr);
6796 +
6797 + if (congested) {
6798 + priv->cgr_data.congestion_start_jiffies = jiffies;
6799 + netif_tx_stop_all_queues(priv->net_dev);
6800 + priv->cgr_data.cgr_congested_count++;
6801 + } else {
6802 + priv->cgr_data.congested_jiffies +=
6803 + (jiffies - priv->cgr_data.congestion_start_jiffies);
6804 + netif_tx_wake_all_queues(priv->net_dev);
6805 + }
6806 +}
6807 +
6808 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
6809 +{
6810 + struct qm_mcc_initcgr initcgr;
6811 + u32 cs_th;
6812 + int err;
6813 +
6814 + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
6815 + if (err < 0) {
6816 + pr_err("Error %d allocating CGR ID\n", err);
6817 + goto out_error;
6818 + }
6819 + priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
6820 +
6821 + /* Enable Congestion State Change Notifications and CS taildrop */
6822 + initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
6823 + initcgr.cgr.cscn_en = QM_CGR_EN;
6824 +
6825 + /* Set different thresholds based on the MAC speed.
6826 + * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
6827 + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
6828 + * In such cases, we ought to reconfigure the threshold, too.
6829 + */
6830 + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
6831 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
6832 + else
6833 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
6834 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
6835 +
6836 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
6837 + initcgr.cgr.cstd_en = QM_CGR_EN;
6838 +
6839 + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
6840 + &initcgr);
6841 + if (err < 0) {
6842 + pr_err("Error %d creating CGR with ID %d\n", err,
6843 + priv->cgr_data.cgr.cgrid);
6844 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
6845 + goto out_error;
6846 + }
6847 + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
6848 + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
6849 + priv->cgr_data.cgr.chan);
6850 +
6851 +out_error:
6852 + return err;
6853 +}
6854 +EXPORT_SYMBOL(dpaa_eth_cgr_init);
6855 +
6856 +static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
6857 + struct dpa_fq *fq,
6858 + const struct qman_fq *template)
6859 +{
6860 + fq->fq_base = *template;
6861 + fq->net_dev = priv->net_dev;
6862 +
6863 + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
6864 + fq->channel = priv->channel;
6865 +}
6866 +
6867 +static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
6868 + struct dpa_fq *fq,
6869 + struct fm_port *port,
6870 + const struct qman_fq *template)
6871 +{
6872 + fq->fq_base = *template;
6873 + fq->net_dev = priv->net_dev;
6874 +
6875 + if (port) {
6876 + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
6877 + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
6878 + } else {
6879 + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
6880 + }
6881 +}
6882 +
6883 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
6884 + struct fm_port *tx_port)
6885 +{
6886 + struct dpa_fq *fq;
6887 + uint16_t portals[NR_CPUS];
6888 + int cpu, portal_cnt = 0, num_portals = 0;
6889 + uint32_t pcd_fqid, pcd_fqid_hi_prio;
6890 + const cpumask_t *affine_cpus = qman_affine_cpus();
6891 + int egress_cnt = 0, conf_cnt = 0;
6892 +
6893 + /* Prepare for PCD FQs init */
6894 + for_each_cpu(cpu, affine_cpus)
6895 + portals[num_portals++] = qman_affine_channel(cpu);
6896 + if (num_portals == 0)
6897 + dev_err(priv->net_dev->dev.parent,
6898 + "No Qman software (affine) channels found");
6899 +
6900 + pcd_fqid = (priv->mac_dev) ?
6901 + DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
6902 + pcd_fqid_hi_prio = (priv->mac_dev) ?
6903 + DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
6904 +
6905 + /* Initialize each FQ in the list */
6906 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6907 + switch (fq->fq_type) {
6908 + case FQ_TYPE_RX_DEFAULT:
6909 + BUG_ON(!priv->mac_dev);
6910 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6911 + break;
6912 + case FQ_TYPE_RX_ERROR:
6913 + BUG_ON(!priv->mac_dev);
6914 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
6915 + break;
6916 + case FQ_TYPE_RX_PCD:
6917 + /* For MACless we can't have dynamic Rx queues */
6918 + BUG_ON(!priv->mac_dev && !fq->fqid);
6919 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6920 + if (!fq->fqid)
6921 + fq->fqid = pcd_fqid++;
6922 + fq->channel = portals[portal_cnt];
6923 + portal_cnt = (portal_cnt + 1) % num_portals;
6924 + break;
6925 + case FQ_TYPE_RX_PCD_HI_PRIO:
6926 + /* For MACless we can't have dynamic Hi Pri Rx queues */
6927 + BUG_ON(!priv->mac_dev && !fq->fqid);
6928 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6929 + if (!fq->fqid)
6930 + fq->fqid = pcd_fqid_hi_prio++;
6931 + fq->channel = portals[portal_cnt];
6932 + portal_cnt = (portal_cnt + 1) % num_portals;
6933 + break;
6934 + case FQ_TYPE_TX:
6935 + dpa_setup_egress(priv, fq, tx_port,
6936 + &fq_cbs->egress_ern);
6937 + /* If we have more Tx queues than the number of cores,
6938 + * just ignore the extra ones.
6939 + */
6940 + if (egress_cnt < DPAA_ETH_TX_QUEUES)
6941 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6942 + break;
6943 + case FQ_TYPE_TX_CONFIRM:
6944 + BUG_ON(!priv->mac_dev);
6945 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6946 + break;
6947 + case FQ_TYPE_TX_CONF_MQ:
6948 + BUG_ON(!priv->mac_dev);
6949 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6950 + priv->conf_fqs[conf_cnt++] = &fq->fq_base;
6951 + break;
6952 + case FQ_TYPE_TX_ERROR:
6953 + BUG_ON(!priv->mac_dev);
6954 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
6955 + break;
6956 + default:
6957 + dev_warn(priv->net_dev->dev.parent,
6958 + "Unknown FQ type detected!\n");
6959 + break;
6960 + }
6961 + }
6962 +
6963 + /* The number of Tx queues may be smaller than the number of cores, if
6964 + * the Tx queue range is specified in the device tree instead of being
6965 + * dynamically allocated.
6966 + * Make sure all CPUs receive a corresponding Tx queue.
6967 + */
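+	/* (E.g. with eight egress slots and only two Tx FQs on the list,
+	 * each FQ would end up backing four slots.)
+	 */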
6968 + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
6969 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6970 + if (fq->fq_type != FQ_TYPE_TX)
6971 + continue;
6972 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6973 + if (egress_cnt == DPAA_ETH_TX_QUEUES)
6974 + break;
6975 + }
6976 + }
6977 +}
6978 +EXPORT_SYMBOL(dpa_fq_setup);
6979 +
6980 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
6981 +{
6982 + int _errno;
6983 + const struct dpa_priv_s *priv;
6984 + struct device *dev;
6985 + struct qman_fq *fq;
6986 + struct qm_mcc_initfq initfq;
6987 + struct qman_fq *confq;
6988 + int queue_id;
6989 +
6990 + priv = netdev_priv(dpa_fq->net_dev);
6991 + dev = dpa_fq->net_dev->dev.parent;
6992 +
6993 + if (dpa_fq->fqid == 0)
6994 + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
6995 +
6996 + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
6997 +
6998 + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
6999 + if (_errno) {
7000 + dev_err(dev, "qman_create_fq() failed\n");
7001 + return _errno;
7002 + }
7003 + fq = &dpa_fq->fq_base;
7004 +
7005 + if (dpa_fq->init) {
7006 + memset(&initfq, 0, sizeof(initfq));
7007 +
7008 + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
7009 + /* FIXME: why would we want to keep an empty FQ in cache? */
7010 + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
7011 +
7012 + /* Try to reduce the number of portal interrupts for
7013 + * Tx Confirmation FQs.
7014 + */
7015 + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
7016 + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
7017 +
7018 + /* FQ placement */
7019 + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
7020 +
7021 + initfq.fqd.dest.channel = dpa_fq->channel;
7022 + initfq.fqd.dest.wq = dpa_fq->wq;
7023 +
7024 + /* Put all egress queues in a congestion group of their own.
7025 +		 * Strictly speaking, the Tx confirmation queues are Rx FQs,
7026 + * rather than Tx - but they nonetheless account for the
7027 + * memory footprint on behalf of egress traffic. We therefore
7028 + * place them in the netdev's CGR, along with the Tx FQs.
7029 + */
7030 + if (dpa_fq->fq_type == FQ_TYPE_TX ||
7031 + dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
7032 + dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
7033 + initfq.we_mask |= QM_INITFQ_WE_CGID;
7034 + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
7035 + initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
7036 + /* Set a fixed overhead accounting, in an attempt to
7037 + * reduce the impact of fixed-size skb shells and the
7038 + * driver's needed headroom on system memory. This is
7039 + * especially the case when the egress traffic is
7040 + * composed of small datagrams.
7041 + * Unfortunately, QMan's OAL value is capped to an
7042 + * insufficient value, but even that is better than
7043 + * no overhead accounting at all.
7044 + */
7045 + initfq.we_mask |= QM_INITFQ_WE_OAC;
7046 + initfq.fqd.oac_init.oac = QM_OAC_CG;
7047 + initfq.fqd.oac_init.oal =
7048 + (signed char)(min(sizeof(struct sk_buff) +
7049 + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
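+			/* (OAL is a per-frame byte adjustment added to the
+			 * CG-accounted frame length, hence the skb-plus-headroom
+			 * estimate above, clamped to FSL_QMAN_MAX_OAL.)
+			 */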
7050 + }
7051 +
7052 + if (td_enable) {
7053 + initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
7054 + qm_fqd_taildrop_set(&initfq.fqd.td,
7055 + DPA_FQ_TD, 1);
7056 + initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
7057 + }
7058 +
7059 + /* Configure the Tx confirmation queue, now that we know
7060 + * which Tx queue it pairs with.
7061 + */
7062 + if (dpa_fq->fq_type == FQ_TYPE_TX) {
7063 + queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
7064 + if (queue_id >= 0) {
7065 + confq = priv->conf_fqs[queue_id];
7066 + if (confq) {
7067 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
7068 + /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
7069 + * A2V=1 (contextA A2 field is valid)
7070 + * A0V=1 (contextA A0 field is valid)
7071 + * B0V=1 (contextB field is valid)
7072 + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
7073 + * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
7074 + */
7075 + initfq.fqd.context_a.hi = 0x1e000000;
7076 + initfq.fqd.context_a.lo = 0x80000000;
7077 + }
7078 + }
7079 + }
7080 +
7081 + /* Put all *private* ingress queues in our "ingress CGR". */
7082 + if (priv->use_ingress_cgr &&
7083 + (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
7084 + dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
7085 + dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
7086 + dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
7087 + initfq.we_mask |= QM_INITFQ_WE_CGID;
7088 + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
7089 + initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
7090 + /* Set a fixed overhead accounting, just like for the
7091 + * egress CGR.
7092 + */
7093 + initfq.we_mask |= QM_INITFQ_WE_OAC;
7094 + initfq.fqd.oac_init.oac = QM_OAC_CG;
7095 + initfq.fqd.oac_init.oal =
7096 + (signed char)(min(sizeof(struct sk_buff) +
7097 + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
7098 + }
7099 +
7100 + /* Initialization common to all ingress queues */
7101 + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
7102 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
7103 + initfq.fqd.fq_ctrl |=
7104 + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
7105 + initfq.fqd.context_a.stashing.exclusive =
7106 + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
7107 + QM_STASHING_EXCL_ANNOTATION;
7108 + initfq.fqd.context_a.stashing.data_cl = 2;
7109 + initfq.fqd.context_a.stashing.annotation_cl = 1;
7110 + initfq.fqd.context_a.stashing.context_cl =
7111 + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
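+			/* Stashing sizes are expressed in 64-byte cache lines:
+			 * the values above ask QMan to stash up to 128 B of
+			 * frame data and 64 B of annotation, plus enough lines
+			 * to cover struct qman_fq (hence the DIV_ROUND_UP
+			 * by 64).
+			 */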
7112 + }
7113 +
7114 + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
7115 + if (_errno < 0) {
7116 + if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) {
7117 + dpa_fq->init = 0;
7118 + } else {
7119 + dev_err(dev, "qman_init_fq(%u) = %d\n",
7120 + qman_fq_fqid(fq), _errno);
7121 + qman_destroy_fq(fq, 0);
7122 + }
7123 + return _errno;
7124 + }
7125 + }
7126 +
7127 + dpa_fq->fqid = qman_fq_fqid(fq);
7128 +
7129 + return 0;
7130 +}
7131 +EXPORT_SYMBOL(dpa_fq_init);
7132 +
7133 +int __cold __attribute__((nonnull))
7134 +_dpa_fq_free(struct device *dev, struct qman_fq *fq)
7135 +{
7136 + int _errno, __errno;
7137 + struct dpa_fq *dpa_fq;
7138 + const struct dpa_priv_s *priv;
7139 +
7140 + _errno = 0;
7141 +
7142 + dpa_fq = container_of(fq, struct dpa_fq, fq_base);
7143 + priv = netdev_priv(dpa_fq->net_dev);
7144 +
7145 + if (dpa_fq->init) {
7146 + _errno = qman_retire_fq(fq, NULL);
7147 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
7148 + dev_err(dev, "qman_retire_fq(%u) = %d\n",
7149 + qman_fq_fqid(fq), _errno);
7150 +
7151 + __errno = qman_oos_fq(fq);
7152 + if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
7153 + dev_err(dev, "qman_oos_fq(%u) = %d\n",
7154 + qman_fq_fqid(fq), __errno);
7155 + if (_errno >= 0)
7156 + _errno = __errno;
7157 + }
7158 + }
7159 +
7160 + qman_destroy_fq(fq, 0);
7161 + list_del(&dpa_fq->list);
7162 +
7163 + return _errno;
7164 +}
7165 +EXPORT_SYMBOL(_dpa_fq_free);
7166 +
7167 +int __cold __attribute__((nonnull))
7168 +dpa_fq_free(struct device *dev, struct list_head *list)
7169 +{
7170 + int _errno, __errno;
7171 + struct dpa_fq *dpa_fq, *tmp;
7172 +
7173 + _errno = 0;
7174 + list_for_each_entry_safe(dpa_fq, tmp, list, list) {
7175 + __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
7176 + if (unlikely(__errno < 0) && _errno >= 0)
7177 + _errno = __errno;
7178 + }
7179 +
7180 + return _errno;
7181 +}
7182 +EXPORT_SYMBOL(dpa_fq_free);
7183 +
7184 +int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable)
7185 +{
7186 + int _errno, __errno;
7187 + struct dpa_fq *dpa_fq, *tmp;
7188 + static bool print_msg __read_mostly;
7189 +
7190 + _errno = 0;
7191 + print_msg = true;
7192 + list_for_each_entry_safe(dpa_fq, tmp, list, list) {
7193 + __errno = dpa_fq_init(dpa_fq, td_enable);
7194 + if (unlikely(__errno < 0) && _errno >= 0) {
7195 + if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) {
7196 + if (print_msg) {
7197 + dev_warn(dev,
7198 +					"Skipping RX PCD high-priority FQ initialization\n");
7199 + print_msg = false;
7200 + }
7201 + if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq))
7202 + dev_warn(dev,
7203 + "Error freeing frame queues\n");
7204 + } else {
7205 + _errno = __errno;
7206 + break;
7207 + }
7208 + }
7209 + }
7210 +
7211 + return _errno;
7212 +}
7213 +EXPORT_SYMBOL(dpa_fqs_init);
+
7214 +static void
7215 +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
7216 + struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
7217 +{
7218 + struct fm_port_params tx_port_param;
7219 + bool frag_enabled = false;
7220 +
7221 + memset(&tx_port_param, 0, sizeof(tx_port_param));
7222 + dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
7223 + buf_layout, frag_enabled);
7224 +}
7225 +
7226 +static void
7227 +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
7228 + struct dpa_fq *errq, struct dpa_fq *defq,
7229 + struct dpa_buffer_layout_s *buf_layout)
7230 +{
7231 + struct fm_port_params rx_port_param;
7232 + int i;
7233 + bool frag_enabled = false;
7234 +
7235 + memset(&rx_port_param, 0, sizeof(rx_port_param));
7236 + count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
7237 + rx_port_param.num_pools = (uint8_t)count;
7238 + for (i = 0; i < count; i++) {
7239 + if (i >= rx_port_param.num_pools)
7240 + break;
7241 + rx_port_param.pool_param[i].id = bp[i].bpid;
7242 + rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
7243 + }
7244 +
7245 + dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
7246 + buf_layout, frag_enabled);
7247 +}
7248 +
7249 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
7250 +/* Defined as weak, to be implemented by the FMan PCD tester. */
7251 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
7252 +__attribute__((weak));
7253 +
7254 +int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
7255 +#else
7256 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
7257 +
7258 +int dpa_free_pcd_fqids(struct device *, uint32_t);
7259 +
7260 +#endif /* CONFIG_FSL_SDK_FMAN_TEST */
7261 +
7263 +int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
7264 + uint8_t alignment, uint32_t *base_fqid)
7265 +{
7266 + dev_crit(dev, "callback not implemented!\n");
7267 +
7268 + return 0;
7269 +}
7270 +
7271 +int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
7272 +{
7274 + dev_crit(dev, "callback not implemented!\n");
7275 +
7276 + return 0;
7277 +}
7278 +
7279 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7280 + struct dpa_bp *bp, size_t count,
7281 + struct fm_port_fqs *port_fqs,
7282 + struct dpa_buffer_layout_s *buf_layout,
7283 + struct device *dev)
7284 +{
7285 + struct fm_port_pcd_param rx_port_pcd_param;
7286 + struct fm_port *rxport = mac_dev->port_dev[RX];
7287 + struct fm_port *txport = mac_dev->port_dev[TX];
7288 +
7289 + dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
7290 + port_fqs->tx_defq, &buf_layout[TX]);
7291 + dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
7292 + port_fqs->rx_defq, &buf_layout[RX]);
7293 +
7294 + rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
7295 + rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
7296 + rx_port_pcd_param.dev = dev;
7297 + fm_port_pcd_bind(rxport, &rx_port_pcd_param);
7298 +}
7299 +EXPORT_SYMBOL(dpaa_eth_init_ports);
7300 +
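+/* Release every buffer referenced by a scatter/gather table back to BMan.
+ * Entries are batched, up to DPA_BUFF_RELEASE_MAX per bman_release() call,
+ * for as long as consecutive entries come from the same buffer pool; the
+ * batch is flushed when the bpid changes or the final entry is reached.
+ */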
7301 +void dpa_release_sgt(struct qm_sg_entry *sgt)
7302 +{
7303 + struct dpa_bp *dpa_bp;
7304 + struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
7305 + uint8_t i = 0, j;
7306 +
7307 + memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
7308 +
7309 + do {
7310 + dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
7311 + DPA_BUG_ON(!dpa_bp);
7312 +
7313 + j = 0;
7314 + do {
7315 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
7316 + bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
7317 +
7318 +			j++;
+			i++;
7319 + } while (j < ARRAY_SIZE(bmb) &&
7320 + !qm_sg_entry_get_final(&sgt[i-1]) &&
7321 + qm_sg_entry_get_bpid(&sgt[i-1]) ==
7322 + qm_sg_entry_get_bpid(&sgt[i]));
7323 +
7324 + while (bman_release(dpa_bp->pool, bmb, j, 0))
7325 + cpu_relax();
7326 + } while (!qm_sg_entry_get_final(&sgt[i-1]));
7327 +}
7328 +EXPORT_SYMBOL(dpa_release_sgt);
7329 +
7330 +void __attribute__((nonnull))
7331 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
7332 +{
7333 + struct qm_sg_entry *sgt;
7334 + struct dpa_bp *dpa_bp;
7335 + struct bm_buffer bmb;
7336 + dma_addr_t addr;
7337 + void *vaddr;
7338 +
7339 + bmb.opaque = 0;
7340 + bm_buffer_set64(&bmb, qm_fd_addr(fd));
7341 +
7342 + dpa_bp = dpa_bpid2pool(fd->bpid);
7343 + DPA_BUG_ON(!dpa_bp);
7344 +
7345 + if (fd->format == qm_fd_sg) {
7346 + vaddr = phys_to_virt(qm_fd_addr(fd));
7347 + sgt = vaddr + dpa_fd_offset(fd);
7348 +
7349 + dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
7350 + DMA_BIDIRECTIONAL);
7351 +
7352 + dpa_release_sgt(sgt);
7353 + addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
7354 + DMA_BIDIRECTIONAL);
7355 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
7356 + dev_err(dpa_bp->dev, "DMA mapping failed");
7357 + return;
7358 + }
7359 + bm_buffer_set64(&bmb, addr);
7360 + }
7361 +
7362 + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
7363 + cpu_relax();
7364 +}
7365 +EXPORT_SYMBOL(dpa_fd_release);
7366 +
7367 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7368 + const struct qm_mr_entry *msg)
7369 +{
7370 + switch (msg->ern.rc & QM_MR_RC_MASK) {
7371 + case QM_MR_RC_CGR_TAILDROP:
7372 + percpu_priv->ern_cnt.cg_tdrop++;
7373 + break;
7374 + case QM_MR_RC_WRED:
7375 + percpu_priv->ern_cnt.wred++;
7376 + break;
7377 + case QM_MR_RC_ERROR:
7378 + percpu_priv->ern_cnt.err_cond++;
7379 + break;
7380 + case QM_MR_RC_ORPWINDOW_EARLY:
7381 + percpu_priv->ern_cnt.early_window++;
7382 + break;
7383 + case QM_MR_RC_ORPWINDOW_LATE:
7384 + percpu_priv->ern_cnt.late_window++;
7385 + break;
7386 + case QM_MR_RC_FQ_TAILDROP:
7387 + percpu_priv->ern_cnt.fq_tdrop++;
7388 + break;
7389 + case QM_MR_RC_ORPWINDOW_RETIRED:
7390 + percpu_priv->ern_cnt.fq_retired++;
7391 + break;
7392 + case QM_MR_RC_ORP_ZERO:
7393 + percpu_priv->ern_cnt.orp_zero++;
7394 + break;
7395 + }
7396 +}
7397 +EXPORT_SYMBOL(count_ern);
7398 +
7399 +/**
7400 + * Turn on HW checksum computation for this outgoing frame.
7401 + * If the current protocol is not something we support in this regard
7402 + * (or if the stack has already computed the SW checksum), we do nothing.
7403 + *
7404 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
7405 + * otherwise.
7406 + *
7407 + * Note that this function may modify the fd->cmd field and the skb data buffer
7408 + * (the Parse Results area).
7409 + */
7410 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7411 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
7412 +{
7413 + fm_prs_result_t *parse_result;
7414 + struct iphdr *iph;
7415 + struct ipv6hdr *ipv6h = NULL;
7416 + u8 l4_proto;
7417 + u16 ethertype = ntohs(skb->protocol);
7418 + int retval = 0;
7419 +
7420 + if (skb->ip_summed != CHECKSUM_PARTIAL)
7421 + return 0;
7422 +
7423 +	/* Note: the L3 csum seems to be already computed in SW, but we can't choose
7424 + * L4 alone from the FM configuration anyway.
7425 + */
7426 +
7427 + /* Fill in some fields of the Parse Results array, so the FMan
7428 + * can find them as if they came from the FMan Parser.
7429 + */
7430 + parse_result = (fm_prs_result_t *)parse_results;
7431 +
7432 + /* If we're dealing with VLAN, get the real Ethernet type */
7433 + if (ethertype == ETH_P_8021Q) {
7434 + /* We can't always assume the MAC header is set correctly
7435 + * by the stack, so reset to beginning of skb->data
7436 + */
7437 + skb_reset_mac_header(skb);
7438 + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
7439 + }
7440 +
7441 + /* Fill in the relevant L3 parse result fields
7442 + * and read the L4 protocol type
7443 + */
7444 + switch (ethertype) {
7445 + case ETH_P_IP:
7446 + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
7447 + iph = ip_hdr(skb);
7448 + DPA_BUG_ON(iph == NULL);
7449 + l4_proto = iph->protocol;
7450 + break;
7451 + case ETH_P_IPV6:
7452 + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
7453 + ipv6h = ipv6_hdr(skb);
7454 + DPA_BUG_ON(ipv6h == NULL);
7455 + l4_proto = ipv6h->nexthdr;
7456 + break;
7457 + default:
7458 + /* We shouldn't even be here */
7459 + if (netif_msg_tx_err(priv) && net_ratelimit())
7460 + netdev_alert(priv->net_dev,
7461 + "Can't compute HW csum for L3 proto 0x%x\n",
7462 + ntohs(skb->protocol));
7463 + retval = -EIO;
7464 + goto return_error;
7465 + }
7466 +
7467 + /* Fill in the relevant L4 parse result fields */
7468 + switch (l4_proto) {
7469 + case IPPROTO_UDP:
7470 + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
7471 + break;
7472 + case IPPROTO_TCP:
7473 + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
7474 + break;
7475 + default:
7476 +		/* This could just as well be a BUG() */
7477 + if (netif_msg_tx_err(priv) && net_ratelimit())
7478 + netdev_alert(priv->net_dev,
7479 + "Can't compute HW csum for L4 proto 0x%x\n",
7480 + l4_proto);
7481 + retval = -EIO;
7482 + goto return_error;
7483 + }
7484 +
7485 + /* At index 0 is IPOffset_1 as defined in the Parse Results */
7486 + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
7487 + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
7488 +
7489 + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
7490 + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
7491 +
7492 +	/* On P1023 and similar platforms, fd->cmd interpretation can be
7493 +	 * disabled by setting the CONTEXT_A ICMD bit. That bit is currently
7494 +	 * never set, so no check is needed here; if/when context_a comes
7495 +	 * into use, this bit will have to be checked.
7496 +	 */
7497 +
7498 +return_error:
7499 + return retval;
7500 +}
7501 +EXPORT_SYMBOL(dpa_enable_tx_csum);
7502 +
7503 +#ifdef CONFIG_FSL_DPAA_CEETM
7504 +void dpa_enable_ceetm(struct net_device *dev)
7505 +{
7506 + struct dpa_priv_s *priv = netdev_priv(dev);
7507 + priv->ceetm_en = true;
7508 +}
7509 +EXPORT_SYMBOL(dpa_enable_ceetm);
7510 +
7511 +void dpa_disable_ceetm(struct net_device *dev)
7512 +{
7513 + struct dpa_priv_s *priv = netdev_priv(dev);
7514 + priv->ceetm_en = false;
7515 +}
7516 +EXPORT_SYMBOL(dpa_disable_ceetm);
7517 +#endif
7518 --- /dev/null
7519 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
7520 @@ -0,0 +1,226 @@
7521 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
7522 + *
7523 + * Redistribution and use in source and binary forms, with or without
7524 + * modification, are permitted provided that the following conditions are met:
7525 + * * Redistributions of source code must retain the above copyright
7526 + * notice, this list of conditions and the following disclaimer.
7527 + * * Redistributions in binary form must reproduce the above copyright
7528 + * notice, this list of conditions and the following disclaimer in the
7529 + * documentation and/or other materials provided with the distribution.
7530 + * * Neither the name of Freescale Semiconductor nor the
7531 + * names of its contributors may be used to endorse or promote products
7532 + * derived from this software without specific prior written permission.
7533 + *
7534 + *
7535 + * ALTERNATIVELY, this software may be distributed under the terms of the
7536 + * GNU General Public License ("GPL") as published by the Free Software
7537 + * Foundation, either version 2 of that License or (at your option) any
7538 + * later version.
7539 + *
7540 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7541 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7542 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7543 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7544 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7545 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7546 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7547 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7548 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7549 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7550 + */
7551 +
7552 +#ifndef __DPAA_ETH_COMMON_H
7553 +#define __DPAA_ETH_COMMON_H
7554 +
7555 +#include <linux/etherdevice.h> /* struct net_device */
7556 +#include <linux/fsl_bman.h> /* struct bm_buffer */
7557 +#include <linux/of_platform.h> /* struct platform_device */
7558 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
7559 +
7560 +#include "dpaa_eth.h"
7561 +#include "lnxwrp_fsl_fman.h"
7562 +
7563 +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
7564 + frag_enabled) \
7565 +{ \
7566 + param.errq = errq_id; \
7567 + param.defq = defq_id; \
7568 + param.priv_data_size = buf_layout->priv_data_size; \
7569 + param.parse_results = buf_layout->parse_results; \
7570 + param.hash_results = buf_layout->hash_results; \
7571 + param.frag_enable = frag_enabled; \
7572 + param.time_stamp = buf_layout->time_stamp; \
7573 + param.manip_extra_space = buf_layout->manip_extra_space; \
7574 + param.data_align = buf_layout->data_align; \
7575 + fm_set_##type##_port_params(port, &param); \
7576 +}
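+
+/* The first macro argument is pasted into the FMan accessor name:
+ * dpaa_eth_init_port(rx, ...) expands to fm_set_rx_port_params(port, &param)
+ * and dpaa_eth_init_port(tx, ...) to fm_set_tx_port_params(port, &param).
+ */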
7577 +
7578 +#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
7579 +
7580 +#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
7581 +
7582 +#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
7583 +
7584 +#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
7585 + (((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
7586 + (_errno == -EIO))
7587 +/* return codes for the dpaa-eth hooks */
7588 +enum dpaa_eth_hook_result {
7589 + /* fd/skb was retained by the hook.
7590 + *
7591 + * On the Rx path, this means the Ethernet driver will _not_
7592 + * deliver the skb to the stack. Instead, the hook implementation
7593 + * is expected to properly dispose of the skb.
7594 + *
7595 + * On the Tx path, the Ethernet driver's dpa_tx() function will
7596 + * immediately return NETDEV_TX_OK. The hook implementation is expected
7597 +	 * to free the skb. *DO NOT* release it to BMan, or enqueue it to FMan,
7598 + * unless you know exactly what you're doing!
7599 + *
7600 + * On the confirmation/error paths, the Ethernet driver will _not_
7601 + * perform any fd cleanup, nor update the interface statistics.
7602 + */
7603 + DPAA_ETH_STOLEN,
7604 + /* fd/skb was returned to the Ethernet driver for regular processing.
7605 + * The hook is not allowed to, for instance, reallocate the skb (as if
7606 + * by linearizing, copying, cloning or reallocating the headroom).
7607 + */
7608 + DPAA_ETH_CONTINUE
7609 +};
7610 +
7611 +typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
7612 + struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
7613 +typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
7614 + struct sk_buff *skb, struct net_device *net_dev);
7615 +typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
7616 + struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
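+
+/* For illustration only (not part of the driver): a minimal no-op Rx hook
+ * matching dpaa_eth_ingress_hook_t could look like
+ *
+ *	static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
+ *						    struct net_device *net_dev,
+ *						    u32 fqid)
+ *	{
+ *		return DPAA_ETH_CONTINUE; // let the driver process the skb
+ *	}
+ *
+ * Returning DPAA_ETH_STOLEN instead would make the hook responsible for
+ * disposing of the skb itself (see enum dpaa_eth_hook_result above).
+ */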
7617 +
7618 +/* used in NAPI-related functions */
7619 +extern u16 qman_portal_max;
7620 +
7621 +/* from dpa_ethtool.c */
7622 +extern const struct ethtool_ops dpa_ethtool_ops;
7623 +
7624 +#ifdef CONFIG_FSL_DPAA_HOOKS
7625 +/* Various hooks used for unit-testing and/or fastpath optimizations.
7626 + * Currently only one set of such hooks is supported.
7627 + */
7628 +struct dpaa_eth_hooks_s {
7629 + /* Invoked on the Tx private path, immediately after receiving the skb
7630 + * from the stack.
7631 + */
7632 + dpaa_eth_egress_hook_t tx;
7633 +
7634 + /* Invoked on the Rx private path, right before passing the skb
7635 + * up the stack. At that point, the packet's protocol id has already
7636 + * been set. The skb's data pointer is now at the L3 header, and
7637 + * skb->mac_header points to the L2 header. skb->len has been adjusted
7638 + * to be the length of L3+payload (i.e., the length of the
7639 + * original frame minus the L2 header len).
7640 + * For more details on what the skb looks like, see eth_type_trans().
7641 + */
7642 + dpaa_eth_ingress_hook_t rx_default;
7643 +
7644 + /* Driver hook for the Rx error private path. */
7645 + dpaa_eth_confirm_hook_t rx_error;
7646 + /* Driver hook for the Tx confirmation private path. */
7647 + dpaa_eth_confirm_hook_t tx_confirm;
7648 + /* Driver hook for the Tx error private path. */
7649 + dpaa_eth_confirm_hook_t tx_error;
7650 +};
7651 +
7652 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
7653 +
7654 +extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
7655 +#endif
7656 +
7657 +int dpa_netdev_init(struct net_device *net_dev,
7658 + const uint8_t *mac_addr,
7659 + uint16_t tx_timeout);
7660 +int __cold dpa_start(struct net_device *net_dev);
7661 +int __cold dpa_stop(struct net_device *net_dev);
7662 +void __cold dpa_timeout(struct net_device *net_dev);
7663 +void __cold
7664 +dpa_get_stats64(struct net_device *net_dev,
7665 + struct rtnl_link_stats64 *stats);
7666 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
7667 +int dpa_ndo_init(struct net_device *net_dev);
7668 +int dpa_set_features(struct net_device *dev, netdev_features_t features);
7669 +netdev_features_t dpa_fix_features(struct net_device *dev,
7670 + netdev_features_t features);
7671 +#ifdef CONFIG_FSL_DPAA_TS
7672 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
7673 + enum port_type rx_tx, const void *data);
7674 +/* Updates the skb shared hw timestamp from the hardware timestamp */
7675 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
7676 + struct skb_shared_hwtstamps *shhwtstamps, const void *data);
7677 +#endif /* CONFIG_FSL_DPAA_TS */
7678 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7679 +int __cold dpa_remove(struct platform_device *of_dev);
7680 +struct mac_device * __cold __must_check
7681 +__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
7682 +int dpa_set_mac_address(struct net_device *net_dev, void *addr);
7683 +void dpa_set_rx_mode(struct net_device *net_dev);
7684 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
7685 + struct dpa_buffer_layout_s *layout);
7686 +int __attribute__((nonnull))
7687 +dpa_bp_alloc(struct dpa_bp *dpa_bp);
7688 +void __cold __attribute__((nonnull))
7689 +dpa_bp_free(struct dpa_priv_s *priv);
7690 +struct dpa_bp *dpa_bpid2pool(int bpid);
7691 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
7692 +bool dpa_bpid2pool_use(int bpid);
7693 +void dpa_bp_drain(struct dpa_bp *bp);
7694 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
7695 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
7696 + struct net_device *sb_dev,
7697 + select_queue_fallback_t fallback);
7698 +#endif
7699 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
7700 + u32 fq_start,
7701 + u32 fq_count,
7702 + struct list_head *list,
7703 + enum dpa_fq_type fq_type);
7704 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
7705 + struct fm_port_fqs *port_fqs,
7706 + bool tx_conf_fqs_per_core,
7707 + enum port_type ptype);
7708 +int dpa_get_channel(void);
7709 +void dpa_release_channel(void);
7710 +void dpaa_eth_add_channel(u16 channel);
7711 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
7712 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
7713 + struct fm_port *tx_port);
7714 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
7715 +int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable);
7716 +int __cold __attribute__((nonnull))
7717 +dpa_fq_free(struct device *dev, struct list_head *list);
7718 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7719 + struct dpa_bp *bp, size_t count,
7720 + struct fm_port_fqs *port_fqs,
7721 + struct dpa_buffer_layout_s *buf_layout,
7722 + struct device *dev);
7723 +void dpa_release_sgt(struct qm_sg_entry *sgt);
7724 +void __attribute__((nonnull))
7725 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
7726 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7727 + const struct qm_mr_entry *msg);
7728 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7729 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
7730 +#ifdef CONFIG_FSL_DPAA_CEETM
7731 +void dpa_enable_ceetm(struct net_device *dev);
7732 +void dpa_disable_ceetm(struct net_device *dev);
7733 +#endif
7734 +struct proxy_device {
7735 + struct mac_device *mac_dev;
7736 +};
7737 +
7738 +/* MAC device control functions exposed by the proxy interface */
7739 +int dpa_proxy_start(struct net_device *net_dev);
7740 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
7741 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
7742 + struct net_device *net_dev);
7743 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
7744 + struct net_device *net_dev);
7745 +
7746 +#endif /* __DPAA_ETH_COMMON_H */
7747 --- /dev/null
7748 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
7749 @@ -0,0 +1,381 @@
7750 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
7751 + *
7752 + * Redistribution and use in source and binary forms, with or without
7753 + * modification, are permitted provided that the following conditions are met:
7754 + * * Redistributions of source code must retain the above copyright
7755 + * notice, this list of conditions and the following disclaimer.
7756 + * * Redistributions in binary form must reproduce the above copyright
7757 + * notice, this list of conditions and the following disclaimer in the
7758 + * documentation and/or other materials provided with the distribution.
7759 + * * Neither the name of Freescale Semiconductor nor the
7760 + * names of its contributors may be used to endorse or promote products
7761 + * derived from this software without specific prior written permission.
7762 + *
7763 + *
7764 + * ALTERNATIVELY, this software may be distributed under the terms of the
7765 + * GNU General Public License ("GPL") as published by the Free Software
7766 + * Foundation, either version 2 of that License or (at your option) any
7767 + * later version.
7768 + *
7769 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7770 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7771 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7772 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7773 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7774 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7775 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7776 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7777 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7778 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7779 + */
7780 +
7781 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
7782 +#define pr_fmt(fmt) \
7783 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
7784 + KBUILD_BASENAME".c", __LINE__, __func__
7785 +#else
7786 +#define pr_fmt(fmt) \
7787 + KBUILD_MODNAME ": " fmt
7788 +#endif
7789 +
7790 +#include <linux/init.h>
7791 +#include <linux/module.h>
7792 +#include <linux/of_platform.h>
7793 +#include "dpaa_eth.h"
7794 +#include "dpaa_eth_common.h"
7795 +#include "dpaa_eth_base.h"
7796 +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
7797 +#include "mac.h"
7798 +
7799 +#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
7800 +
7801 +MODULE_LICENSE("Dual BSD/GPL");
7802 +
7803 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
7804 +
7805 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
7806 +#ifdef CONFIG_PM
7807 +
7808 +static int proxy_suspend(struct device *dev)
7809 +{
7810 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
7811 + struct mac_device *mac_dev = proxy_dev->mac_dev;
7812 + int err = 0;
7813 +
7814 + err = fm_port_suspend(mac_dev->port_dev[RX]);
7815 + if (err)
7816 + goto port_suspend_failed;
7817 +
7818 + err = fm_port_suspend(mac_dev->port_dev[TX]);
7819 + if (err)
7820 + err = fm_port_resume(mac_dev->port_dev[RX]);
7821 +
7822 +port_suspend_failed:
7823 + return err;
7824 +}
7825 +
7826 +static int proxy_resume(struct device *dev)
7827 +{
7828 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
7829 + struct mac_device *mac_dev = proxy_dev->mac_dev;
7830 + int err = 0;
7831 +
7832 + err = fm_port_resume(mac_dev->port_dev[TX]);
7833 + if (err)
7834 + goto port_resume_failed;
7835 +
7836 + err = fm_port_resume(mac_dev->port_dev[RX]);
7837 + if (err)
7838 + err = fm_port_suspend(mac_dev->port_dev[TX]);
7839 +
7840 +port_resume_failed:
7841 + return err;
7842 +}
7843 +
7844 +static const struct dev_pm_ops proxy_pm_ops = {
7845 + .suspend = proxy_suspend,
7846 + .resume = proxy_resume,
7847 +};
7848 +
7849 +#define PROXY_PM_OPS (&proxy_pm_ops)
7850 +
7851 +#else /* CONFIG_PM */
7852 +
7853 +#define PROXY_PM_OPS NULL
7854 +
7855 +#endif /* CONFIG_PM */
7856 +
7857 +static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
7858 +{
7859 + int err = 0, i;
7860 + struct device *dev;
7861 + struct device_node *dpa_node;
7862 + struct dpa_bp *dpa_bp;
7863 + struct list_head proxy_fq_list;
7864 + size_t count;
7865 + struct fm_port_fqs port_fqs;
7866 + struct dpa_buffer_layout_s *buf_layout = NULL;
7867 + struct mac_device *mac_dev;
7868 + struct proxy_device *proxy_dev;
7869 +
7870 + dev = &_of_dev->dev;
7871 +
7872 + dpa_node = dev->of_node;
7873 +
7874 + if (!of_device_is_available(dpa_node))
7875 + return -ENODEV;
7876 +
7877 + /* Get the buffer pools assigned to this interface */
7878 + dpa_bp = dpa_bp_probe(_of_dev, &count);
7879 + if (IS_ERR(dpa_bp))
7880 + return PTR_ERR(dpa_bp);
7881 +
7882 + mac_dev = dpa_mac_probe(_of_dev);
7883 + if (IS_ERR(mac_dev))
7884 + return PTR_ERR(mac_dev);
7885 +
7886 + proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
7887 + if (!proxy_dev) {
7888 + dev_err(dev, "devm_kzalloc() failed\n");
7889 + return -ENOMEM;
7890 + }
7891 +
7892 + proxy_dev->mac_dev = mac_dev;
7893 + dev_set_drvdata(dev, proxy_dev);
7894 +
7895 + /* We have physical ports, so we need to establish
7896 + * the buffer layout.
7897 + */
7898 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
7899 + GFP_KERNEL);
7900 + if (!buf_layout) {
7901 + dev_err(dev, "devm_kzalloc() failed\n");
7902 + return -ENOMEM;
7903 + }
7904 + dpa_set_buffers_layout(mac_dev, buf_layout);
7905 +
7906 + INIT_LIST_HEAD(&proxy_fq_list);
7907 +
7908 + memset(&port_fqs, 0, sizeof(port_fqs));
7909 +
7910 + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
7911 + if (!err)
7912 + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
7913 + TX);
7914 + if (err < 0) {
7915 + devm_kfree(dev, buf_layout);
7916 + return err;
7917 + }
7918 +
7919 + /* Proxy initializer - Just configures the MAC on behalf of
7920 + * another partition.
7921 + */
7922 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
7923 + buf_layout, dev);
7924 +
7925 +	/* The proxy ports still need to be enabled (below); the helper
7926 +	 * memory allocated above is no longer needed and can be freed.
7927 +	 */
7928 + devm_kfree(dev, buf_layout);
7929 + devm_kfree(dev, dpa_bp);
7930 +
7931 + /* Free FQ structures */
7932 + devm_kfree(dev, port_fqs.rx_defq);
7933 + devm_kfree(dev, port_fqs.rx_errq);
7934 + devm_kfree(dev, port_fqs.tx_defq);
7935 + devm_kfree(dev, port_fqs.tx_errq);
7936 +
7937 + for_each_port_device(i, mac_dev->port_dev) {
7938 + err = fm_port_enable(mac_dev->port_dev[i]);
7939 + if (err)
7940 + goto port_enable_fail;
7941 + }
7942 +
7943 + dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
7944 + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
7945 + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
7946 +
7947 + return 0; /* Proxy interface initialization ended */
7948 +
7949 +port_enable_fail:
7950 + for_each_port_device(i, mac_dev->port_dev)
7951 + fm_port_disable(mac_dev->port_dev[i]);
7952 + dpa_eth_proxy_remove(_of_dev);
7953 +
7954 + return err;
7955 +}
7956 +
7957 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
7958 + struct net_device *net_dev)
7959 +{
7960 + struct mac_device *mac_dev;
7961 + int _errno;
7962 +
7963 + mac_dev = proxy_dev->mac_dev;
7964 +
7965 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
7966 + net_dev->dev_addr);
7967 + if (_errno < 0)
7968 + return _errno;
7969 +
7970 + return 0;
7971 +}
7972 +EXPORT_SYMBOL(dpa_proxy_set_mac_address);
7973 +
7974 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
7975 + struct net_device *net_dev)
7976 +{
7977 + struct mac_device *mac_dev = proxy_dev->mac_dev;
7978 + int _errno;
7979 +
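+	/* The double negation folds IFF_PROMISC down to 0/1 so it compares
+	 * cleanly with the cached bool; the MAC is reprogrammed only when
+	 * the state actually changes.
+	 */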
7980 + if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
7981 + mac_dev->promisc = !mac_dev->promisc;
7982 + _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
7983 + mac_dev->promisc);
7984 + if (unlikely(_errno < 0))
7985 + netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
7986 + _errno);
7987 + }
7988 +
7989 + _errno = mac_dev->set_multi(net_dev, mac_dev);
7990 + if (unlikely(_errno < 0))
7991 + return _errno;
7992 +
7993 + return 0;
7994 +}
7995 +EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
7996 +
7997 +int dpa_proxy_start(struct net_device *net_dev)
7998 +{
7999 + struct mac_device *mac_dev;
8000 + const struct dpa_priv_s *priv;
8001 + struct proxy_device *proxy_dev;
8002 + int _errno;
8003 + int i;
8004 +
8005 + priv = netdev_priv(net_dev);
8006 + proxy_dev = (struct proxy_device *)priv->peer;
8007 + mac_dev = proxy_dev->mac_dev;
8008 +
8009 + _errno = mac_dev->init_phy(net_dev, mac_dev);
8010 + if (_errno < 0) {
8011 + if (netif_msg_drv(priv))
8012 + netdev_err(net_dev, "init_phy() = %d\n",
8013 + _errno);
8014 + return _errno;
8015 + }
8016 +
8017 + for_each_port_device(i, mac_dev->port_dev) {
8018 + _errno = fm_port_enable(mac_dev->port_dev[i]);
8019 + if (_errno)
8020 + goto port_enable_fail;
8021 + }
8022 +
8023 + _errno = mac_dev->start(mac_dev);
8024 + if (_errno < 0) {
8025 + if (netif_msg_drv(priv))
8026 + netdev_err(net_dev, "mac_dev->start() = %d\n",
8027 + _errno);
8028 + goto port_enable_fail;
8029 + }
8030 +
8031 + return _errno;
8032 +
8033 +port_enable_fail:
8034 + for_each_port_device(i, mac_dev->port_dev)
8035 + fm_port_disable(mac_dev->port_dev[i]);
8036 +
8037 + return _errno;
8038 +}
8039 +EXPORT_SYMBOL(dpa_proxy_start);
8040 +
8041 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
8042 +{
8043 + struct mac_device *mac_dev = proxy_dev->mac_dev;
8044 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
8045 + int _errno, i, err;
8046 +
8047 + _errno = mac_dev->stop(mac_dev);
8048 + if (_errno < 0) {
8049 + if (netif_msg_drv(priv))
8050 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
8051 + _errno);
8052 + return _errno;
8053 + }
8054 +
8055 + for_each_port_device(i, mac_dev->port_dev) {
8056 + err = fm_port_disable(mac_dev->port_dev[i]);
8057 + _errno = err ? err : _errno;
8058 + }
8059 +
8060 + if (mac_dev->phy_dev)
8061 + phy_disconnect(mac_dev->phy_dev);
8062 + mac_dev->phy_dev = NULL;
8063 +
8064 + return _errno;
8065 +}
8066 +EXPORT_SYMBOL(dpa_proxy_stop);
8067 +
8068 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
8069 +{
8070 + struct device *dev = &of_dev->dev;
8071 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
8072 +
8073 +	devm_kfree(dev, proxy_dev);	/* was devm_kzalloc()-ed in probe */
8074 +
8075 + dev_set_drvdata(dev, NULL);
8076 +
8077 + return 0;
8078 +}
8079 +
8080 +static const struct of_device_id dpa_proxy_match[] = {
8081 + {
8082 + .compatible = "fsl,dpa-ethernet-init"
8083 + },
8084 + {}
8085 +};
8086 +MODULE_DEVICE_TABLE(of, dpa_proxy_match);
8087 +
8088 +static struct platform_driver dpa_proxy_driver = {
8089 + .driver = {
8090 + .name = KBUILD_MODNAME "-proxy",
8091 + .of_match_table = dpa_proxy_match,
8092 + .owner = THIS_MODULE,
8093 + .pm = PROXY_PM_OPS,
8094 + },
8095 + .probe = dpaa_eth_proxy_probe,
8096 + .remove = dpa_eth_proxy_remove
8097 +};
8098 +
8099 +static int __init __cold dpa_proxy_load(void)
8100 +{
8101 + int _errno;
8102 +
8103 + pr_info(DPA_DESCRIPTION "\n");
8104 +
8105 + /* Initialize dpaa_eth mirror values */
8106 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
8107 + dpa_max_frm = fm_get_max_frm();
8108 +
8109 + _errno = platform_driver_register(&dpa_proxy_driver);
8110 + if (unlikely(_errno < 0)) {
8111 + pr_err(KBUILD_MODNAME
8112 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
8113 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
8114 + }
8115 +
8116 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
8117 + KBUILD_BASENAME".c", __func__);
8118 +
8119 + return _errno;
8120 +}
8121 +module_init(dpa_proxy_load);
8122 +
8123 +static void __exit __cold dpa_proxy_unload(void)
8124 +{
8125 + platform_driver_unregister(&dpa_proxy_driver);
8126 +
8127 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
8128 + KBUILD_BASENAME".c", __func__);
8129 +}
8130 +module_exit(dpa_proxy_unload);
8131 --- /dev/null
8132 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
8133 @@ -0,0 +1,1113 @@
8134 +/* Copyright 2012 Freescale Semiconductor Inc.
8135 + *
8136 + * Redistribution and use in source and binary forms, with or without
8137 + * modification, are permitted provided that the following conditions are met:
8138 + * * Redistributions of source code must retain the above copyright
8139 + * notice, this list of conditions and the following disclaimer.
8140 + * * Redistributions in binary form must reproduce the above copyright
8141 + * notice, this list of conditions and the following disclaimer in the
8142 + * documentation and/or other materials provided with the distribution.
8143 + * * Neither the name of Freescale Semiconductor nor the
8144 + * names of its contributors may be used to endorse or promote products
8145 + * derived from this software without specific prior written permission.
8146 + *
8147 + *
8148 + * ALTERNATIVELY, this software may be distributed under the terms of the
8149 + * GNU General Public License ("GPL") as published by the Free Software
8150 + * Foundation, either version 2 of that License or (at your option) any
8151 + * later version.
8152 + *
8153 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
8154 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
8155 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
8156 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
8157 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
8158 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
8159 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
8160 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8161 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
8162 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8163 + */
8164 +
8165 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
8166 +#define pr_fmt(fmt) \
8167 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
8168 + KBUILD_BASENAME".c", __LINE__, __func__
8169 +#else
8170 +#define pr_fmt(fmt) \
8171 + KBUILD_MODNAME ": " fmt
8172 +#endif
8173 +
8174 +#include <linux/init.h>
8175 +#include <linux/skbuff.h>
8176 +#include <linux/highmem.h>
8177 +#include <linux/fsl_bman.h>
8178 +
8179 +#include "dpaa_eth.h"
8180 +#include "dpaa_eth_common.h"
8181 +#ifdef CONFIG_FSL_DPAA_1588
8182 +#include "dpaa_1588.h"
8183 +#endif
8184 +#ifdef CONFIG_FSL_DPAA_CEETM
8185 +#include "dpaa_eth_ceetm.h"
8186 +#endif
8187 +
8188 +/* DMA map and add a page frag back into the bpool.
8189 + * @vaddr: the fragment must have been allocated with netdev_alloc_frag(),
8190 + * sized specifically to fit @dpa_bp.
8191 + */
8192 +static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
8193 + int *count_ptr)
8194 +{
8195 + struct bm_buffer bmb;
8196 + dma_addr_t addr;
8197 +
8198 + bmb.opaque = 0;
8199 +
8200 + addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
8201 + DMA_BIDIRECTIONAL);
8202 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
8203 + dev_err(dpa_bp->dev, "DMA mapping failed");
8204 + return;
8205 + }
8206 +
8207 + bm_buffer_set64(&bmb, addr);
8208 +
8209 + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
8210 + cpu_relax();
8211 +
8212 + (*count_ptr)++;
8213 +}
8214 +
8215 +static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
8216 +{
8217 + struct bm_buffer bmb[8];
8218 + void *new_buf;
8219 + dma_addr_t addr;
8220 + uint8_t i;
8221 + struct device *dev = dpa_bp->dev;
8222 + struct sk_buff *skb, **skbh;
8223 +
8224 + memset(bmb, 0, sizeof(struct bm_buffer) * 8);
8225 +
8226 + for (i = 0; i < 8; i++) {
8227 + /* We'll prepend the skb back-pointer; can't use the DPA
8228 + * priv space, because FMan will overwrite it (from offset 0)
8229 + * if it ends up being the second, third, etc. fragment
8230 + * in a S/G frame.
8231 + *
8232 + * We only need enough space to store a pointer, but allocate
8233 + * an entire cacheline for performance reasons.
8234 + */
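+		/* The resulting layout is one reserved cacheline followed by
+		 * the aligned buffer proper; the back-pointer presumably
+		 * lands just below new_buf, cf. the -1 offset passed to
+		 * DPA_WRITE_SKB_PTR below.
+		 */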
8235 +#ifndef CONFIG_PPC
8236 + if (unlikely(dpaa_errata_a010022))
8237 + new_buf = page_address(alloc_page(GFP_ATOMIC));
8238 + else
8239 +#endif
8240 + new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
8241 +
8242 + if (unlikely(!new_buf))
8243 + goto netdev_alloc_failed;
8244 + new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
8245 +
8246 + skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
8247 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
8248 + if (unlikely(!skb)) {
8249 + put_page(virt_to_head_page(new_buf));
8250 + goto build_skb_failed;
8251 + }
8252 + DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
8253 +
8254 + addr = dma_map_single(dev, new_buf,
8255 + dpa_bp->size, DMA_BIDIRECTIONAL);
8256 + if (unlikely(dma_mapping_error(dev, addr)))
8257 + goto dma_map_failed;
8258 +
8259 + bm_buffer_set64(&bmb[i], addr);
8260 + }
8261 +
8262 +release_bufs:
8263 +	/* Release the buffers. In case BMan is busy, keep trying
8264 +	 * until successful; bman_release() is guaranteed to succeed
8265 +	 * within a reasonable amount of time.
8266 + */
8267 + while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
8268 + cpu_relax();
8269 + return i;
8270 +
8271 +dma_map_failed:
8272 + kfree_skb(skb);
8273 +
8274 +build_skb_failed:
8275 +netdev_alloc_failed:
8276 + net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
8277 + WARN_ONCE(1, "Memory allocation failure on Rx\n");
8278 +
8279 + bm_buffer_set64(&bmb[i], 0);
8280 + /* Avoid releasing a completely null buffer; bman_release() requires
8281 + * at least one buffer.
8282 + */
8283 + if (likely(i))
8284 + goto release_bufs;
8285 +
8286 + return 0;
8287 +}
8288 +
8289 +/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
8290 +static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
8291 +{
8292 + int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
8293 + *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
8294 +}
8295 +
8296 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
8297 +{
8298 + int i;
8299 +
8300 + /* Give each CPU an allotment of "config_count" buffers */
8301 + for_each_possible_cpu(i) {
8302 + int j;
8303 +
8304 +		/* Although we access another CPU's counters here,
8305 +		 * this runs at boot time, so it is safe.
8306 + */
8307 + for (j = 0; j < dpa_bp->config_count; j += 8)
8308 + dpa_bp_add_8_bufs(dpa_bp, i);
8309 + }
8310 + return 0;
8311 +}
8312 +EXPORT_SYMBOL(dpa_bp_priv_seed);
8313 +
8314 +/* Add buffers (pages) for Rx processing whenever the bpool count falls below
8315 + * REFILL_THRESHOLD.
8316 + */
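+/* The two limits give refilling a hysteresis: nothing happens until the
+ * count drops below the threshold, after which the pool is topped up towards
+ * CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT in one go, rather than releasing one
+ * buffer per received frame.
+ */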
8317 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
8318 +{
8319 + int count = *countptr;
8320 + int new_bufs;
8321 +
8322 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
8323 + do {
8324 + new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
8325 + if (unlikely(!new_bufs)) {
8326 + /* Avoid looping forever if we've temporarily
8327 + * run out of memory. We'll try again at the
8328 + * next NAPI cycle.
8329 + */
8330 + break;
8331 + }
8332 + count += new_bufs;
8333 + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
8334 +
8335 + *countptr = count;
8336 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
8337 + return -ENOMEM;
8338 + }
8339 +
8340 + return 0;
8341 +}
8342 +EXPORT_SYMBOL(dpaa_eth_refill_bpools);
8343 +
8344 +/* Cleanup function for outgoing frame descriptors that were built on Tx path,
8345 + * either contiguous frames or scatter/gather ones.
8346 + * Skb freeing is not handled here.
8347 + *
8348 + * This function may be called on error paths in the Tx function, so guard
8349 + * against cases when not all relevant fd fields have been filled in.
8350 + *
8351 + * Return the skb backpointer, since for S/G frames the buffer containing it
8352 + * gets freed here.
8353 + */
8354 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
8355 + const struct qm_fd *fd)
8356 +{
8357 + const struct qm_sg_entry *sgt;
8358 + int i;
8359 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8360 + dma_addr_t addr = qm_fd_addr(fd);
8361 + dma_addr_t sg_addr;
8362 + struct sk_buff **skbh;
8363 + struct sk_buff *skb = NULL;
8364 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
8365 + int nr_frags;
8366 + int sg_len;
8367 +
8368 + /* retrieve skb back pointer */
8369 + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
8370 +
8371 + if (unlikely(fd->format == qm_fd_sg)) {
8372 + nr_frags = skb_shinfo(skb)->nr_frags;
8373 + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
8374 + sizeof(struct qm_sg_entry) * (1 + nr_frags),
8375 + dma_dir);
8376 +
8377 + /* The sgt buffer has been allocated with netdev_alloc_frag(),
8378 + * it's from lowmem.
8379 + */
8380 + sgt = phys_to_virt(addr + dpa_fd_offset(fd));
8381 +#ifdef CONFIG_FSL_DPAA_1588
8382 + if (priv->tsu && priv->tsu->valid &&
8383 + priv->tsu->hwts_tx_en_ioctl)
8384 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
8385 +#endif
8386 +#ifdef CONFIG_FSL_DPAA_TS
8387 + if (unlikely(priv->ts_tx_en &&
8388 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
8389 + struct skb_shared_hwtstamps shhwtstamps;
8390 +
8391 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
8392 + skb_tstamp_tx(skb, &shhwtstamps);
8393 + }
8394 +#endif /* CONFIG_FSL_DPAA_TS */
8395 +
8396 + /* sgt[0] is from lowmem, was dma_map_single()-ed */
8397 + sg_addr = qm_sg_addr(&sgt[0]);
8398 + sg_len = qm_sg_entry_get_len(&sgt[0]);
8399 + dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
8400 +
8401 + /* remaining pages were mapped with dma_map_page() */
8402 + for (i = 1; i <= nr_frags; i++) {
8403 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
8404 + sg_addr = qm_sg_addr(&sgt[i]);
8405 + sg_len = qm_sg_entry_get_len(&sgt[i]);
8406 + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
8407 + }
8408 +
8409 + /* Free the page frag that we allocated on Tx */
8410 + put_page(virt_to_head_page(sgt));
8411 + } else {
8412 + dma_unmap_single(dpa_bp->dev, addr,
8413 + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
8414 +#ifdef CONFIG_FSL_DPAA_TS
8415 + /* get the timestamp for non-SG frames */
8416 +#ifdef CONFIG_FSL_DPAA_1588
8417 + if (priv->tsu && priv->tsu->valid &&
8418 + priv->tsu->hwts_tx_en_ioctl)
8419 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
8420 +#endif
8421 + if (unlikely(priv->ts_tx_en &&
8422 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
8423 + struct skb_shared_hwtstamps shhwtstamps;
8424 +
8425 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
8426 + skb_tstamp_tx(skb, &shhwtstamps);
8427 + }
8428 +#endif
8429 + }
8430 +
8431 + return skb;
8432 +}
8433 +EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
8434 +
8435 +#ifndef CONFIG_FSL_DPAA_TS
8436 +bool dpa_skb_is_recyclable(struct sk_buff *skb)
8437 +{
8438 + /* No recycling possible if skb buffer is kmalloc'ed */
8439 + if (skb->head_frag == 0)
8440 + return false;
8441 +
8442 +	/* or if it's a userspace buffer */
8443 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
8444 + return false;
8445 +
8446 + /* or if it's cloned or shared */
8447 + if (skb_shared(skb) || skb_cloned(skb) ||
8448 + skb->fclone != SKB_FCLONE_UNAVAILABLE)
8449 + return false;
8450 +
8451 + return true;
8452 +}
8453 +EXPORT_SYMBOL(dpa_skb_is_recyclable);
8454 +
8455 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
8456 + uint32_t min_size,
8457 + uint16_t min_offset,
8458 + unsigned char **new_buf_start)
8459 +{
8460 + unsigned char *new;
8461 +
8462 + /* In order to recycle a buffer, the following conditions must be met:
8463 + * - buffer size no less than the buffer pool size
8464 + * - buffer size no higher than an upper limit (to avoid moving too much
8465 + * system memory to the buffer pools)
8466 + * - buffer address aligned to cacheline bytes
8467 + * - offset of data from start of buffer no lower than a minimum value
8468 + * - offset of data from start of buffer no higher than a maximum value
8469 + */
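+	/* I.e. take the highest start address that still leaves min_size
+	 * bytes up to skb_end_pointer() and min_offset bytes of headroom
+	 * before skb->data; the checks below bound it from the other side
+	 * (skb->head, DPA_MAX_FD_OFFSET, DPA_RECYCLE_MAX_SIZE).
+	 */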
8470 + new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
8471 +
8472 + /* left align to the nearest cacheline */
8473 + new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
8474 +
8475 + if (likely(new >= skb->head &&
8476 + new >= (skb->data - DPA_MAX_FD_OFFSET) &&
8477 + skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
8478 + *new_buf_start = new;
8479 + return true;
8480 + }
8481 +
8482 + return false;
8483 +}
8484 +EXPORT_SYMBOL(dpa_buf_is_recyclable);
8485 +#endif
8486 +
8487 +/* Build a linear skb around the received buffer.
8488 + * We are guaranteed there is enough room at the end of the data buffer to
8489 + * accommodate the shared info area of the skb.
8490 + */
8491 +static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
8492 + const struct qm_fd *fd, int *use_gro)
8493 +{
8494 + dma_addr_t addr = qm_fd_addr(fd);
8495 + ssize_t fd_off = dpa_fd_offset(fd);
8496 + void *vaddr;
8497 + const fm_prs_result_t *parse_results;
8498 + struct sk_buff *skb = NULL, **skbh;
8499 +
8500 + vaddr = phys_to_virt(addr);
8501 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
8502 +
8503 + /* Retrieve the skb and adjust data and tail pointers, to make sure
8504 + * forwarded skbs will have enough space on Tx if extra headers
8505 + * are added.
8506 + */
8507 + DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
8508 +
8509 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
8510 + /* When using jumbo Rx buffers, we risk having frames dropped due to
8511 + * the socket backlog reaching its maximum allowed size.
8512 + * Use the frame length for the skb truesize instead of the buffer
8513 + * size, as this is the size of the data that actually gets copied to
8514 + * userspace.
8515 + * The stack may increase the payload. In this case, it will want to
8516 + * warn us that the frame length is larger than the truesize. We
8517 + * bypass the warning.
8518 + */
8519 +#ifndef CONFIG_PPC
8520 +	/* We do not support jumbo frames on LS1043 and thus we adjust
8521 +	 * the skb truesize only when the 4K erratum is not present.
8522 + */
8523 + if (likely(!dpaa_errata_a010022))
8524 +#endif
8525 + skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
8526 +#endif
8527 +
8528 + DPA_BUG_ON(fd_off != priv->rx_headroom);
8529 + skb_reserve(skb, fd_off);
8530 + skb_put(skb, dpa_fd_length(fd));
8531 +
8532 + /* Peek at the parse results for csum validation */
8533 + parse_results = (const fm_prs_result_t *)(vaddr +
8534 + DPA_RX_PRIV_DATA_SIZE);
8535 + _dpa_process_parse_results(parse_results, fd, skb, use_gro);
8536 +
8537 +#ifdef CONFIG_FSL_DPAA_1588
8538 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
8539 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
8540 +#endif
8541 +#ifdef CONFIG_FSL_DPAA_TS
8542 + if (priv->ts_rx_en)
8543 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
8544 +#endif /* CONFIG_FSL_DPAA_TS */
8545 +
8546 + return skb;
8547 +}
8548 +
8549 +
8550 +/* Build an skb with the data of the first S/G entry in the linear portion and
8551 + * the rest of the frame as skb fragments.
8552 + *
8553 + * The page fragment holding the S/G Table is recycled here.
8554 + */
8555 +static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
8556 + const struct qm_fd *fd, int *use_gro,
8557 + int *count_ptr)
8558 +{
8559 + const struct qm_sg_entry *sgt;
8560 + dma_addr_t addr = qm_fd_addr(fd);
8561 + ssize_t fd_off = dpa_fd_offset(fd);
8562 + dma_addr_t sg_addr;
8563 + void *vaddr, *sg_vaddr;
8564 + struct dpa_bp *dpa_bp;
8565 + struct page *page, *head_page;
8566 + int frag_offset, frag_len;
8567 + int page_offset;
8568 + int i;
8569 + const fm_prs_result_t *parse_results;
8570 + struct sk_buff *skb = NULL, *skb_tmp, **skbh;
8571 +
8572 + vaddr = phys_to_virt(addr);
8573 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
8574 +
8575 + dpa_bp = priv->dpa_bp;
8576 + /* Iterate through the SGT entries and add data buffers to the skb */
8577 + sgt = vaddr + fd_off;
8578 + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
8579 + /* Extension bit is not supported */
8580 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
8581 +
8582 + /* We use a single global Rx pool */
8583 + DPA_BUG_ON(dpa_bp !=
8584 + dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
8585 +
8586 + sg_addr = qm_sg_addr(&sgt[i]);
8587 + sg_vaddr = phys_to_virt(sg_addr);
8588 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
8589 + SMP_CACHE_BYTES));
8590 +
8591 + dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
8592 + DMA_BIDIRECTIONAL);
8593 + if (i == 0) {
8594 + DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
8595 + DPA_BUG_ON(skb->head != sg_vaddr);
8596 +#ifdef CONFIG_FSL_DPAA_1588
8597 + if (priv->tsu && priv->tsu->valid &&
8598 + priv->tsu->hwts_rx_en_ioctl)
8599 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
8600 +#endif
8601 +#ifdef CONFIG_FSL_DPAA_TS
8602 + if (priv->ts_rx_en)
8603 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
8604 +#endif /* CONFIG_FSL_DPAA_TS */
8605 +
8606 +			/* In the case of an SG frame, FMan stores the Internal
8607 + * Context in the buffer containing the sgt.
8608 + * Inspect the parse results before anything else.
8609 + */
8610 + parse_results = (const fm_prs_result_t *)(vaddr +
8611 + DPA_RX_PRIV_DATA_SIZE);
8612 + _dpa_process_parse_results(parse_results, fd, skb,
8613 + use_gro);
8614 +
8615 + /* Make sure forwarded skbs will have enough space
8616 + * on Tx, if extra headers are added.
8617 + */
8618 + DPA_BUG_ON(fd_off != priv->rx_headroom);
8619 + skb_reserve(skb, fd_off);
8620 + skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
8621 + } else {
8622 + /* Not the first S/G entry; all data from buffer will
8623 + * be added in an skb fragment; fragment index is offset
8624 + * by one since first S/G entry was incorporated in the
8625 + * linear part of the skb.
8626 + *
8627 + * Caution: 'page' may be a tail page.
8628 + */
8629 + DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
8630 + page = virt_to_page(sg_vaddr);
8631 + head_page = virt_to_head_page(sg_vaddr);
8632 +
8633 + /* Free (only) the skbuff shell because its data buffer
8634 + * is already a frag in the main skb.
8635 + */
8636 + get_page(head_page);
8637 + dev_kfree_skb(skb_tmp);
8638 +
8639 + /* Compute offset in (possibly tail) page */
8640 + page_offset = ((unsigned long)sg_vaddr &
8641 + (PAGE_SIZE - 1)) +
8642 + (page_address(page) - page_address(head_page));
8643 + /* page_offset only refers to the beginning of sgt[i];
8644 + * but the buffer itself may have an internal offset.
8645 + */
8646 + frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
8647 + page_offset;
8648 + frag_len = qm_sg_entry_get_len(&sgt[i]);
8649 + /* skb_add_rx_frag() does no checking on the page; if
8650 + * we pass it a tail page, we'll end up with
8651 +			 * bad page accounting and eventually with segfaults.
8652 + */
8653 + skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
8654 + frag_len, dpa_bp->size);
8655 + }
8656 + /* Update the pool count for the current {cpu x bpool} */
8657 + (*count_ptr)--;
8658 +
8659 + if (qm_sg_entry_get_final(&sgt[i]))
8660 + break;
8661 + }
8662 + WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
8663 +
8664 + /* recycle the SGT fragment */
8665 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
8666 + dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
8667 + return skb;
8668 +}
8669 +
8670 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
8671 +static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
8672 + struct sk_buff *skb)
8673 +{
8674 + if (unlikely(priv->loop_to < 0))
8675 + return 0; /* loop disabled by default */
8676 +
8677 + skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
8678 + dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
8679 +
8680 + return 1; /* Frame Tx on the selected interface */
8681 +}
8682 +#endif
8683 +
8684 +void __hot _dpa_rx(struct net_device *net_dev,
8685 + struct qman_portal *portal,
8686 + const struct dpa_priv_s *priv,
8687 + struct dpa_percpu_priv_s *percpu_priv,
8688 + const struct qm_fd *fd,
8689 + u32 fqid,
8690 + int *count_ptr)
8691 +{
8692 + struct dpa_bp *dpa_bp;
8693 + struct sk_buff *skb;
8694 + dma_addr_t addr = qm_fd_addr(fd);
8695 + u32 fd_status = fd->status;
8696 + unsigned int skb_len;
8697 + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
8698 + int use_gro = net_dev->features & NETIF_F_GRO;
8699 +
8700 +	if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
8701 + if (netif_msg_hw(priv) && net_ratelimit())
8702 + netdev_warn(net_dev, "FD status = 0x%08x\n",
8703 + fd_status & FM_FD_STAT_RX_ERRORS);
8704 +
8705 + percpu_stats->rx_errors++;
8706 + goto _release_frame;
8707 + }
8708 +
8709 + dpa_bp = priv->dpa_bp;
8710 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
8711 +
8712 + /* prefetch the first 64 bytes of the frame or the SGT start */
8713 + dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
8714 + prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
8715 +
8716 + /* The only FD types that we may receive are contig and S/G */
8717 + DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
8718 +
8719 + if (likely(fd->format == qm_fd_contig)) {
8720 +#ifdef CONFIG_FSL_DPAA_HOOKS
8721 + /* Execute the Rx processing hook, if it exists. */
8722 + if (dpaa_eth_hooks.rx_default &&
8723 + dpaa_eth_hooks.rx_default((void *)fd, net_dev,
8724 + fqid) == DPAA_ETH_STOLEN) {
8725 + /* won't count the rx bytes in */
8726 + return;
8727 + }
8728 +#endif
8729 + skb = contig_fd_to_skb(priv, fd, &use_gro);
8730 + } else {
8731 + skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
8732 + percpu_priv->rx_sg++;
8733 + }
8734 +
8735 + /* Account for either the contig buffer or the SGT buffer (depending on
8736 + * which case we were in) having been removed from the pool.
8737 + */
8738 + (*count_ptr)--;
8739 + skb->protocol = eth_type_trans(skb, net_dev);
8740 +
8741 + /* IP Reassembled frames are allowed to be larger than MTU */
8742 + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
8743 + !(fd_status & FM_FD_IPR))) {
8744 + percpu_stats->rx_dropped++;
8745 + goto drop_bad_frame;
8746 + }
8747 +
8748 + skb_len = skb->len;
8749 +
8750 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
8751 + if (dpa_skb_loop(priv, skb)) {
8752 + percpu_stats->rx_packets++;
8753 + percpu_stats->rx_bytes += skb_len;
8754 + return;
8755 + }
8756 +#endif
8757 +
8758 + if (use_gro) {
8759 + gro_result_t gro_result;
8760 + const struct qman_portal_config *pc =
8761 + qman_p_get_portal_config(portal);
8762 + struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
8763 +
8764 + np->p = portal;
8765 + gro_result = napi_gro_receive(&np->napi, skb);
8766 + /* If frame is dropped by the stack, rx_dropped counter is
8767 + * incremented automatically, so no need for us to update it
8768 + */
8769 + if (unlikely(gro_result == GRO_DROP))
8770 + goto packet_dropped;
8771 + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
8772 + goto packet_dropped;
8773 +
8774 + percpu_stats->rx_packets++;
8775 + percpu_stats->rx_bytes += skb_len;
8776 +
8777 +packet_dropped:
8778 + return;
8779 +
8780 +drop_bad_frame:
8781 + dev_kfree_skb(skb);
8782 + return;
8783 +
8784 +_release_frame:
8785 + dpa_fd_release(net_dev, fd);
8786 +}
8787 +
8788 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
8789 + struct sk_buff *skb, struct qm_fd *fd,
8790 + int *count_ptr, int *offset)
8791 +{
8792 + struct sk_buff **skbh;
8793 + dma_addr_t addr;
8794 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8795 + struct net_device *net_dev = priv->net_dev;
8796 + int err;
8797 + enum dma_data_direction dma_dir;
8798 + unsigned char *buffer_start;
8799 + int dma_map_size;
8800 +
8801 +#ifndef CONFIG_FSL_DPAA_TS
8802 + /* Check recycling conditions; only if timestamp support is not
8803 + * enabled, otherwise we need the fd back on tx confirmation
8804 + */
8805 +
8806 + /* We can recycle the buffer if:
8807 + * - the pool is not full
8808 + * - the buffer meets the skb recycling conditions
8809 + * - the buffer meets our own (size, offset, align) conditions
8810 + */
8811 + if (likely((*count_ptr < dpa_bp->target_count) &&
8812 + dpa_skb_is_recyclable(skb) &&
8813 + dpa_buf_is_recyclable(skb, dpa_bp->size,
8814 + priv->tx_headroom, &buffer_start))) {
8815 + /* Buffer is recyclable; use the new start address
8816 + * and set fd parameters and DMA mapping direction
8817 + */
8818 + fd->bpid = dpa_bp->bpid;
8819 + DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
8820 + fd->offset = (uint16_t)(skb->data - buffer_start);
8821 + dma_dir = DMA_BIDIRECTIONAL;
8822 + dma_map_size = dpa_bp->size;
8823 +
8824 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
8825 + *offset = skb_headroom(skb) - fd->offset;
8826 + } else
8827 +#endif
8828 + {
8829 + /* Not recyclable.
8830 + * We are guaranteed to have at least tx_headroom bytes
8831 + * available, so just use that for offset.
8832 + */
8833 + fd->bpid = 0xff;
8834 + buffer_start = skb->data - priv->tx_headroom;
8835 + fd->offset = priv->tx_headroom;
8836 + dma_dir = DMA_TO_DEVICE;
8837 + dma_map_size = skb_tail_pointer(skb) - buffer_start;
8838 +
8839 + /* The buffer will be Tx-confirmed, but the TxConf cb must
8840 + * necessarily look at our Tx private data to retrieve the
8841 + * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
8842 + */
8843 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
8844 + }
8845 +
8846 + /* Enable L3/L4 hardware checksum computation.
8847 + *
8848 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
8849 + * need to write into the skb.
8850 + */
8851 + err = dpa_enable_tx_csum(priv, skb, fd,
8852 + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
8853 + if (unlikely(err < 0)) {
8854 + if (netif_msg_tx_err(priv) && net_ratelimit())
8855 + netdev_err(net_dev, "HW csum error: %d\n", err);
8856 + return err;
8857 + }
8858 +
8859 + /* Fill in the rest of the FD fields */
8860 + fd->format = qm_fd_contig;
8861 + fd->length20 = skb->len;
8862 + fd->cmd |= FM_FD_CMD_FCO;
8863 +
8864 + /* Map the entire buffer size that may be seen by FMan, but no more */
8865 + addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir);
8866 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
8867 + if (netif_msg_tx_err(priv) && net_ratelimit())
8868 + netdev_err(net_dev, "dma_map_single() failed\n");
8869 + return -EINVAL;
8870 + }
8871 + qm_fd_addr_set64(fd, addr);
8872 +
8873 + return 0;
8874 +}
8875 +EXPORT_SYMBOL(skb_to_contig_fd);
8876 +
8877 +#ifndef CONFIG_PPC
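+/* Work around the LS1043A 4K boundary erratum (A010022): split the skb so
+ * that neither the linear part nor any page fragment crosses a 4K address
+ * boundary, which the affected FMan DMA cannot handle safely.
+ */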
8878 +struct sk_buff *split_skb_at_4k_boundaries(struct sk_buff *skb)
8879 +{
8880 + unsigned int length, nr_frags, moved_len = 0;
8881 + u64 page_start;
8882 + struct page *page;
8883 + skb_frag_t *frag;
8884 + int i = 0, j = 0;
8885 +
8886 + /* make sure skb is not shared */
8887 + skb = skb_share_check(skb, GFP_ATOMIC);
8888 + if (!skb)
8889 + return NULL;
8890 +
8891 + nr_frags = skb_shinfo(skb)->nr_frags;
8892 + page_start = (u64)skb->data;
8893 +
8894 + /* split the linear part at the first 4k boundary and create one (big)
8895 + * fragment with the rest
8896 + */
8897 + if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) {
8898 + /* we'll add one more frag, make sure there's room */
8899 + if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
8900 + return NULL;
8901 +
8902 + /* next page boundary */
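+		/* e.g. page_start 0x2abc is rounded up to 0x3000 */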
8903 + page_start = (page_start + 0x1000) & ~0xFFF;
8904 + page = virt_to_page(page_start);
8905 +
8906 + /* move the rest of fragments to make room for a new one at j */
8907 + for (i = nr_frags - 1; i >= j; i--)
8908 + skb_shinfo(skb)->frags[i + 1] = skb_shinfo(skb)->frags[i];
8909 +
8910 + /* move length bytes to a paged fragment at j */
8911 + length = min((u64)0x1000,
8912 + (u64)skb->data + skb_headlen(skb) - page_start);
8913 + skb->data_len += length;
8914 + moved_len += length;
8915 + skb_fill_page_desc(skb, j++, page, 0, length);
8916 + get_page(page);
8917 + skb_shinfo(skb)->nr_frags = ++nr_frags;
8918 + }
8919 + /* adjust the tail pointer */
8920 + skb->tail -= moved_len;
8921 + j = 0;
8922 +
8923 + /* split any paged fragment that crosses a 4K boundary */
8924 + while (j < nr_frags) {
8925 + frag = &skb_shinfo(skb)->frags[j];
8926 +
8927 + /* if there is a 4K boundary between the fragment's offset and end */
8928 + if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) {
8929 + /* we'll add one more frag, make sure there's room */
8930 + if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
8931 + return NULL;
8932 +
8933 + /* new page boundary */
8934 + page_start = (u64)page_address(skb_frag_page(frag)) +
8935 + frag->page_offset + 0x1000;
8936 + page_start = (u64)page_start & ~0xFFF;
8937 + page = virt_to_page(page_start);
8938 +
8939 + /* move the rest of fragments to make room for a new one at j+1 */
8940 + for (i = nr_frags - 1; i > j; i--)
8941 + skb_shinfo(skb)->frags[i + 1] =
8942 + skb_shinfo(skb)->frags[i];
8943 +
8944 + /* move length bytes to a new paged fragment at j+1 */
8945 + length = (u64)page_address(skb_frag_page(frag)) +
8946 + frag->page_offset + frag->size - page_start;
8947 + frag->size -= length;
8948 + skb_fill_page_desc(skb, j + 1, page, 0, length);
8949 + get_page(page);
8950 + skb_shinfo(skb)->nr_frags = ++nr_frags;
8951 + }
8952 +
8953 + /* move to next frag */
8954 + j++;
8955 + }
8956 +
8957 + return skb;
8958 +}
8959 +#endif
8960 +
8961 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
8962 + struct sk_buff *skb, struct qm_fd *fd)
8963 +{
8964 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8965 + dma_addr_t addr;
8966 + dma_addr_t sg_addr;
8967 + struct sk_buff **skbh;
8968 + struct net_device *net_dev = priv->net_dev;
8969 + int sg_len, sgt_size;
8970 + int err;
8971 +
8972 + struct qm_sg_entry *sgt;
8973 + void *sgt_buf;
8974 + skb_frag_t *frag;
8975 + int i = 0, j = 0;
8976 + int nr_frags;
8977 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
8978 +
8979 + nr_frags = skb_shinfo(skb)->nr_frags;
8980 + fd->format = qm_fd_sg;
8981 +
8982 + sgt_size = sizeof(struct qm_sg_entry) * (1 + nr_frags);
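+	/* one SG entry for the skb linear part plus one per page fragment */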
8983 +
8984 + /* Get a page frag to store the SGTable, or a full page if the errata
8985 + * is in place and we need to avoid crossing a 4k boundary.
8986 + */
8987 +#ifndef CONFIG_PPC
8988 + if (unlikely(dpaa_errata_a010022))
8989 + sgt_buf = page_address(alloc_page(GFP_ATOMIC));
8990 + else
8991 +#endif
8992 + sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
8993 + if (unlikely(!sgt_buf)) {
8994 + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
8995 + return -ENOMEM;
8996 + }
8997 +
8998 +	/* the memory allocator does not zero the allocated memory */
8999 + memset(sgt_buf, 0, priv->tx_headroom + sgt_size);
9000 +
9001 + /* Enable L3/L4 hardware checksum computation.
9002 + *
9003 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
9004 + * need to write into the skb.
9005 + */
9006 + err = dpa_enable_tx_csum(priv, skb, fd,
9007 + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
9008 + if (unlikely(err < 0)) {
9009 + if (netif_msg_tx_err(priv) && net_ratelimit())
9010 + netdev_err(net_dev, "HW csum error: %d\n", err);
9011 + goto csum_failed;
9012 + }
9013 +
9014 + /* Assign the data from skb->data to the first SG list entry */
9015 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
9016 + sg_len = skb_headlen(skb);
9017 + qm_sg_entry_set_bpid(&sgt[0], 0xff);
9018 + qm_sg_entry_set_offset(&sgt[0], 0);
9019 + qm_sg_entry_set_len(&sgt[0], sg_len);
9020 + qm_sg_entry_set_ext(&sgt[0], 0);
9021 + qm_sg_entry_set_final(&sgt[0], 0);
9022 +
9023 + addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
9024 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9025 + dev_err(dpa_bp->dev, "DMA mapping failed");
9026 + err = -EINVAL;
9027 + goto sg0_map_failed;
9028 + }
9029 +
9030 + qm_sg_entry_set64(&sgt[0], addr);
9031 +
9032 + /* populate the rest of SGT entries */
9033 + for (i = 1; i <= nr_frags; i++) {
9034 + frag = &skb_shinfo(skb)->frags[i - 1];
9035 + qm_sg_entry_set_bpid(&sgt[i], 0xff);
9036 + qm_sg_entry_set_offset(&sgt[i], 0);
9037 + qm_sg_entry_set_len(&sgt[i], frag->size);
9038 + qm_sg_entry_set_ext(&sgt[i], 0);
9039 +
9040 + if (i == nr_frags)
9041 + qm_sg_entry_set_final(&sgt[i], 1);
9042 + else
9043 + qm_sg_entry_set_final(&sgt[i], 0);
9044 +
9045 + DPA_BUG_ON(!skb_frag_page(frag));
9046 + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
9047 + dma_dir);
9048 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9049 + dev_err(dpa_bp->dev, "DMA mapping failed");
9050 + err = -EINVAL;
9051 + goto sg_map_failed;
9052 + }
9053 +
9054 + /* keep the offset in the address */
9055 + qm_sg_entry_set64(&sgt[i], addr);
9056 + }
9057 +
9058 + fd->length20 = skb->len;
9059 + fd->offset = priv->tx_headroom;
9060 +
9061 + /* DMA map the SGT page */
9062 + DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
9063 + addr = dma_map_single(dpa_bp->dev, sgt_buf,
9064 + priv->tx_headroom + sgt_size,
9065 + dma_dir);
9066 +
9067 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9068 + dev_err(dpa_bp->dev, "DMA mapping failed");
9069 + err = -EINVAL;
9070 + goto sgt_map_failed;
9071 + }
9072 +
9073 + qm_fd_addr_set64(fd, addr);
9074 + fd->bpid = 0xff;
9075 + fd->cmd |= FM_FD_CMD_FCO;
9076 +
9077 + return 0;
9078 +
9079 +sgt_map_failed:
9080 +sg_map_failed:
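+	/* unwind: unmap every SG entry that was successfully mapped */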
9081 + for (j = 0; j < i; j++) {
9082 + sg_addr = qm_sg_addr(&sgt[j]);
9083 + dma_unmap_page(dpa_bp->dev, sg_addr,
9084 + qm_sg_entry_get_len(&sgt[j]), dma_dir);
9085 + }
9086 +sg0_map_failed:
9087 +csum_failed:
9088 + put_page(virt_to_head_page(sgt_buf));
9089 +
9090 + return err;
9091 +}
9092 +EXPORT_SYMBOL(skb_to_sg_fd);
9093 +
9094 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
9095 +{
9096 + struct dpa_priv_s *priv;
9097 + const int queue_mapping = dpa_get_queue_mapping(skb);
9098 + struct qman_fq *egress_fq, *conf_fq;
9099 +
9100 +#ifdef CONFIG_FSL_DPAA_HOOKS
9101 + /* If there is a Tx hook, run it. */
9102 + if (dpaa_eth_hooks.tx &&
9103 + dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
9104 + /* won't update any Tx stats */
9105 + return NETDEV_TX_OK;
9106 +#endif
9107 +
9108 + priv = netdev_priv(net_dev);
9109 +
9110 +#ifdef CONFIG_FSL_DPAA_CEETM
9111 + if (priv->ceetm_en)
9112 + return ceetm_tx(skb, net_dev);
9113 +#endif
9114 +
9115 + egress_fq = priv->egress_fqs[queue_mapping];
9116 + conf_fq = priv->conf_fqs[queue_mapping];
9117 +
9118 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
9119 +}
9120 +
9121 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
9122 + struct qman_fq *egress_fq, struct qman_fq *conf_fq)
9123 +{
9124 + struct dpa_priv_s *priv;
9125 + struct qm_fd fd;
9126 + struct dpa_percpu_priv_s *percpu_priv;
9127 + struct rtnl_link_stats64 *percpu_stats;
9128 + int err = 0;
9129 + const bool nonlinear = skb_is_nonlinear(skb);
9130 + int *countptr, offset = 0;
9131 +
9132 + priv = netdev_priv(net_dev);
9133 + /* Non-migratable context, safe to use raw_cpu_ptr */
9134 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
9135 + percpu_stats = &percpu_priv->stats;
9136 + countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
9137 +
9138 + clear_fd(&fd);
9139 +
9140 +#ifdef CONFIG_FSL_DPAA_1588
9141 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
9142 + fd.cmd |= FM_FD_CMD_UPD;
9143 +#endif
9144 +#ifdef CONFIG_FSL_DPAA_TS
9145 + if (unlikely(priv->ts_tx_en &&
9146 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
9147 + fd.cmd |= FM_FD_CMD_UPD;
9148 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
9149 +#endif /* CONFIG_FSL_DPAA_TS */
9150 +
9151 +#ifndef CONFIG_PPC
9152 + if (unlikely(dpaa_errata_a010022)) {
9153 + skb = split_skb_at_4k_boundaries(skb);
9154 + if (!skb)
9155 + goto skb_to_fd_failed;
9156 + }
9157 +#endif
9158 +
9159 + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
9160 + * we don't feed FMan with more fragments than it supports.
9161 +	 * Note that we use the first SGT entry to store the linear part of
9162 +	 * the skb, so we have room for one fragment fewer.
9163 + */
9164 + if (nonlinear &&
9165 + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
9166 +		/* Just create an S/G fd based on the skb */
9167 + err = skb_to_sg_fd(priv, skb, &fd);
9168 + percpu_priv->tx_frag_skbuffs++;
9169 + } else {
9170 + /* Make sure we have enough headroom to accommodate private
9171 + * data, parse results, etc. Normally this shouldn't happen if
9172 + * we're here via the standard kernel stack.
9173 + */
9174 + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
9175 + struct sk_buff *skb_new;
9176 +
9177 + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
9178 + if (unlikely(!skb_new)) {
9179 + dev_kfree_skb(skb);
9180 + percpu_stats->tx_errors++;
9181 + return NETDEV_TX_OK;
9182 + }
9183 + dev_kfree_skb(skb);
9184 + skb = skb_new;
9185 + }
9186 +
9187 + /* We're going to store the skb backpointer at the beginning
9188 + * of the data buffer, so we need a privately owned skb
9189 + */
9190 +
9191 + /* Code borrowed from skb_unshare(). */
9192 + if (skb_cloned(skb)) {
9193 + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
9194 + kfree_skb(skb);
9195 + skb = nskb;
9196 + /* skb_copy() has now linearized the skbuff. */
9197 + } else if (unlikely(nonlinear)) {
9198 + /* We are here because the egress skb contains
9199 + * more fragments than we support. In this case,
9200 + * we have no choice but to linearize it ourselves.
9201 + */
9202 + err = __skb_linearize(skb);
9203 + }
9204 + if (unlikely(!skb || err < 0))
9205 + /* Common out-of-memory error path */
9206 + goto enomem;
9207 +
9208 + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
9209 + }
9210 + if (unlikely(err < 0))
9211 + goto skb_to_fd_failed;
9212 +
9213 + if (fd.bpid != 0xff) {
9214 + skb_recycle(skb);
9215 + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
9216 + * but we need the skb to look as if returned by build_skb().
9217 + * We need to manually adjust the tailptr as well.
9218 + */
9219 + skb->data = skb->head + offset;
9220 + skb_reset_tail_pointer(skb);
9221 +
9222 + (*countptr)++;
9223 + percpu_priv->tx_returned++;
9224 + }
9225 +
9226 + if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
9227 + goto xmit_failed;
9228 +
9229 + return NETDEV_TX_OK;
9230 +
9231 +xmit_failed:
9232 + if (fd.bpid != 0xff) {
9233 + (*countptr)--;
9234 + percpu_priv->tx_returned--;
9235 + dpa_fd_release(net_dev, &fd);
9236 + percpu_stats->tx_errors++;
9237 + return NETDEV_TX_OK;
9238 + }
9239 + _dpa_cleanup_tx_fd(priv, &fd);
9240 +skb_to_fd_failed:
9241 +enomem:
9242 + percpu_stats->tx_errors++;
9243 + dev_kfree_skb(skb);
9244 + return NETDEV_TX_OK;
9245 +}
9246 +EXPORT_SYMBOL(dpa_tx_extended);
9247 --- /dev/null
9248 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
9249 @@ -0,0 +1,278 @@
9250 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
9251 + *
9252 + * Redistribution and use in source and binary forms, with or without
9253 + * modification, are permitted provided that the following conditions are met:
9254 + * * Redistributions of source code must retain the above copyright
9255 + * notice, this list of conditions and the following disclaimer.
9256 + * * Redistributions in binary form must reproduce the above copyright
9257 + * notice, this list of conditions and the following disclaimer in the
9258 + * documentation and/or other materials provided with the distribution.
9259 + * * Neither the name of Freescale Semiconductor nor the
9260 + * names of its contributors may be used to endorse or promote products
9261 + * derived from this software without specific prior written permission.
9262 + *
9263 + *
9264 + * ALTERNATIVELY, this software may be distributed under the terms of the
9265 + * GNU General Public License ("GPL") as published by the Free Software
9266 + * Foundation, either version 2 of that License or (at your option) any
9267 + * later version.
9268 + *
9269 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9270 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9271 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9272 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9273 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9274 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9275 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9276 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9277 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9279 + */
9280 +
9281 +#include <linux/init.h>
9282 +#include <linux/module.h>
9283 +#include <linux/kthread.h>
9284 +#include <linux/io.h>
9285 +#include <linux/of_net.h>
9286 +#include "dpaa_eth.h"
9287 +#include "mac.h" /* struct mac_device */
9288 +#ifdef CONFIG_FSL_DPAA_1588
9289 +#include "dpaa_1588.h"
9290 +#endif
9291 +
9292 +static ssize_t dpaa_eth_show_addr(struct device *dev,
9293 + struct device_attribute *attr, char *buf)
9294 +{
9295 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9296 + struct mac_device *mac_dev = priv->mac_dev;
9297 +
9298 + if (mac_dev)
9299 + return sprintf(buf, "%llx",
9300 + (unsigned long long)mac_dev->res->start);
9301 + else
9302 + return sprintf(buf, "none");
9303 +}
9304 +
9305 +static ssize_t dpaa_eth_show_type(struct device *dev,
9306 + struct device_attribute *attr, char *buf)
9307 +{
9308 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9309 + ssize_t res = 0;
9310 +
9311 + if (priv)
9312 + res = sprintf(buf, "%s", priv->if_type);
9313 +
9314 + return res;
9315 +}
9316 +
9317 +static ssize_t dpaa_eth_show_fqids(struct device *dev,
9318 + struct device_attribute *attr, char *buf)
9319 +{
9320 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9321 + ssize_t bytes = 0;
9322 + int i = 0;
9323 + char *str;
9324 + struct dpa_fq *fq;
9325 + struct dpa_fq *tmp;
9326 + struct dpa_fq *prev = NULL;
9327 + u32 first_fqid = 0;
9328 + u32 last_fqid = 0;
9329 + char *prevstr = NULL;
9330 +
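+	/* Coalesce consecutive FQIDs of the same type into "first - last"
+	 * ranges; a range is flushed when the type changes or the FQID
+	 * sequence breaks.
+	 */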
9331 + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
9332 + switch (fq->fq_type) {
9333 + case FQ_TYPE_RX_DEFAULT:
9334 + str = "Rx default";
9335 + break;
9336 + case FQ_TYPE_RX_ERROR:
9337 + str = "Rx error";
9338 + break;
9339 + case FQ_TYPE_RX_PCD:
9340 + str = "Rx PCD";
9341 + break;
9342 + case FQ_TYPE_TX_CONFIRM:
9343 + str = "Tx default confirmation";
9344 + break;
9345 + case FQ_TYPE_TX_CONF_MQ:
9346 + str = "Tx confirmation (mq)";
9347 + break;
9348 + case FQ_TYPE_TX_ERROR:
9349 + str = "Tx error";
9350 + break;
9351 + case FQ_TYPE_TX:
9352 + str = "Tx";
9353 + break;
9354 + case FQ_TYPE_RX_PCD_HI_PRIO:
9355 + str ="Rx PCD High Priority";
9356 + break;
9357 + default:
9358 + str = "Unknown";
9359 + }
9360 +
9361 + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
9362 + str != prevstr)) {
9363 + if (last_fqid == first_fqid)
9364 + bytes += sprintf(buf + bytes,
9365 + "%s: %d\n", prevstr, prev->fqid);
9366 + else
9367 + bytes += sprintf(buf + bytes,
9368 + "%s: %d - %d\n", prevstr,
9369 + first_fqid, last_fqid);
9370 + }
9371 +
9372 + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
9373 + last_fqid = fq->fqid;
9374 + else
9375 + first_fqid = last_fqid = fq->fqid;
9376 +
9377 + prev = fq;
9378 + prevstr = str;
9379 + i++;
9380 + }
9381 +
9382 + if (prev) {
9383 + if (last_fqid == first_fqid)
9384 + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
9385 + prev->fqid);
9386 + else
9387 + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
9388 + first_fqid, last_fqid);
9389 + }
9390 +
9391 + return bytes;
9392 +}
9393 +
9394 +static ssize_t dpaa_eth_show_bpids(struct device *dev,
9395 + struct device_attribute *attr, char *buf)
9396 +{
9397 + ssize_t bytes = 0;
9398 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9399 + struct dpa_bp *dpa_bp = priv->dpa_bp;
9400 + int i = 0;
9401 +
9402 + for (i = 0; i < priv->bp_count; i++)
9403 +		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
9404 + dpa_bp[i].bpid);
9405 +
9406 + return bytes;
9407 +}
9408 +
9409 +static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
9410 + struct device_attribute *attr, char *buf)
9411 +{
9412 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9413 + struct mac_device *mac_dev = priv->mac_dev;
9414 + int n = 0;
9415 +
9416 + if (mac_dev)
9417 + n = fm_mac_dump_regs(mac_dev, buf, n);
9418 + else
9419 + return sprintf(buf, "no mac registers\n");
9420 +
9421 + return n;
9422 +}
9423 +
9424 +static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
9425 + struct device_attribute *attr, char *buf)
9426 +{
9427 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9428 + struct mac_device *mac_dev = priv->mac_dev;
9429 + int n = 0;
9430 +
9431 + if (mac_dev)
9432 + n = fm_mac_dump_rx_stats(mac_dev, buf, n);
9433 + else
9434 + return sprintf(buf, "no mac rx stats\n");
9435 +
9436 + return n;
9437 +}
9438 +
9439 +static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
9440 + struct device_attribute *attr, char *buf)
9441 +{
9442 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9443 + struct mac_device *mac_dev = priv->mac_dev;
9444 + int n = 0;
9445 +
9446 + if (mac_dev)
9447 + n = fm_mac_dump_tx_stats(mac_dev, buf, n);
9448 + else
9449 + return sprintf(buf, "no mac tx stats\n");
9450 +
9451 + return n;
9452 +}
9453 +
9454 +#ifdef CONFIG_FSL_DPAA_1588
9455 +static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
9456 + struct device_attribute *attr, char *buf)
9457 +{
9458 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9459 +
9460 + if (priv->tsu && priv->tsu->valid)
9461 + return sprintf(buf, "1\n");
9462 + else
9463 + return sprintf(buf, "0\n");
9464 +}
9465 +
9466 +static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
9467 + struct device_attribute *attr,
9468 + const char *buf, size_t count)
9469 +{
9470 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9471 + unsigned int num;
9472 + unsigned long flags;
9473 +
9474 + if (kstrtouint(buf, 0, &num) < 0)
9475 + return -EINVAL;
9476 +
9477 + local_irq_save(flags);
9478 +
9479 + if (num) {
9480 + if (priv->tsu)
9481 + priv->tsu->valid = TRUE;
9482 + } else {
9483 + if (priv->tsu)
9484 + priv->tsu->valid = FALSE;
9485 + }
9486 +
9487 + local_irq_restore(flags);
9488 +
9489 + return count;
9490 +}
9491 +#endif
9492 +
9493 +static struct device_attribute dpaa_eth_attrs[] = {
9494 + __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
9495 + __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
9496 + __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
9497 + __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
9498 + __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
9499 + __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
9500 + __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
9501 +#ifdef CONFIG_FSL_DPAA_1588
9502 + __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
9503 + dpaa_eth_set_ptp_1588),
9504 +#endif
9505 +};
9506 +
9507 +void dpaa_eth_sysfs_init(struct device *dev)
9508 +{
9509 + int i;
9510 +
9511 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
9512 + if (device_create_file(dev, &dpaa_eth_attrs[i])) {
9513 + dev_err(dev, "Error creating sysfs file\n");
9514 + while (i > 0)
9515 + device_remove_file(dev, &dpaa_eth_attrs[--i]);
9516 + return;
9517 + }
9518 +}
9519 +EXPORT_SYMBOL(dpaa_eth_sysfs_init);
9520 +
9521 +void dpaa_eth_sysfs_remove(struct device *dev)
9522 +{
9523 + int i;
9524 +
9525 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
9526 + device_remove_file(dev, &dpaa_eth_attrs[i]);
9527 +}
9528 --- /dev/null
9529 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
9530 @@ -0,0 +1,144 @@
9531 +/* Copyright 2013 Freescale Semiconductor Inc.
9532 + *
9533 + * Redistribution and use in source and binary forms, with or without
9534 + * modification, are permitted provided that the following conditions are met:
9535 + * * Redistributions of source code must retain the above copyright
9536 + * notice, this list of conditions and the following disclaimer.
9537 + * * Redistributions in binary form must reproduce the above copyright
9538 + * notice, this list of conditions and the following disclaimer in the
9539 + * documentation and/or other materials provided with the distribution.
9540 + * * Neither the name of Freescale Semiconductor nor the
9541 + * names of its contributors may be used to endorse or promote products
9542 + * derived from this software without specific prior written permission.
9543 + *
9544 + *
9545 + * ALTERNATIVELY, this software may be distributed under the terms of the
9546 + * GNU General Public License ("GPL") as published by the Free Software
9547 + * Foundation, either version 2 of that License or (at your option) any
9548 + * later version.
9549 + *
9550 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9551 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9552 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9553 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9554 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9555 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9556 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9557 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9558 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9559 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9560 + */
9561 +
9562 +#undef TRACE_SYSTEM
9563 +#define TRACE_SYSTEM dpaa_eth
9564 +
9565 +#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
9566 +#define _DPAA_ETH_TRACE_H
9567 +
9568 +#include <linux/skbuff.h>
9569 +#include <linux/netdevice.h>
9570 +#include "dpaa_eth.h"
9571 +#include <linux/tracepoint.h>
9572 +
9573 +#define fd_format_name(format) { qm_fd_##format, #format }
9574 +#define fd_format_list \
9575 + fd_format_name(contig), \
9576 + fd_format_name(sg)
9577 +#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
9578 + " status=0x%08x"
9579 +
9580 +/* This is used to declare a class of events.
9581 + * Individual events of this type are defined below.
9582 + */
9583 +
9584 +/* Store details about a frame descriptor and the FQ on which it was
9585 + * transmitted/received.
9586 + */
9587 +DECLARE_EVENT_CLASS(dpaa_eth_fd,
9588 + /* Trace function prototype */
9589 + TP_PROTO(struct net_device *netdev,
9590 + struct qman_fq *fq,
9591 + const struct qm_fd *fd),
9592 +
9593 + /* Repeat argument list here */
9594 + TP_ARGS(netdev, fq, fd),
9595 +
9596 + /* A structure containing the relevant information we want to record.
9597 + * Declare name and type for each normal element, name, type and size
9598 + * for arrays. Use __string for variable length strings.
9599 + */
9600 + TP_STRUCT__entry(
9601 + __field(u32, fqid)
9602 + __field(u64, fd_addr)
9603 + __field(u8, fd_format)
9604 + __field(u16, fd_offset)
9605 + __field(u32, fd_length)
9606 + __field(u32, fd_status)
9607 + __string(name, netdev->name)
9608 + ),
9609 +
9610 + /* The function that assigns values to the above declared fields */
9611 + TP_fast_assign(
9612 + __entry->fqid = fq->fqid;
9613 + __entry->fd_addr = qm_fd_addr_get64(fd);
9614 + __entry->fd_format = fd->format;
9615 + __entry->fd_offset = dpa_fd_offset(fd);
9616 + __entry->fd_length = dpa_fd_length(fd);
9617 + __entry->fd_status = fd->status;
9618 + __assign_str(name, netdev->name);
9619 + ),
9620 +
9621 + /* This is what gets printed when the trace event is triggered */
9622 + /* TODO: print the status using __print_flags() */
9623 + TP_printk(TR_FMT,
9624 + __get_str(name), __entry->fqid, __entry->fd_addr,
9625 + __print_symbolic(__entry->fd_format, fd_format_list),
9626 + __entry->fd_offset, __entry->fd_length, __entry->fd_status)
9627 +);
9628 +
9629 +/* Now declare events of the above type. Format is:
9630 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
9631 + */
9632 +
9633 +/* Tx (egress) fd */
9634 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
9635 +
9636 + TP_PROTO(struct net_device *netdev,
9637 + struct qman_fq *fq,
9638 + const struct qm_fd *fd),
9639 +
9640 + TP_ARGS(netdev, fq, fd)
9641 +);
9642 +
9643 +/* Rx fd */
9644 +DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
9645 +
9646 + TP_PROTO(struct net_device *netdev,
9647 + struct qman_fq *fq,
9648 + const struct qm_fd *fd),
9649 +
9650 + TP_ARGS(netdev, fq, fd)
9651 +);
9652 +
9653 +/* Tx confirmation fd */
9654 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
9655 +
9656 + TP_PROTO(struct net_device *netdev,
9657 + struct qman_fq *fq,
9658 + const struct qm_fd *fd),
9659 +
9660 + TP_ARGS(netdev, fq, fd)
9661 +);
9662 +
9663 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
9664 + * The syntax is the same as for DECLARE_EVENT_CLASS().
9665 + */
9666 +
9667 +#endif /* _DPAA_ETH_TRACE_H */
9668 +
9669 +/* This must be outside ifdef _DPAA_ETH_TRACE_H */
9670 +#undef TRACE_INCLUDE_PATH
9671 +#define TRACE_INCLUDE_PATH .
9672 +#undef TRACE_INCLUDE_FILE
9673 +#define TRACE_INCLUDE_FILE dpaa_eth_trace
9674 +#include <trace/define_trace.h>
9675 --- /dev/null
9676 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
9677 @@ -0,0 +1,544 @@
9678 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
9679 + *
9680 + * Redistribution and use in source and binary forms, with or without
9681 + * modification, are permitted provided that the following conditions are met:
9682 + * * Redistributions of source code must retain the above copyright
9683 + * notice, this list of conditions and the following disclaimer.
9684 + * * Redistributions in binary form must reproduce the above copyright
9685 + * notice, this list of conditions and the following disclaimer in the
9686 + * documentation and/or other materials provided with the distribution.
9687 + * * Neither the name of Freescale Semiconductor nor the
9688 + * names of its contributors may be used to endorse or promote products
9689 + * derived from this software without specific prior written permission.
9690 + *
9691 + *
9692 + * ALTERNATIVELY, this software may be distributed under the terms of the
9693 + * GNU General Public License ("GPL") as published by the Free Software
9694 + * Foundation, either version 2 of that License or (at your option) any
9695 + * later version.
9696 + *
9697 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9698 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9699 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9700 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9701 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9702 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9703 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9704 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9705 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9706 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9707 + */
9708 +
9709 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
9710 +#define pr_fmt(fmt) \
9711 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
9712 + KBUILD_BASENAME".c", __LINE__, __func__
9713 +#else
9714 +#define pr_fmt(fmt) \
9715 + KBUILD_MODNAME ": " fmt
9716 +#endif
9717 +
9718 +#include <linux/string.h>
9719 +
9720 +#include "dpaa_eth.h"
9721 +#include "mac.h" /* struct mac_device */
9722 +#include "dpaa_eth_common.h"
9723 +
9724 +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
9725 + "interrupts",
9726 + "rx packets",
9727 + "tx packets",
9728 + "tx recycled",
9729 + "tx confirm",
9730 + "tx S/G",
9731 + "rx S/G",
9732 + "tx error",
9733 + "rx error",
9734 + "bp count"
9735 +};
9736 +
9737 +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
9738 + /* dpa rx errors */
9739 + "rx dma error",
9740 + "rx frame physical error",
9741 + "rx frame size error",
9742 + "rx header error",
9743 + "rx csum error",
9744 +
9745 + /* demultiplexing errors */
9746 + "qman cg_tdrop",
9747 + "qman wred",
9748 + "qman error cond",
9749 + "qman early window",
9750 + "qman late window",
9751 + "qman fq tdrop",
9752 + "qman fq retired",
9753 + "qman orp disabled",
9754 +
9755 + /* congestion related stats */
9756 + "congestion time (ms)",
9757 + "entered congestion",
9758 + "congested (0/1)"
9759 +};
9760 +
9761 +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
9762 +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
9763 +
9764 +static int __cold dpa_get_settings(struct net_device *net_dev,
9765 + struct ethtool_cmd *et_cmd)
9766 +{
9767 + int _errno;
9768 + struct dpa_priv_s *priv;
9769 +
9770 + priv = netdev_priv(net_dev);
9771 +
9772 + if (priv->mac_dev == NULL) {
9773 + netdev_info(net_dev, "This is a MAC-less interface\n");
9774 + return -ENODEV;
9775 + }
9776 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9777 + netdev_dbg(net_dev, "phy device not initialized\n");
9778 + return 0;
9779 + }
9780 +
9781 + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
9782 + if (unlikely(_errno < 0))
9783 + netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
9784 +
9785 + return _errno;
9786 +}
9787 +
9788 +static int __cold dpa_set_settings(struct net_device *net_dev,
9789 + struct ethtool_cmd *et_cmd)
9790 +{
9791 + int _errno;
9792 + struct dpa_priv_s *priv;
9793 +
9794 + priv = netdev_priv(net_dev);
9795 +
9796 + if (priv->mac_dev == NULL) {
9797 + netdev_info(net_dev, "This is a MAC-less interface\n");
9798 + return -ENODEV;
9799 + }
9800 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9801 + netdev_err(net_dev, "phy device not initialized\n");
9802 + return -ENODEV;
9803 + }
9804 +
9805 + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
9806 + if (unlikely(_errno < 0))
9807 + netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
9808 +
9809 + return _errno;
9810 +}
9811 +
9812 +static void __cold dpa_get_drvinfo(struct net_device *net_dev,
9813 + struct ethtool_drvinfo *drvinfo)
9814 +{
9815 + int _errno;
9816 +
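+	/* strncpy() returns its destination; index the result to guarantee
+	 * NUL termination of the copied string
+	 */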
9817 + strncpy(drvinfo->driver, KBUILD_MODNAME,
9818 + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
9819 + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
9820 + "%X", 0);
9821 +
9822 + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
9823 + /* Truncated output */
9824 + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
9825 + } else if (unlikely(_errno < 0)) {
9826 + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
9827 + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
9828 + }
9829 + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
9830 + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
9831 +}
9832 +
9833 +static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
9834 +{
9835 + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
9836 +}
9837 +
9838 +static void __cold dpa_set_msglevel(struct net_device *net_dev,
9839 + uint32_t msg_enable)
9840 +{
9841 + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
9842 +}
9843 +
9844 +static int __cold dpa_nway_reset(struct net_device *net_dev)
9845 +{
9846 + int _errno;
9847 + struct dpa_priv_s *priv;
9848 +
9849 + priv = netdev_priv(net_dev);
9850 +
9851 + if (priv->mac_dev == NULL) {
9852 + netdev_info(net_dev, "This is a MAC-less interface\n");
9853 + return -ENODEV;
9854 + }
9855 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9856 + netdev_err(net_dev, "phy device not initialized\n");
9857 + return -ENODEV;
9858 + }
9859 +
9860 + _errno = 0;
9861 + if (priv->mac_dev->phy_dev->autoneg) {
9862 + _errno = phy_start_aneg(priv->mac_dev->phy_dev);
9863 + if (unlikely(_errno < 0))
9864 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
9865 + _errno);
9866 + }
9867 +
9868 + return _errno;
9869 +}
9870 +
9871 +static void __cold dpa_get_pauseparam(struct net_device *net_dev,
9872 + struct ethtool_pauseparam *epause)
9873 +{
9874 + struct dpa_priv_s *priv;
9875 + struct mac_device *mac_dev;
9876 + struct phy_device *phy_dev;
9877 +
9878 + priv = netdev_priv(net_dev);
9879 + mac_dev = priv->mac_dev;
9880 +
9881 + if (mac_dev == NULL) {
9882 + netdev_info(net_dev, "This is a MAC-less interface\n");
9883 + return;
9884 + }
9885 +
9886 + phy_dev = mac_dev->phy_dev;
9887 + if (unlikely(phy_dev == NULL)) {
9888 + netdev_err(net_dev, "phy device not initialized\n");
9889 + return;
9890 + }
9891 +
9892 + epause->autoneg = mac_dev->autoneg_pause;
9893 + epause->rx_pause = mac_dev->rx_pause_active;
9894 + epause->tx_pause = mac_dev->tx_pause_active;
9895 +}
9896 +
9897 +static int __cold dpa_set_pauseparam(struct net_device *net_dev,
9898 + struct ethtool_pauseparam *epause)
9899 +{
9900 + struct dpa_priv_s *priv;
9901 + struct mac_device *mac_dev;
9902 + struct phy_device *phy_dev;
9903 + int _errno;
9904 + u32 newadv, oldadv;
9905 + bool rx_pause, tx_pause;
9906 +
9907 + priv = netdev_priv(net_dev);
9908 + mac_dev = priv->mac_dev;
9909 +
9910 + if (mac_dev == NULL) {
9911 + netdev_info(net_dev, "This is a MAC-less interface\n");
9912 + return -ENODEV;
9913 + }
9914 +
9915 + phy_dev = mac_dev->phy_dev;
9916 + if (unlikely(phy_dev == NULL)) {
9917 + netdev_err(net_dev, "phy device not initialized\n");
9918 + return -ENODEV;
9919 + }
9920 +
9921 + if (!(phy_dev->supported & SUPPORTED_Pause) ||
9922 + (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
9923 + (epause->rx_pause != epause->tx_pause)))
9924 + return -EINVAL;
9925 +
9926 + /* The MAC should know how to handle PAUSE frame autonegotiation before
9927 + * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
9928 + * settings.
9929 + */
9930 + mac_dev->autoneg_pause = !!epause->autoneg;
9931 + mac_dev->rx_pause_req = !!epause->rx_pause;
9932 + mac_dev->tx_pause_req = !!epause->tx_pause;
9933 +
9934 + /* Determine the sym/asym advertised PAUSE capabilities from the desired
9935 + * rx/tx pause settings.
9936 + */
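+	/* Standard PAUSE advertisement mapping: rx pause advertises
+	 * Pause | Asym_Pause, tx-only pause advertises Asym_Pause.
+	 */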
9937 + newadv = 0;
9938 + if (epause->rx_pause)
9939 + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
9940 + if (epause->tx_pause)
9941 + newadv |= ADVERTISED_Asym_Pause;
9942 +
9943 + oldadv = phy_dev->advertising &
9944 + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9945 +
9946 + /* If there are differences between the old and the new advertised
9947 + * values, restart PHY autonegotiation and advertise the new values.
9948 + */
9949 + if (oldadv != newadv) {
9950 + phy_dev->advertising &= ~(ADVERTISED_Pause
9951 + | ADVERTISED_Asym_Pause);
9952 + phy_dev->advertising |= newadv;
9953 + if (phy_dev->autoneg) {
9954 + _errno = phy_start_aneg(phy_dev);
9955 + if (unlikely(_errno < 0))
9956 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
9957 + _errno);
9958 + }
9959 + }
9960 +
9961 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
9962 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
9963 + if (unlikely(_errno < 0))
9964 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
9965 +
9966 + return _errno;
9967 +}
9968 +
9969 +#ifdef CONFIG_PM
9970 +static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
9971 +{
9972 + struct dpa_priv_s *priv = netdev_priv(net_dev);
9973 +
9974 + wol->supported = 0;
9975 + wol->wolopts = 0;
9976 +
9977 + if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
9978 + return;
9979 +
9980 + if (priv->wol & DPAA_WOL_MAGIC) {
9981 + wol->supported = WAKE_MAGIC;
9982 + wol->wolopts = WAKE_MAGIC;
9983 + }
9984 +}
9985 +
9986 +static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
9987 +{
9988 + struct dpa_priv_s *priv = netdev_priv(net_dev);
9989 +
9990 + if (priv->mac_dev == NULL) {
9991 + netdev_info(net_dev, "This is a MAC-less interface\n");
9992 + return -ENODEV;
9993 + }
9994 +
9995 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9996 + netdev_dbg(net_dev, "phy device not initialized\n");
9997 + return -ENODEV;
9998 + }
9999 +
10000 + if (!device_can_wakeup(net_dev->dev.parent) ||
10001 + (wol->wolopts & ~WAKE_MAGIC))
10002 + return -EOPNOTSUPP;
10003 +
10004 + priv->wol = 0;
10005 +
10006 + if (wol->wolopts & WAKE_MAGIC) {
10007 + priv->wol = DPAA_WOL_MAGIC;
10008 + device_set_wakeup_enable(net_dev->dev.parent, 1);
10009 + } else {
10010 + device_set_wakeup_enable(net_dev->dev.parent, 0);
10011 + }
10012 +
10013 + return 0;
10014 +}
10015 +#endif
10016 +
10017 +static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
10018 +{
10019 + struct dpa_priv_s *priv;
10020 +
10021 + priv = netdev_priv(net_dev);
10022 + if (priv->mac_dev == NULL) {
10023 + netdev_info(net_dev, "This is a MAC-less interface\n");
10024 + return -ENODEV;
10025 + }
10026 +
10027 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
10028 + netdev_err(net_dev, "phy device not initialized\n");
10029 + return -ENODEV;
10030 + }
10031 +
10032 + return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
10033 +}
10034 +
10035 +static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
10036 +{
10037 + struct dpa_priv_s *priv;
10038 +
10039 + priv = netdev_priv(net_dev);
10040 + if (priv->mac_dev == NULL) {
10041 + netdev_info(net_dev, "This is a MAC-less interface\n");
10042 + return -ENODEV;
10043 + }
10044 +
10045 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
10046 + netdev_err(net_dev, "phy device not initialized\n");
10047 + return -ENODEV;
10048 + }
10049 +
10050 + return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
10051 +}
10052 +
10053 +static int dpa_get_sset_count(struct net_device *net_dev, int type)
10054 +{
10055 + unsigned int total_stats, num_stats;
10056 +
10057 + num_stats = num_online_cpus() + 1;
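+	/* one set of values per online CPU plus one TOTAL set */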
10058 + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
10059 +
10060 + switch (type) {
10061 + case ETH_SS_STATS:
10062 + return total_stats;
10063 + default:
10064 + return -EOPNOTSUPP;
10065 + }
10066 +}
10067 +
10068 +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
10069 + int crr_cpu, u64 bp_count, u64 *data)
10070 +{
10071 + int num_stat_values = num_cpus + 1;
10072 + int crr_stat = 0;
10073 +
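+	/* data[] holds, for each statistic, one slot per CPU followed by a
+	 * cumulative TOTAL slot
+	 */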
10074 + /* update current CPU's stats and also add them to the total values */
10075 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
10076 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
10077 +
10078 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
10079 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
10080 +
10081 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
10082 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
10083 +
10084 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
10085 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
10086 +
10087 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
10088 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
10089 +
10090 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
10091 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
10092 +
10093 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
10094 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
10095 +
10096 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
10097 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
10098 +
10099 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
10100 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
10101 +
10102 + data[crr_stat * num_stat_values + crr_cpu] = bp_count;
10103 + data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
10104 +}
10105 +
10106 +static void dpa_get_ethtool_stats(struct net_device *net_dev,
10107 + struct ethtool_stats *stats, u64 *data)
10108 +{
10109 + u64 bp_count, cg_time, cg_num, cg_status;
10110 + struct dpa_percpu_priv_s *percpu_priv;
10111 + struct qm_mcr_querycgr query_cgr;
10112 + struct dpa_rx_errors rx_errors;
10113 + struct dpa_ern_cnt ern_cnt;
10114 + struct dpa_priv_s *priv;
10115 + unsigned int num_cpus, offset;
10116 + struct dpa_bp *dpa_bp;
10117 + int total_stats, i;
10118 +
10119 + total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
10120 + priv = netdev_priv(net_dev);
10121 + dpa_bp = priv->dpa_bp;
10122 + num_cpus = num_online_cpus();
10123 + bp_count = 0;
10124 +
10125 + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
10126 + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
10127 + memset(data, 0, total_stats * sizeof(u64));
10128 +
10129 + for_each_online_cpu(i) {
10130 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
10131 +
10132 + if (dpa_bp->percpu_count)
10133 + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
10134 +
10135 + rx_errors.dme += percpu_priv->rx_errors.dme;
10136 + rx_errors.fpe += percpu_priv->rx_errors.fpe;
10137 + rx_errors.fse += percpu_priv->rx_errors.fse;
10138 + rx_errors.phe += percpu_priv->rx_errors.phe;
10139 + rx_errors.cse += percpu_priv->rx_errors.cse;
10140 +
10141 + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
10142 + ern_cnt.wred += percpu_priv->ern_cnt.wred;
10143 + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
10144 + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
10145 + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
10146 + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
10147 + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
10148 + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
10149 +
10150 + copy_stats(percpu_priv, num_cpus, i, bp_count, data);
10151 + }
10152 +
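+	/* the rx error and ERN structs are copied out as consecutive u64
+	 * counters, in the order of the dpa_stats_global strings
+	 */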
10153 + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
10154 + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
10155 +
10156 + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
10157 + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
10158 +
10159 + /* gather congestion related counters */
10160 + cg_num = 0;
10161 + cg_status = 0;
10162 + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
10163 + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
10164 + cg_num = priv->cgr_data.cgr_congested_count;
10165 + cg_status = query_cgr.cgr.cs;
10166 +
10167 +		/* reset congestion stats (like the QMan API does) */
10168 + priv->cgr_data.congested_jiffies = 0;
10169 + priv->cgr_data.cgr_congested_count = 0;
10170 + }
10171 +
10172 + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
10173 + data[offset++] = cg_time;
10174 + data[offset++] = cg_num;
10175 + data[offset++] = cg_status;
10176 +}
10177 +
10178 +static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
10179 +{
10180 + unsigned int i, j, num_cpus, size;
10181 + char stat_string_cpu[ETH_GSTRING_LEN];
10182 + u8 *strings;
10183 +
10184 + strings = data;
10185 + num_cpus = num_online_cpus();
10186 + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
10187 +
10188 + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
10189 + for (j = 0; j < num_cpus; j++) {
10190 +			snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %u]", dpa_stats_percpu[i], j);
10191 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
10192 + strings += ETH_GSTRING_LEN;
10193 + }
10194 + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
10195 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
10196 + strings += ETH_GSTRING_LEN;
10197 + }
10198 + memcpy(strings, dpa_stats_global, size);
10199 +}
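10200 +
+/* The string table mirrors the data layout filled in by copy_stats(): for
+ * each dpa_stats_percpu[] name the per-CPU labels come first and the total
+ * last, e.g. with two CPUs each row reads
+ *	"<name> [CPU 0]", "<name> [CPU 1]", "<name> [TOTAL]",
+ * followed by the dpa_stats_global[] strings in declaration order.
+ */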
10200 +
10201 +const struct ethtool_ops dpa_ethtool_ops = {
10202 + .get_settings = dpa_get_settings,
10203 + .set_settings = dpa_set_settings,
10204 + .get_drvinfo = dpa_get_drvinfo,
10205 + .get_msglevel = dpa_get_msglevel,
10206 + .set_msglevel = dpa_set_msglevel,
10207 + .nway_reset = dpa_nway_reset,
10208 + .get_pauseparam = dpa_get_pauseparam,
10209 + .set_pauseparam = dpa_set_pauseparam,
10210 + .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
10211 + .get_link = ethtool_op_get_link,
10212 + .get_eee = dpa_get_eee,
10213 + .set_eee = dpa_set_eee,
10214 + .get_sset_count = dpa_get_sset_count,
10215 + .get_ethtool_stats = dpa_get_ethtool_stats,
10216 + .get_strings = dpa_get_strings,
10217 +#ifdef CONFIG_PM
10218 + .get_wol = dpa_get_wol,
10219 + .set_wol = dpa_set_wol,
10220 +#endif
10221 +};
10222 --- /dev/null
10223 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
10224 @@ -0,0 +1,290 @@
10225 +/*
10226 + * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
10227 + *
10228 + * Author: Yangbo Lu <yangbo.lu@freescale.com>
10229 + *
10230 + * Copyright 2014 Freescale Semiconductor, Inc.
10231 + *
10232 + * This program is free software; you can redistribute it and/or modify it
10233 + * under the terms of the GNU General Public License as published by the
10234 + * Free Software Foundation; either version 2 of the License, or (at your
10235 + * option) any later version.
10236 +*/
10237 +
10238 +#include <linux/device.h>
10239 +#include <linux/hrtimer.h>
10240 +#include <linux/init.h>
10241 +#include <linux/interrupt.h>
10242 +#include <linux/kernel.h>
10243 +#include <linux/module.h>
10244 +#include <linux/of.h>
10245 +#include <linux/of_platform.h>
10246 +#include <linux/timex.h>
10247 +#include <linux/io.h>
10248 +
10249 +#include <linux/ptp_clock_kernel.h>
10250 +
10251 +#include "dpaa_eth.h"
10252 +#include "mac.h"
10253 +
10254 +static struct ptp_clock *clock;
10255 +
10256 +static struct mac_device *mac_dev;
10257 +static u32 freqCompensation;
10258 +
10259 +/* Bit definitions for the TMR_CTRL register */
10260 +#define ALM1P (1U<<31) /* Alarm1 output polarity */
10261 +#define ALM2P (1<<30) /* Alarm2 output polarity */
10262 +#define FS (1<<28) /* FIPER start indication */
10263 +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
10264 +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
10265 +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
10266 +#define TCLK_PERIOD_MASK (0x3ff)
10267 +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
10268 +#define FRD (1<<14) /* FIPER Realignment Disable */
10269 +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
10270 +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
10271 +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
10272 +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
10273 +#define COPH (1<<7) /* Generated clock output phase. */
10274 +#define CIPH (1<<6) /* External oscillator input clock phase */
10275 +#define TMSR (1<<5) /* Timer soft reset. */
10276 +#define BYP (1<<3) /* Bypass drift compensated clock */
10277 +#define TE (1<<2) /* 1588 timer enable. */
10278 +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
10279 +#define CKSEL_MASK (0x3)
10280 +
10281 +/* Bit definitions for the TMR_TEVENT register */
10282 +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
10283 +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
10284 +#define ALM2 (1<<17) /* Current time = alarm time register 2 */
10285 +#define ALM1 (1<<16) /* Current time = alarm time register 1 */
10286 +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
10287 +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
10288 +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
10289 +
10290 +/* Bit definitions for the TMR_TEMASK register */
10291 +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
10292 +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
10293 +#define ALM2EN (1<<17) /* Timer ALM2 event enable */
10294 +#define ALM1EN (1<<16) /* Timer ALM1 event enable */
10295 +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
10296 +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
10297 +
10298 +/* Bit definitions for the TMR_PEVENT register */
10299 +#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
10300 +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
10301 +#define RXP (1<<0) /* PTP frame has been received */
10302 +
10303 +/* Bit definitions for the TMR_PEMASK register */
10304 +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
10305 +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
10306 +#define RXPEN (1<<0) /* Receive PTP packet event enable */
10307 +
10308 +/* Bit definitions for the TMR_STAT register */
10309 +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
10310 +#define STAT_VEC_MASK (0x3f)
10311 +
10312 +/* Bit definitions for the TMR_PRSC register */
10313 +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
10314 +#define PRSC_OCK_MASK (0xffff)
10315 +
10316 +
10317 +#define N_EXT_TS 2
10318 +
10319 +static void set_alarm(void)
10320 +{
10321 +	u64 ns = 0;
10322 +
10323 + if (mac_dev->fm_rtc_get_cnt)
10324 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
10325 + ns += 1500000000ULL;
10326 + ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
10327 + ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
10328 + if (mac_dev->fm_rtc_set_alarm)
10329 + mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
10330 +}
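10331 +
+/* Worked example of the rounding above (illustrative): with the counter at
+ * 2.3s, the 1.5s look-ahead yields 3.8s, div_u64() truncates that to 3.0s,
+ * and one nominal clock period is subtracted so the alarm (which re-arms
+ * the FIPER pulse) fires just ahead of the whole-second boundary.
+ */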
10331 +
10332 +static void set_fipers(void)
10333 +{
10334 + u64 fiper;
10335 +
10336 + if (mac_dev->fm_rtc_disable)
10337 + mac_dev->fm_rtc_disable(mac_dev->fm_dev);
10338 +
10339 + set_alarm();
10340 + fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
10341 + if (mac_dev->fm_rtc_set_fiper)
10342 + mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
10343 +
10344 + if (mac_dev->fm_rtc_enable)
10345 + mac_dev->fm_rtc_enable(mac_dev->fm_dev);
10346 +}
10347 +
10348 +/* PTP clock operations */
10349 +
10350 +static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
10351 +{
10352 + u64 adj;
10353 + u32 diff, tmr_add;
10354 + int neg_adj = 0;
10355 +
10356 + if (ppb < 0) {
10357 + neg_adj = 1;
10358 + ppb = -ppb;
10359 + }
10360 +
10361 + tmr_add = freqCompensation;
10362 + adj = tmr_add;
10363 + adj *= ppb;
10364 + diff = div_u64(adj, 1000000000ULL);
10365 +
10366 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
10367 +
10368 + if (mac_dev->fm_rtc_set_drift)
10369 + mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
10370 +
10371 + return 0;
10372 +}
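10373 +
+/* Worked example (illustrative): for tmr_add = 0x80000000 and ppb = 1000,
+ * diff = 0x80000000 * 1000 / 10^9 ~= 2147, i.e. the drift register is
+ * scaled by exactly ppb parts per billion of its nominal value.
+ */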
10373 +
10374 +static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
10375 +{
10376 +	u64 now = 0;
10377 +
10378 + if (mac_dev->fm_rtc_get_cnt)
10379 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
10380 +
10381 + now += delta;
10382 +
10383 + if (mac_dev->fm_rtc_set_cnt)
10384 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
10385 + set_fipers();
10386 +
10387 + return 0;
10388 +}
10389 +
10390 +static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
10391 +{
10392 +	u64 ns = 0;
10393 + u32 remainder;
10394 +
10395 + if (mac_dev->fm_rtc_get_cnt)
10396 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
10397 +
10398 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
10399 + ts->tv_nsec = remainder;
10400 + return 0;
10401 +}
10402 +
10403 +static int ptp_dpa_settime(struct ptp_clock_info *ptp,
10404 + const struct timespec64 *ts)
10405 +{
10406 + u64 ns;
10407 +
10408 + ns = ts->tv_sec * 1000000000ULL;
10409 + ns += ts->tv_nsec;
10410 +
10411 + if (mac_dev->fm_rtc_set_cnt)
10412 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
10413 + set_fipers();
10414 + return 0;
10415 +}
10416 +
10417 +static int ptp_dpa_enable(struct ptp_clock_info *ptp,
10418 + struct ptp_clock_request *rq, int on)
10419 +{
10420 + u32 bit;
10421 +
10422 + switch (rq->type) {
10423 + case PTP_CLK_REQ_EXTTS:
10424 + switch (rq->extts.index) {
10425 + case 0:
10426 + bit = ETS1EN;
10427 + break;
10428 + case 1:
10429 + bit = ETS2EN;
10430 + break;
10431 + default:
10432 + return -EINVAL;
10433 + }
10434 + if (on) {
10435 + if (mac_dev->fm_rtc_enable_interrupt)
10436 + mac_dev->fm_rtc_enable_interrupt(
10437 + mac_dev->fm_dev, bit);
10438 + } else {
10439 + if (mac_dev->fm_rtc_disable_interrupt)
10440 + mac_dev->fm_rtc_disable_interrupt(
10441 + mac_dev->fm_dev, bit);
10442 + }
10443 + return 0;
10444 +
10445 + case PTP_CLK_REQ_PPS:
10446 + if (on) {
10447 + if (mac_dev->fm_rtc_enable_interrupt)
10448 + mac_dev->fm_rtc_enable_interrupt(
10449 + mac_dev->fm_dev, PP1EN);
10450 + } else {
10451 + if (mac_dev->fm_rtc_disable_interrupt)
10452 + mac_dev->fm_rtc_disable_interrupt(
10453 + mac_dev->fm_dev, PP1EN);
10454 + }
10455 + return 0;
10456 +
10457 + default:
10458 + break;
10459 + }
10460 +
10461 + return -EOPNOTSUPP;
10462 +}
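10463 +
+/* E.g. a PTP_EXTTS_REQUEST ioctl with rq->extts.index == 1 ends up here
+ * and toggles the ETS2EN interrupt (when the FM RTC wrapper provides the
+ * callbacks), while PTP_CLK_REQ_PPS maps onto the FIPER1 event (PP1EN).
+ */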
10463 +
10464 +static struct ptp_clock_info ptp_dpa_caps = {
10465 + .owner = THIS_MODULE,
10466 + .name = "dpaa clock",
10467 + .max_adj = 512000,
10468 + .n_alarm = 0,
10469 + .n_ext_ts = N_EXT_TS,
10470 + .n_per_out = 0,
10471 + .pps = 1,
10472 + .adjfreq = ptp_dpa_adjfreq,
10473 + .adjtime = ptp_dpa_adjtime,
10474 + .gettime64 = ptp_dpa_gettime,
10475 + .settime64 = ptp_dpa_settime,
10476 + .enable = ptp_dpa_enable,
10477 +};
10478 +
10479 +static int __init __cold dpa_ptp_load(void)
10480 +{
10481 + struct device *ptp_dev;
10482 + struct timespec64 now;
10483 + int dpa_phc_index;
10484 + int err;
10485 +
10486 + if (!(ptp_priv.of_dev && ptp_priv.mac_dev))
10487 + return -ENODEV;
10488 +
10489 + ptp_dev = &ptp_priv.of_dev->dev;
10490 + mac_dev = ptp_priv.mac_dev;
10491 +
10492 + if (mac_dev->fm_rtc_get_drift)
10493 + mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
10494 +
10495 +	ktime_get_real_ts64(&now);
10496 + ptp_dpa_settime(&ptp_dpa_caps, &now);
10497 +
10498 + clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
10499 + if (IS_ERR(clock)) {
10500 + err = PTR_ERR(clock);
10501 + return err;
10502 + }
10503 + dpa_phc_index = ptp_clock_index(clock);
10504 + return 0;
10505 +}
10506 +module_init(dpa_ptp_load);
10507 +
10508 +static void __exit __cold dpa_ptp_unload(void)
10509 +{
10510 + if (mac_dev->fm_rtc_disable_interrupt)
10511 + mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
10512 + ptp_clock_unregister(clock);
10513 +}
10514 +module_exit(dpa_ptp_unload);
10515 --- /dev/null
10516 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
10517 @@ -0,0 +1,909 @@
10518 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
10519 + *
10520 + * Redistribution and use in source and binary forms, with or without
10521 + * modification, are permitted provided that the following conditions are met:
10522 + * * Redistributions of source code must retain the above copyright
10523 + * notice, this list of conditions and the following disclaimer.
10524 + * * Redistributions in binary form must reproduce the above copyright
10525 + * notice, this list of conditions and the following disclaimer in the
10526 + * documentation and/or other materials provided with the distribution.
10527 + * * Neither the name of Freescale Semiconductor nor the
10528 + * names of its contributors may be used to endorse or promote products
10529 + * derived from this software without specific prior written permission.
10530 + *
10531 + *
10532 + * ALTERNATIVELY, this software may be distributed under the terms of the
10533 + * GNU General Public License ("GPL") as published by the Free Software
10534 + * Foundation, either version 2 of that License or (at your option) any
10535 + * later version.
10536 + *
10537 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
10538 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
10539 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
10540 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
10541 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
10542 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
10543 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
10544 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10545 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10546 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10547 + */
10548 +
10549 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
10550 +#define pr_fmt(fmt) \
10551 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
10552 + KBUILD_BASENAME".c", __LINE__, __func__
10553 +#else
10554 +#define pr_fmt(fmt) \
10555 + KBUILD_MODNAME ": " fmt
10556 +#endif
10557 +
10558 +#include <linux/init.h>
10559 +#include <linux/module.h>
10560 +#include <linux/io.h>
10561 +#include <linux/of_platform.h>
10562 +#include <linux/of_mdio.h>
10563 +#include <linux/phy.h>
10564 +#include <linux/netdevice.h>
10565 +
10566 +#include "dpaa_eth.h"
10567 +#include "mac.h"
10568 +#include "lnxwrp_fsl_fman.h"
10569 +
10570 +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
10571 +
10572 +#include "fsl_fman_dtsec.h"
10573 +#include "fsl_fman_tgec.h"
10574 +#include "fsl_fman_memac.h"
10575 +#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
10576 +
10577 +#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
10578 +
10579 +MODULE_LICENSE("Dual BSD/GPL");
10580 +
10581 +MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
10582 +
10583 +MODULE_DESCRIPTION(MAC_DESCRIPTION);
10584 +
10585 +struct mac_priv_s {
10586 + struct fm_mac_dev *fm_mac;
10587 +};
10588 +
10589 +const char *mac_driver_description __initconst = MAC_DESCRIPTION;
10590 +const size_t mac_sizeof_priv[] = {
10591 + [DTSEC] = sizeof(struct mac_priv_s),
10592 + [XGMAC] = sizeof(struct mac_priv_s),
10593 + [MEMAC] = sizeof(struct mac_priv_s)
10594 +};
10595 +
10596 +static const enet_mode_t _100[] = {
10597 + [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
10598 + [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
10599 +};
10600 +
10601 +static const enet_mode_t _1000[] = {
10602 + [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
10603 + [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
10604 + [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
10605 + [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
10606 + [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
10607 + [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
10608 + [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
10609 + [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
10610 + [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
10611 +};
10612 +
10613 +static enet_mode_t __cold __attribute__((nonnull))
10614 +macdev2enetinterface(const struct mac_device *mac_dev)
10615 +{
10616 + switch (mac_dev->max_speed) {
10617 + case SPEED_100:
10618 + return _100[mac_dev->phy_if];
10619 + case SPEED_1000:
10620 + return _1000[mac_dev->phy_if];
10621 + case SPEED_2500:
10622 + return e_ENET_MODE_SGMII_2500;
10623 + case SPEED_10000:
10624 + return e_ENET_MODE_XGMII_10000;
10625 + default:
10626 + return e_ENET_MODE_MII_100;
10627 + }
10628 +}
10629 +
10630 +static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
10631 +{
10632 + struct mac_device *mac_dev;
10633 +
10634 + mac_dev = (struct mac_device *)_mac_dev;
10635 +
10636 + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
10637 +		/* don't flag RX FIFO overflow after the first occurrence */
10638 + fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10639 + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
10640 + dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
10641 + exception);
10642 + }
10643 +
10644 + dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
10645 + exception);
10646 +}
10647 +
10648 +static int __cold init(struct mac_device *mac_dev)
10649 +{
10650 + int _errno;
10651 + struct mac_priv_s *priv;
10652 + t_FmMacParams param;
10653 + uint32_t version;
10654 +
10655 + priv = macdev_priv(mac_dev);
10656 +
10657 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
10658 + mac_dev->dev, mac_dev->res->start, 0x2000);
10659 + param.enetMode = macdev2enetinterface(mac_dev);
10660 + memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
10661 + sizeof(mac_dev->addr)));
10662 + param.macId = mac_dev->cell_index;
10663 + param.h_Fm = (handle_t)mac_dev->fm;
10664 + param.mdioIrq = NO_IRQ;
10665 + param.f_Exception = mac_exception;
10666 + param.f_Event = mac_exception;
10667 + param.h_App = mac_dev;
10668 +
10669 + priv->fm_mac = fm_mac_config(&param);
10670 + if (unlikely(priv->fm_mac == NULL)) {
10671 + _errno = -EINVAL;
10672 + goto _return;
10673 + }
10674 +
10675 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
10676 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10677 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
10678 +
10679 + _errno = fm_mac_config_max_frame_length(priv->fm_mac,
10680 + fm_get_max_frm());
10681 + if (unlikely(_errno < 0))
10682 + goto _return_fm_mac_free;
10683 +
10684 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
10685 +		/* 10G always has pad and CRC; only 1G needs them configured */
10686 + _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
10687 + if (unlikely(_errno < 0))
10688 + goto _return_fm_mac_free;
10689 +
10690 + _errno = fm_mac_config_half_duplex(priv->fm_mac,
10691 + mac_dev->half_duplex);
10692 + if (unlikely(_errno < 0))
10693 + goto _return_fm_mac_free;
10694 + } else {
10695 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
10696 + if (unlikely(_errno < 0))
10697 + goto _return_fm_mac_free;
10698 + }
10699 +
10700 + _errno = fm_mac_init(priv->fm_mac);
10701 + if (unlikely(_errno < 0))
10702 + goto _return_fm_mac_free;
10703 +
10704 +#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
10705 + /* For 1G MAC, disable by default the MIB counters overflow interrupt */
10706 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
10707 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10708 + e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
10709 + if (unlikely(_errno < 0))
10710 + goto _return_fm_mac_free;
10711 + }
10712 +#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
10713 +
10714 + /* For 10G MAC, disable Tx ECC exception */
10715 + if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
10716 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10717 + e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
10718 + if (unlikely(_errno < 0))
10719 + goto _return_fm_mac_free;
10720 + }
10721 +
10722 + _errno = fm_mac_get_version(priv->fm_mac, &version);
10723 + if (unlikely(_errno < 0))
10724 + goto _return_fm_mac_free;
10725 +
10726 + dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
10727 + ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10728 + "dTSEC" : "XGEC"), version);
10729 +
10730 + goto _return;
10731 +
10732 +
10733 +_return_fm_mac_free:
10734 + fm_mac_free(mac_dev->get_mac_handle(mac_dev));
10735 +
10736 +_return:
10737 + return _errno;
10738 +}
10739 +
10740 +static int __cold memac_init(struct mac_device *mac_dev)
10741 +{
10742 + int _errno;
10743 + struct mac_priv_s *priv;
10744 + t_FmMacParams param;
10745 +
10746 + priv = macdev_priv(mac_dev);
10747 +
10748 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
10749 + mac_dev->dev, mac_dev->res->start, 0x2000);
10750 + param.enetMode = macdev2enetinterface(mac_dev);
10751 + memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
10752 + param.macId = mac_dev->cell_index;
10753 + param.h_Fm = (handle_t)mac_dev->fm;
10754 + param.mdioIrq = NO_IRQ;
10755 + param.f_Exception = mac_exception;
10756 + param.f_Event = mac_exception;
10757 + param.h_App = mac_dev;
10758 +
10759 + priv->fm_mac = fm_mac_config(&param);
10760 + if (unlikely(priv->fm_mac == NULL)) {
10761 + _errno = -EINVAL;
10762 + goto _return;
10763 + }
10764 +
10765 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
10766 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10767 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
10768 +
10769 + _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
10770 + if (unlikely(_errno < 0))
10771 + goto _return_fm_mac_free;
10772 +
10773 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
10774 + if (unlikely(_errno < 0))
10775 + goto _return_fm_mac_free;
10776 +
10777 + _errno = fm_mac_init(priv->fm_mac);
10778 + if (unlikely(_errno < 0))
10779 + goto _return_fm_mac_free;
10780 +
10781 + dev_info(mac_dev->dev, "FMan MEMAC\n");
10782 +
10783 + goto _return;
10784 +
10785 +_return_fm_mac_free:
10786 + fm_mac_free(priv->fm_mac);
10787 +
10788 +_return:
10789 + return _errno;
10790 +}
10791 +
10792 +static int __cold start(struct mac_device *mac_dev)
10793 +{
10794 + int _errno;
10795 + struct phy_device *phy_dev = mac_dev->phy_dev;
10796 +
10797 + _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
10798 +
10799 + if (!_errno && phy_dev)
10800 + phy_start(phy_dev);
10801 +
10802 + return _errno;
10803 +}
10804 +
10805 +static int __cold stop(struct mac_device *mac_dev)
10806 +{
10807 + if (mac_dev->phy_dev)
10808 + phy_stop(mac_dev->phy_dev);
10809 +
10810 + return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
10811 +}
10812 +
10813 +static int __cold set_multi(struct net_device *net_dev,
10814 + struct mac_device *mac_dev)
10815 +{
10816 + struct mac_priv_s *mac_priv;
10817 + struct mac_address *old_addr, *tmp;
10818 + struct netdev_hw_addr *ha;
10819 + int _errno;
10820 +
10821 + mac_priv = macdev_priv(mac_dev);
10822 +
10823 + /* Clear previous address list */
10824 + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
10825 + _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
10826 + (t_EnetAddr *)old_addr->addr);
10827 + if (_errno < 0)
10828 + return _errno;
10829 +
10830 + list_del(&old_addr->list);
10831 + kfree(old_addr);
10832 + }
10833 +
10834 + /* Add all the addresses from the new list */
10835 + netdev_for_each_mc_addr(ha, net_dev) {
10836 + _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
10837 + (t_EnetAddr *)ha->addr);
10838 + if (_errno < 0)
10839 + return _errno;
10840 +
10841 + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
10842 + if (!tmp) {
10843 + dev_err(mac_dev->dev, "Out of memory\n");
10844 + return -ENOMEM;
10845 + }
10846 + memcpy(tmp->addr, ha->addr, ETH_ALEN);
10847 + list_add(&tmp->list, &mac_dev->mc_addr_list);
10848 + }
10849 + return 0;
10850 +}
10851 +
10852 +/* Avoid redundant calls to FMD if the MAC driver already holds the desired
10853 + * active PAUSE settings; otherwise, reflect the new active settings in
10854 + * FMan.
10855 + */
10856 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
10857 +{
10858 + struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
10859 + int _errno = 0;
10860 +
10861 + if (unlikely(rx != mac_dev->rx_pause_active)) {
10862 + _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
10863 + if (likely(_errno == 0))
10864 + mac_dev->rx_pause_active = rx;
10865 + }
10866 +
10867 + if (unlikely(tx != mac_dev->tx_pause_active)) {
10868 + _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
10869 + if (likely(_errno == 0))
10870 + mac_dev->tx_pause_active = tx;
10871 + }
10872 +
10873 + return _errno;
10874 +}
10875 +EXPORT_SYMBOL(set_mac_active_pause);
10876 +
10877 +/* Determine the MAC RX/TX PAUSE frames settings based on PHY
10878 + * autonegotiation or values set by ethtool.
10879 + */
10880 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
10881 +{
10882 + struct phy_device *phy_dev = mac_dev->phy_dev;
10883 + u16 lcl_adv, rmt_adv;
10884 + u8 flowctrl;
10885 +
10886 + *rx_pause = *tx_pause = false;
10887 +
10888 + if (!phy_dev->duplex)
10889 + return;
10890 +
10891 + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
10892 + * are those set by ethtool.
10893 + */
10894 + if (!mac_dev->autoneg_pause) {
10895 + *rx_pause = mac_dev->rx_pause_req;
10896 + *tx_pause = mac_dev->tx_pause_req;
10897 + return;
10898 + }
10899 +
10900 + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
10901 + * settings depend on the result of the link negotiation.
10902 + */
10903 +
10904 + /* get local capabilities */
10905 + lcl_adv = 0;
10906 + if (phy_dev->advertising & ADVERTISED_Pause)
10907 + lcl_adv |= ADVERTISE_PAUSE_CAP;
10908 + if (phy_dev->advertising & ADVERTISED_Asym_Pause)
10909 + lcl_adv |= ADVERTISE_PAUSE_ASYM;
10910 +
10911 + /* get link partner capabilities */
10912 + rmt_adv = 0;
10913 + if (phy_dev->pause)
10914 + rmt_adv |= LPA_PAUSE_CAP;
10915 + if (phy_dev->asym_pause)
10916 + rmt_adv |= LPA_PAUSE_ASYM;
10917 +
10918 + /* Calculate TX/RX settings based on local and peer advertised
10919 + * symmetric/asymmetric PAUSE capabilities.
10920 + */
10921 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
10922 + if (flowctrl & FLOW_CTRL_RX)
10923 + *rx_pause = true;
10924 + if (flowctrl & FLOW_CTRL_TX)
10925 + *tx_pause = true;
10926 +}
10927 +EXPORT_SYMBOL(get_pause_cfg);
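10928 +
+/* Resolution example (IEEE 802.3 Annex 28B.3, via
+ * mii_resolve_flowctrl_fdx()): if both ends advertise symmetric PAUSE
+ * (lcl_adv = ADVERTISE_PAUSE_CAP, rmt_adv = LPA_PAUSE_CAP), the result is
+ * FLOW_CTRL_TX | FLOW_CTRL_RX, so PAUSE frames flow in both directions.
+ */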
10928 +
10929 +static void adjust_link_void(struct net_device *net_dev)
10930 +{
10931 +}
10932 +
10933 +static void adjust_link(struct net_device *net_dev)
10934 +{
10935 + struct dpa_priv_s *priv = netdev_priv(net_dev);
10936 + struct mac_device *mac_dev = priv->mac_dev;
10937 + struct phy_device *phy_dev = mac_dev->phy_dev;
10938 + struct fm_mac_dev *fm_mac_dev;
10939 + bool rx_pause, tx_pause;
10940 + int _errno;
10941 +
10942 + fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
10943 + fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
10944 + phy_dev->duplex);
10945 +
10946 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
10947 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
10948 + if (unlikely(_errno < 0))
10949 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
10950 +}
10951 +
10952 +/* Initializes driver's PHY state, and attaches to the PHY.
10953 + * Returns 0 on success.
10954 + */
10955 +static int dtsec_init_phy(struct net_device *net_dev,
10956 + struct mac_device *mac_dev)
10957 +{
10958 + struct phy_device *phy_dev;
10959 +
10960 + if (of_phy_is_fixed_link(mac_dev->phy_node))
10961 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
10962 + 0, mac_dev->phy_if);
10963 + else
10964 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
10965 + &adjust_link, 0, mac_dev->phy_if);
10966 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
10967 + netdev_err(net_dev, "Could not connect to PHY %s\n",
10968 + mac_dev->phy_node ?
10969 + mac_dev->phy_node->full_name :
10970 + mac_dev->fixed_bus_id);
10971 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
10972 + }
10973 +
10974 + /* Remove any features not supported by the controller */
10975 + phy_dev->supported &= mac_dev->if_support;
10976 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
10977 + * as most of the PHY drivers do not enable them by default.
10978 + */
10979 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
10980 + phy_dev->advertising = phy_dev->supported;
10981 +
10982 + mac_dev->phy_dev = phy_dev;
10983 +
10984 + return 0;
10985 +}
10986 +
10987 +static int xgmac_init_phy(struct net_device *net_dev,
10988 + struct mac_device *mac_dev)
10989 +{
10990 + struct phy_device *phy_dev;
10991 +
10992 + if (of_phy_is_fixed_link(mac_dev->phy_node))
10993 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
10994 + 0, mac_dev->phy_if);
10995 + else
10996 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
10997 + &adjust_link_void, 0, mac_dev->phy_if);
10998 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
10999 + netdev_err(net_dev, "Could not attach to PHY %s\n",
11000 + mac_dev->phy_node ?
11001 + mac_dev->phy_node->full_name :
11002 + mac_dev->fixed_bus_id);
11003 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
11004 + }
11005 +
11006 + phy_dev->supported &= mac_dev->if_support;
11007 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
11008 + * as most of the PHY drivers do not enable them by default.
11009 + */
11010 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
11011 + phy_dev->advertising = phy_dev->supported;
11012 +
11013 + mac_dev->phy_dev = phy_dev;
11014 +
11015 + return 0;
11016 +}
11017 +
11018 +static int memac_init_phy(struct net_device *net_dev,
11019 + struct mac_device *mac_dev)
11020 +{
11021 + struct phy_device *phy_dev;
11022 +
11023 + if (of_phy_is_fixed_link(mac_dev->phy_node)) {
11024 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
11025 + 0, mac_dev->phy_if);
11026 + } else if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
11027 + (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
11028 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
11029 + &adjust_link_void, 0,
11030 + mac_dev->phy_if);
11031 + } else {
11032 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
11033 + &adjust_link, 0, mac_dev->phy_if);
11034 + }
11035 +
11036 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
11037 + netdev_err(net_dev, "Could not connect to PHY %s\n",
11038 + mac_dev->phy_node ?
11039 + mac_dev->phy_node->full_name :
11040 + mac_dev->fixed_bus_id);
11041 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
11042 + }
11043 +
11044 + /* Remove any features not supported by the controller */
11045 + phy_dev->supported &= mac_dev->if_support;
11046 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
11047 + * as most of the PHY drivers do not enable them by default.
11048 + */
11049 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
11050 + phy_dev->advertising = phy_dev->supported;
11051 +
11052 + mac_dev->phy_dev = phy_dev;
11053 +
11054 + return 0;
11055 +}
11056 +
11057 +static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
11058 +{
11059 + int _errno, __errno;
11060 +
11061 + _errno = fm_mac_disable(fm_mac_dev);
11062 + __errno = fm_mac_free(fm_mac_dev);
11063 +
11064 + if (unlikely(__errno < 0))
11065 + _errno = __errno;
11066 +
11067 + return _errno;
11068 +}
11069 +
11070 +static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
11071 +{
11072 +	const struct mac_priv_s *priv;
+
11073 +	priv = macdev_priv(mac_dev);
11074 + return priv->fm_mac;
11075 +}
11076 +
11077 +static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11078 +{
11079 + struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
11080 + int i = 0, n = nn;
11081 +
11082 + FM_DMP_SUBTITLE(buf, n, "\n");
11083 +
11084 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
11085 +
11086 + FM_DMP_V32(buf, n, p_mm, tsec_id);
11087 + FM_DMP_V32(buf, n, p_mm, tsec_id2);
11088 + FM_DMP_V32(buf, n, p_mm, ievent);
11089 + FM_DMP_V32(buf, n, p_mm, imask);
11090 + FM_DMP_V32(buf, n, p_mm, ecntrl);
11091 + FM_DMP_V32(buf, n, p_mm, ptv);
11092 + FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
11093 + FM_DMP_V32(buf, n, p_mm, tmr_pevent);
11094 + FM_DMP_V32(buf, n, p_mm, tmr_pemask);
11095 + FM_DMP_V32(buf, n, p_mm, tctrl);
11096 + FM_DMP_V32(buf, n, p_mm, rctrl);
11097 + FM_DMP_V32(buf, n, p_mm, maccfg1);
11098 + FM_DMP_V32(buf, n, p_mm, maccfg2);
11099 + FM_DMP_V32(buf, n, p_mm, ipgifg);
11100 + FM_DMP_V32(buf, n, p_mm, hafdup);
11101 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11102 +
11103 + FM_DMP_V32(buf, n, p_mm, macstnaddr1);
11104 + FM_DMP_V32(buf, n, p_mm, macstnaddr2);
11105 +
11106 + for (i = 0; i < 7; ++i) {
11107 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
11108 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
11109 + }
11110 +
11111 + FM_DMP_V32(buf, n, p_mm, car1);
11112 + FM_DMP_V32(buf, n, p_mm, car2);
11113 +
11114 + return n;
11115 +}
11116 +
11117 +static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11118 +{
11119 + struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
11120 + int n = nn;
11121 +
11122 + FM_DMP_SUBTITLE(buf, n, "\n");
11123 +	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC-%d", h_mac->cell_index);
11124 +
11125 + FM_DMP_V32(buf, n, p_mm, tgec_id);
11126 + FM_DMP_V32(buf, n, p_mm, command_config);
11127 + FM_DMP_V32(buf, n, p_mm, mac_addr_0);
11128 + FM_DMP_V32(buf, n, p_mm, mac_addr_1);
11129 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11130 + FM_DMP_V32(buf, n, p_mm, pause_quant);
11131 + FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
11132 + FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
11133 + FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
11134 + FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
11135 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
11136 + FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
11137 + FM_DMP_V32(buf, n, p_mm, mdio_command);
11138 + FM_DMP_V32(buf, n, p_mm, mdio_data);
11139 + FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
11140 + FM_DMP_V32(buf, n, p_mm, status);
11141 + FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
11142 + FM_DMP_V32(buf, n, p_mm, mac_addr_2);
11143 + FM_DMP_V32(buf, n, p_mm, mac_addr_3);
11144 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
11145 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
11146 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
11147 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
11148 + FM_DMP_V32(buf, n, p_mm, imask);
11149 + FM_DMP_V32(buf, n, p_mm, ievent);
11150 +
11151 + return n;
11152 +}
11153 +
11154 +static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11155 +{
11156 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11157 + int i = 0, n = nn;
11158 +
11159 + FM_DMP_SUBTITLE(buf, n, "\n");
11160 +	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d", h_mac->cell_index);
11161 +
11162 + FM_DMP_V32(buf, n, p_mm, command_config);
11163 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
11164 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
11165 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11166 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
11167 + FM_DMP_V32(buf, n, p_mm, ievent);
11168 + FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
11169 + FM_DMP_V32(buf, n, p_mm, imask);
11170 +
11171 + for (i = 0; i < 4; ++i)
11172 + FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
11173 +
11174 + for (i = 0; i < 4; ++i)
11175 + FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
11176 +
11177 + FM_DMP_V32(buf, n, p_mm, rx_pause_status);
11178 +
11179 + for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
11180 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
11181 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
11182 + }
11183 +
11184 + FM_DMP_V32(buf, n, p_mm, lpwake_timer);
11185 + FM_DMP_V32(buf, n, p_mm, sleep_timer);
11186 + FM_DMP_V32(buf, n, p_mm, statn_config);
11187 + FM_DMP_V32(buf, n, p_mm, if_mode);
11188 + FM_DMP_V32(buf, n, p_mm, if_status);
11189 + FM_DMP_V32(buf, n, p_mm, hg_config);
11190 + FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
11191 + FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
11192 + FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
11193 + FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
11194 + FM_DMP_V32(buf, n, p_mm, rhm);
11195 + FM_DMP_V32(buf, n, p_mm, thm);
11196 +
11197 + return n;
11198 +}
11199 +
11200 +static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
11201 +{
11202 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11203 + int n = nn;
11204 +
11205 + FM_DMP_SUBTITLE(buf, n, "\n");
11206 +	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Rx stats", h_mac->cell_index);
11207 +
11208 + /* Rx Statistics Counter */
11209 + FM_DMP_V32(buf, n, p_mm, reoct_l);
11210 + FM_DMP_V32(buf, n, p_mm, reoct_u);
11211 + FM_DMP_V32(buf, n, p_mm, roct_l);
11212 + FM_DMP_V32(buf, n, p_mm, roct_u);
11213 + FM_DMP_V32(buf, n, p_mm, raln_l);
11214 + FM_DMP_V32(buf, n, p_mm, raln_u);
11215 + FM_DMP_V32(buf, n, p_mm, rxpf_l);
11216 + FM_DMP_V32(buf, n, p_mm, rxpf_u);
11217 + FM_DMP_V32(buf, n, p_mm, rfrm_l);
11218 + FM_DMP_V32(buf, n, p_mm, rfrm_u);
11219 + FM_DMP_V32(buf, n, p_mm, rfcs_l);
11220 + FM_DMP_V32(buf, n, p_mm, rfcs_u);
11221 + FM_DMP_V32(buf, n, p_mm, rvlan_l);
11222 + FM_DMP_V32(buf, n, p_mm, rvlan_u);
11223 + FM_DMP_V32(buf, n, p_mm, rerr_l);
11224 + FM_DMP_V32(buf, n, p_mm, rerr_u);
11225 + FM_DMP_V32(buf, n, p_mm, ruca_l);
11226 + FM_DMP_V32(buf, n, p_mm, ruca_u);
11227 + FM_DMP_V32(buf, n, p_mm, rmca_l);
11228 + FM_DMP_V32(buf, n, p_mm, rmca_u);
11229 + FM_DMP_V32(buf, n, p_mm, rbca_l);
11230 + FM_DMP_V32(buf, n, p_mm, rbca_u);
11231 + FM_DMP_V32(buf, n, p_mm, rdrp_l);
11232 + FM_DMP_V32(buf, n, p_mm, rdrp_u);
11233 + FM_DMP_V32(buf, n, p_mm, rpkt_l);
11234 + FM_DMP_V32(buf, n, p_mm, rpkt_u);
11235 + FM_DMP_V32(buf, n, p_mm, rund_l);
11236 + FM_DMP_V32(buf, n, p_mm, rund_u);
11237 + FM_DMP_V32(buf, n, p_mm, r64_l);
11238 + FM_DMP_V32(buf, n, p_mm, r64_u);
11239 + FM_DMP_V32(buf, n, p_mm, r127_l);
11240 + FM_DMP_V32(buf, n, p_mm, r127_u);
11241 + FM_DMP_V32(buf, n, p_mm, r255_l);
11242 + FM_DMP_V32(buf, n, p_mm, r255_u);
11243 + FM_DMP_V32(buf, n, p_mm, r511_l);
11244 + FM_DMP_V32(buf, n, p_mm, r511_u);
11245 + FM_DMP_V32(buf, n, p_mm, r1023_l);
11246 + FM_DMP_V32(buf, n, p_mm, r1023_u);
11247 + FM_DMP_V32(buf, n, p_mm, r1518_l);
11248 + FM_DMP_V32(buf, n, p_mm, r1518_u);
11249 + FM_DMP_V32(buf, n, p_mm, r1519x_l);
11250 + FM_DMP_V32(buf, n, p_mm, r1519x_u);
11251 + FM_DMP_V32(buf, n, p_mm, rovr_l);
11252 + FM_DMP_V32(buf, n, p_mm, rovr_u);
11253 + FM_DMP_V32(buf, n, p_mm, rjbr_l);
11254 + FM_DMP_V32(buf, n, p_mm, rjbr_u);
11255 + FM_DMP_V32(buf, n, p_mm, rfrg_l);
11256 + FM_DMP_V32(buf, n, p_mm, rfrg_u);
11257 + FM_DMP_V32(buf, n, p_mm, rcnp_l);
11258 + FM_DMP_V32(buf, n, p_mm, rcnp_u);
11259 + FM_DMP_V32(buf, n, p_mm, rdrntp_l);
11260 + FM_DMP_V32(buf, n, p_mm, rdrntp_u);
11261 +
11262 + return n;
11263 +}
11264 +
11265 +static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
11266 +{
11267 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11268 + int n = nn;
11269 +
11270 + FM_DMP_SUBTITLE(buf, n, "\n");
11271 +	FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC-%d Tx stats", h_mac->cell_index);
11272 +
11273 +
11274 + /* Tx Statistics Counter */
11275 + FM_DMP_V32(buf, n, p_mm, teoct_l);
11276 + FM_DMP_V32(buf, n, p_mm, teoct_u);
11277 + FM_DMP_V32(buf, n, p_mm, toct_l);
11278 + FM_DMP_V32(buf, n, p_mm, toct_u);
11279 + FM_DMP_V32(buf, n, p_mm, txpf_l);
11280 + FM_DMP_V32(buf, n, p_mm, txpf_u);
11281 + FM_DMP_V32(buf, n, p_mm, tfrm_l);
11282 + FM_DMP_V32(buf, n, p_mm, tfrm_u);
11283 + FM_DMP_V32(buf, n, p_mm, tfcs_l);
11284 + FM_DMP_V32(buf, n, p_mm, tfcs_u);
11285 + FM_DMP_V32(buf, n, p_mm, tvlan_l);
11286 + FM_DMP_V32(buf, n, p_mm, tvlan_u);
11287 + FM_DMP_V32(buf, n, p_mm, terr_l);
11288 + FM_DMP_V32(buf, n, p_mm, terr_u);
11289 + FM_DMP_V32(buf, n, p_mm, tuca_l);
11290 + FM_DMP_V32(buf, n, p_mm, tuca_u);
11291 + FM_DMP_V32(buf, n, p_mm, tmca_l);
11292 + FM_DMP_V32(buf, n, p_mm, tmca_u);
11293 + FM_DMP_V32(buf, n, p_mm, tbca_l);
11294 + FM_DMP_V32(buf, n, p_mm, tbca_u);
11295 + FM_DMP_V32(buf, n, p_mm, tpkt_l);
11296 + FM_DMP_V32(buf, n, p_mm, tpkt_u);
11297 + FM_DMP_V32(buf, n, p_mm, tund_l);
11298 + FM_DMP_V32(buf, n, p_mm, tund_u);
11299 + FM_DMP_V32(buf, n, p_mm, t64_l);
11300 + FM_DMP_V32(buf, n, p_mm, t64_u);
11301 + FM_DMP_V32(buf, n, p_mm, t127_l);
11302 + FM_DMP_V32(buf, n, p_mm, t127_u);
11303 + FM_DMP_V32(buf, n, p_mm, t255_l);
11304 + FM_DMP_V32(buf, n, p_mm, t255_u);
11305 + FM_DMP_V32(buf, n, p_mm, t511_l);
11306 + FM_DMP_V32(buf, n, p_mm, t511_u);
11307 + FM_DMP_V32(buf, n, p_mm, t1023_l);
11308 + FM_DMP_V32(buf, n, p_mm, t1023_u);
11309 + FM_DMP_V32(buf, n, p_mm, t1518_l);
11310 + FM_DMP_V32(buf, n, p_mm, t1518_u);
11311 + FM_DMP_V32(buf, n, p_mm, t1519x_l);
11312 + FM_DMP_V32(buf, n, p_mm, t1519x_u);
11313 + FM_DMP_V32(buf, n, p_mm, tcnp_l);
11314 + FM_DMP_V32(buf, n, p_mm, tcnp_u);
11315 +
11316 + return n;
11317 +}
11318 +
11319 +int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11320 +{
11321 + int n = nn;
11322 +
11323 + n = h_mac->dump_mac_regs(h_mac, buf, n);
11324 +
11325 + return n;
11326 +}
11327 +EXPORT_SYMBOL(fm_mac_dump_regs);
11328 +
11329 +int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
11330 +{
11331 + int n = nn;
11332 +
11333 +	if (h_mac->dump_mac_rx_stats)
11334 + n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
11335 +
11336 + return n;
11337 +}
11338 +EXPORT_SYMBOL(fm_mac_dump_rx_stats);
11339 +
11340 +int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
11341 +{
11342 + int n = nn;
11343 +
11344 +	if (h_mac->dump_mac_tx_stats)
11345 + n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
11346 +
11347 + return n;
11348 +}
11349 +EXPORT_SYMBOL(fm_mac_dump_tx_stats);
11350 +
11351 +static void __cold setup_dtsec(struct mac_device *mac_dev)
11352 +{
11353 + mac_dev->init_phy = dtsec_init_phy;
11354 + mac_dev->init = init;
11355 + mac_dev->start = start;
11356 + mac_dev->stop = stop;
11357 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11358 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11359 + mac_dev->set_multi = set_multi;
11360 + mac_dev->uninit = uninit;
11361 + mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
11362 + mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
11363 + mac_dev->get_mac_handle = get_mac_handle;
11364 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11365 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11366 + mac_dev->fm_rtc_enable = fm_rtc_enable;
11367 + mac_dev->fm_rtc_disable = fm_rtc_disable;
11368 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
11369 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
11370 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
11371 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
11372 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
11373 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
11374 + mac_dev->set_wol = fm_mac_set_wol;
11375 + mac_dev->dump_mac_regs = dtsec_dump_regs;
11376 +}
11377 +
11378 +static void __cold setup_xgmac(struct mac_device *mac_dev)
11379 +{
11380 + mac_dev->init_phy = xgmac_init_phy;
11381 + mac_dev->init = init;
11382 + mac_dev->start = start;
11383 + mac_dev->stop = stop;
11384 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11385 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11386 + mac_dev->set_multi = set_multi;
11387 + mac_dev->uninit = uninit;
11388 + mac_dev->get_mac_handle = get_mac_handle;
11389 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11390 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11391 + mac_dev->set_wol = fm_mac_set_wol;
11392 + mac_dev->dump_mac_regs = xgmac_dump_regs;
11393 +}
11394 +
11395 +static void __cold setup_memac(struct mac_device *mac_dev)
11396 +{
11397 + mac_dev->init_phy = memac_init_phy;
11398 + mac_dev->init = memac_init;
11399 + mac_dev->start = start;
11400 + mac_dev->stop = stop;
11401 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11402 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11403 + mac_dev->set_multi = set_multi;
11404 + mac_dev->uninit = uninit;
11405 + mac_dev->get_mac_handle = get_mac_handle;
11406 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11407 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11408 + mac_dev->fm_rtc_enable = fm_rtc_enable;
11409 + mac_dev->fm_rtc_disable = fm_rtc_disable;
11410 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
11411 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
11412 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
11413 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
11414 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
11415 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
11416 + mac_dev->set_wol = fm_mac_set_wol;
11417 + mac_dev->dump_mac_regs = memac_dump_regs;
11418 + mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
11419 + mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
11420 +}
11421 +
11422 +void (*const mac_setup[])(struct mac_device *mac_dev) = {
11423 + [DTSEC] = setup_dtsec,
11424 + [XGMAC] = setup_xgmac,
11425 + [MEMAC] = setup_memac
11426 +};
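+
+/* mac.c indexes mac_setup[] (and mac_sizeof_priv[]) with the position of
+ * the matched of_device_id inside its mac_match[] table, so all of these
+ * arrays must stay ordered consistently with the DTSEC/XGMAC/MEMAC enum.
+ */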
11427 --- /dev/null
11428 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
11429 @@ -0,0 +1,489 @@
11430 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
11431 + *
11432 + * Redistribution and use in source and binary forms, with or without
11433 + * modification, are permitted provided that the following conditions are met:
11434 + * * Redistributions of source code must retain the above copyright
11435 + * notice, this list of conditions and the following disclaimer.
11436 + * * Redistributions in binary form must reproduce the above copyright
11437 + * notice, this list of conditions and the following disclaimer in the
11438 + * documentation and/or other materials provided with the distribution.
11439 + * * Neither the name of Freescale Semiconductor nor the
11440 + * names of its contributors may be used to endorse or promote products
11441 + * derived from this software without specific prior written permission.
11442 + *
11443 + *
11444 + * ALTERNATIVELY, this software may be distributed under the terms of the
11445 + * GNU General Public License ("GPL") as published by the Free Software
11446 + * Foundation, either version 2 of that License or (at your option) any
11447 + * later version.
11448 + *
11449 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11450 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11451 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11452 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11453 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11454 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11455 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11456 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11457 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11458 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11459 + */
11460 +
11461 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
11462 +#define pr_fmt(fmt) \
11463 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
11464 + KBUILD_BASENAME".c", __LINE__, __func__
11465 +#else
11466 +#define pr_fmt(fmt) \
11467 + KBUILD_MODNAME ": " fmt
11468 +#endif
11469 +
11470 +#include <linux/init.h>
11471 +#include <linux/module.h>
11472 +#include <linux/of_address.h>
11473 +#include <linux/of_platform.h>
11474 +#include <linux/of_net.h>
11475 +#include <linux/of_mdio.h>
11476 +#include <linux/phy_fixed.h>
11477 +#include <linux/device.h>
11478 +#include <linux/phy.h>
11479 +#include <linux/io.h>
11480 +
11481 +#include "lnxwrp_fm_ext.h"
11482 +
11483 +#include "mac.h"
11484 +
11485 +#define DTSEC_SUPPORTED \
11486 + (SUPPORTED_10baseT_Half \
11487 + | SUPPORTED_10baseT_Full \
11488 + | SUPPORTED_100baseT_Half \
11489 + | SUPPORTED_100baseT_Full \
11490 + | SUPPORTED_Autoneg \
11491 + | SUPPORTED_Pause \
11492 + | SUPPORTED_Asym_Pause \
11493 + | SUPPORTED_MII)
11494 +
11495 +static const char phy_str[][11] = {
11496 + [PHY_INTERFACE_MODE_MII] = "mii",
11497 + [PHY_INTERFACE_MODE_GMII] = "gmii",
11498 + [PHY_INTERFACE_MODE_SGMII] = "sgmii",
11499 + [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
11500 + [PHY_INTERFACE_MODE_TBI] = "tbi",
11501 + [PHY_INTERFACE_MODE_RMII] = "rmii",
11502 + [PHY_INTERFACE_MODE_RGMII] = "rgmii",
11503 + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
11504 + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
11505 + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
11506 + [PHY_INTERFACE_MODE_RTBI] = "rtbi",
11507 + [PHY_INTERFACE_MODE_XGMII] = "xgmii",
11508 + [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500",
11509 +};
11510 +
11511 +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
11512 +{
11513 + int i;
11514 +
11515 + for (i = 0; i < ARRAY_SIZE(phy_str); i++)
11516 + if (strcmp(str, phy_str[i]) == 0)
11517 + return (phy_interface_t)i;
11518 +
11519 + return PHY_INTERFACE_MODE_MII;
11520 +}
11521 +
11522 +static const uint16_t phy2speed[] = {
11523 + [PHY_INTERFACE_MODE_MII] = SPEED_100,
11524 + [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
11525 + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
11526 + [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
11527 + [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
11528 + [PHY_INTERFACE_MODE_RMII] = SPEED_100,
11529 + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
11530 + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
11531 + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
11532 + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
11533 + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
11534 + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
11535 + [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500,
11536 +};
11537 +
11538 +static struct mac_device * __cold
11539 +alloc_macdev(struct device *dev, size_t sizeof_priv,
11540 + void (*setup)(struct mac_device *mac_dev))
11541 +{
11542 + struct mac_device *mac_dev;
11543 +
11544 + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
11545 + if (unlikely(mac_dev == NULL))
11546 + mac_dev = ERR_PTR(-ENOMEM);
11547 + else {
11548 + mac_dev->dev = dev;
11549 + dev_set_drvdata(dev, mac_dev);
11550 + setup(mac_dev);
11551 + }
11552 +
11553 + return mac_dev;
11554 +}
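11555 +
+/* The MAC-specific private area lives in the same allocation, immediately
+ * after struct mac_device; macdev_priv() (from mac.h) is assumed to reduce
+ * to something like:
+ *
+ *	static inline void *macdev_priv(const struct mac_device *mac_dev)
+ *	{
+ *		return (void *)(mac_dev + 1);
+ *	}
+ */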
11555 +
11556 +static int __cold free_macdev(struct mac_device *mac_dev)
11557 +{
11558 + dev_set_drvdata(mac_dev->dev, NULL);
11559 +
11560 + return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
11561 +}
11562 +
11563 +static const struct of_device_id mac_match[] = {
11564 + [DTSEC] = {
11565 + .compatible = "fsl,fman-1g-mac"
11566 + },
11567 + [XGMAC] = {
11568 + .compatible = "fsl,fman-10g-mac"
11569 + },
11570 + [MEMAC] = {
11571 + .compatible = "fsl,fman-memac"
11572 + },
11573 + {}
11574 +};
11575 +MODULE_DEVICE_TABLE(of, mac_match);
11576 +
11577 +static int __cold mac_probe(struct platform_device *_of_dev)
11578 +{
11579 + int _errno, i;
11580 + struct device *dev;
11581 + struct device_node *mac_node, *dev_node;
11582 + struct mac_device *mac_dev;
11583 + struct platform_device *of_dev;
11584 + struct resource res;
11585 + const uint8_t *mac_addr;
11586 +	const char *char_prop = NULL;
11587 + int nph;
11588 + u32 cell_index;
11589 + const struct of_device_id *match;
11590 +
11591 + dev = &_of_dev->dev;
11592 + mac_node = dev->of_node;
11593 +
11594 + match = of_match_device(mac_match, dev);
11595 + if (!match)
11596 + return -EINVAL;
11597 +
11598 + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
11599 + i++)
11600 + ;
11601 + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
11602 +
11603 + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
11604 + if (IS_ERR(mac_dev)) {
11605 + _errno = PTR_ERR(mac_dev);
11606 + dev_err(dev, "alloc_macdev() = %d\n", _errno);
11607 + goto _return;
11608 + }
11609 +
11610 + INIT_LIST_HEAD(&mac_dev->mc_addr_list);
11611 +
11612 + /* Get the FM node */
11613 + dev_node = of_get_parent(mac_node);
11614 + if (unlikely(dev_node == NULL)) {
11615 + dev_err(dev, "of_get_parent(%s) failed\n",
11616 + mac_node->full_name);
11617 + _errno = -EINVAL;
11618 + goto _return_dev_set_drvdata;
11619 + }
11620 +
11621 + of_dev = of_find_device_by_node(dev_node);
11622 + if (unlikely(of_dev == NULL)) {
11623 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
11624 + dev_node->full_name);
11625 + _errno = -EINVAL;
11626 + goto _return_of_node_put;
11627 + }
11628 +
11629 + mac_dev->fm_dev = fm_bind(&of_dev->dev);
11630 + if (unlikely(mac_dev->fm_dev == NULL)) {
11631 + dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
11632 + _errno = -ENODEV;
11633 + goto _return_of_node_put;
11634 + }
11635 +
11636 + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
11637 + of_node_put(dev_node);
11638 +
11639 + /* Get the address of the memory mapped registers */
11640 + _errno = of_address_to_resource(mac_node, 0, &res);
11641 + if (unlikely(_errno < 0)) {
11642 + dev_err(dev, "of_address_to_resource(%s) = %d\n",
11643 + mac_node->full_name, _errno);
11644 + goto _return_dev_set_drvdata;
11645 + }
11646 +
11647 + mac_dev->res = __devm_request_region(
11648 + dev,
11649 + fm_get_mem_region(mac_dev->fm_dev),
11650 + res.start, res.end + 1 - res.start, "mac");
11651 + if (unlikely(mac_dev->res == NULL)) {
11652 +		dev_err(dev, "__devm_request_region(mac) failed\n");
11653 + _errno = -EBUSY;
11654 + goto _return_dev_set_drvdata;
11655 + }
11656 +
11657 + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
11658 + mac_dev->res->end + 1
11659 + - mac_dev->res->start);
11660 + if (unlikely(mac_dev->vaddr == NULL)) {
11661 + dev_err(dev, "devm_ioremap() failed\n");
11662 + _errno = -EIO;
11663 + goto _return_dev_set_drvdata;
11664 + }
11665 +
11666 +#define TBIPA_OFFSET 0x1c
11667 +#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
11668 + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
11669 + if (mac_dev->tbi_node) {
11670 + u32 tbiaddr = TBIPA_DEFAULT_ADDR;
11671 + const __be32 *tbi_reg;
11672 + void __iomem *addr;
11673 +
11674 + tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
11675 + if (tbi_reg)
11676 + tbiaddr = be32_to_cpup(tbi_reg);
11677 + addr = mac_dev->vaddr + TBIPA_OFFSET;
11678 + /* TODO: out_be32 does not exist on ARM */
11679 + out_be32(addr, tbiaddr);
11680 + }
11681 +
11682 + if (!of_device_is_available(mac_node)) {
11683 + devm_iounmap(dev, mac_dev->vaddr);
11684 + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
11685 + res.start, res.end + 1 - res.start);
11686 + fm_unbind(mac_dev->fm_dev);
11687 + devm_kfree(dev, mac_dev);
11688 + dev_set_drvdata(dev, NULL);
11689 + return -ENODEV;
11690 + }
11691 +
11692 + /* Get the cell-index */
11693 + _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
11694 + if (unlikely(_errno)) {
11695 + dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
11696 + mac_node->full_name);
11697 + goto _return_dev_set_drvdata;
11698 + }
11699 + mac_dev->cell_index = (uint8_t)cell_index;
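+	/* cell-index values of 8 and above presumably denote the 10G MACs on
+	 * SoCs where the 1G and 10G MACs share one FMan numbering space; the
+	 * subtraction below re-bases them to a per-type index.
+	 */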
11700 + if (mac_dev->cell_index >= 8)
11701 + mac_dev->cell_index -= 8;
11702 +
11703 + /* Get the MAC address */
11704 + mac_addr = of_get_mac_address(mac_node);
11705 +	if (unlikely(IS_ERR_OR_NULL(mac_addr))) {
11706 + dev_err(dev, "of_get_mac_address(%s) failed\n",
11707 + mac_node->full_name);
11708 + _errno = -EINVAL;
11709 + goto _return_dev_set_drvdata;
11710 + }
11711 + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
11712 +
11713 + /* Verify the number of port handles */
11714 + nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
11715 + if (unlikely(nph < 0)) {
11716 + dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
11717 + mac_node->full_name);
11718 + _errno = nph;
11719 + goto _return_dev_set_drvdata;
11720 + }
11721 +
11722 + if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
11723 + dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
11724 + mac_node->full_name);
11725 + _errno = -EINVAL;
11726 + goto _return_dev_set_drvdata;
11727 + }
11728 +
11729 + for_each_port_device(i, mac_dev->port_dev) {
11730 + dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
11731 + if (unlikely(dev_node == NULL)) {
11732 + dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
11733 + mac_node->full_name);
11734 + _errno = -EINVAL;
11735 + goto _return_of_node_put;
11736 + }
11737 +
11738 + of_dev = of_find_device_by_node(dev_node);
11739 + if (unlikely(of_dev == NULL)) {
11740 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
11741 + dev_node->full_name);
11742 + _errno = -EINVAL;
11743 + goto _return_of_node_put;
11744 + }
11745 +
11746 + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
11747 + if (unlikely(mac_dev->port_dev[i] == NULL)) {
11748 + dev_err(dev, "dev_get_drvdata(%s) failed\n",
11749 + dev_node->full_name);
11750 + _errno = -EINVAL;
11751 + goto _return_of_node_put;
11752 + }
11753 + of_node_put(dev_node);
11754 + }
11755 +
11756 + /* Get the PHY connection type */
11757 + _errno = of_property_read_string(mac_node, "phy-connection-type",
11758 + &char_prop);
11759 + if (unlikely(_errno)) {
11760 + dev_warn(dev,
11761 + "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
11762 + mac_node->full_name);
11763 + mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
11764 +	} else {
11765 +		mac_dev->phy_if = str2phy(char_prop);
11766 +	}
11766 +
11767 + mac_dev->link = false;
11768 + mac_dev->half_duplex = false;
11769 + mac_dev->speed = phy2speed[mac_dev->phy_if];
11770 + mac_dev->max_speed = mac_dev->speed;
11771 + mac_dev->if_support = DTSEC_SUPPORTED;
11772 +	/* We don't support half-duplex in SGMII mode. Only look at
11773 +	 * char_prop when the property was actually read; note that
11774 +	 * strstr("sgmii") also matches "qsgmii" and "sgmii-2500".
11775 +	 */
11776 +	if (!_errno && strstr(char_prop, "sgmii"))
11777 +		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
11778 +					 SUPPORTED_100baseT_Half);
11777 +
11778 + /* Gigabit support (no half-duplex) */
11779 + if (mac_dev->max_speed == SPEED_1000 ||
11780 + mac_dev->max_speed == SPEED_2500)
11781 + mac_dev->if_support |= SUPPORTED_1000baseT_Full;
11782 +
11783 + /* The 10G interface only supports one mode */
11784 + if (strstr(char_prop, "xgmii"))
11785 + mac_dev->if_support = SUPPORTED_10000baseT_Full;
11786 +
11787 + /* Get the rest of the PHY information */
11788 + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
11789 + if (!mac_dev->phy_node) {
11790 + struct phy_device *phy;
11791 +
11792 +		if (!of_phy_is_fixed_link(mac_node)) {
11793 +			dev_err(dev, "Wrong PHY information of mac node %s\n",
11794 +				mac_node->full_name);
11795 +			_errno = -EINVAL;
11796 +			goto _return_dev_set_drvdata;
11797 +		}
11797 +
11798 + _errno = of_phy_register_fixed_link(mac_node);
11799 + if (_errno)
11800 + goto _return_dev_set_drvdata;
11801 +
11802 + mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
11803 + sizeof(*mac_dev->fixed_link),
11804 + GFP_KERNEL);
11805 +		if (!mac_dev->fixed_link) {
11806 +			_errno = -ENOMEM;
11807 +			goto _return_dev_set_drvdata;
11808 +		}
11807 +
11808 + mac_dev->phy_node = of_node_get(mac_node);
11809 +		phy = of_phy_find_device(mac_dev->phy_node);
11810 +		if (!phy) {
11811 +			_errno = -EINVAL;
11812 +			goto _return_dev_set_drvdata;
11813 +		}
11814 +
11815 +		mac_dev->fixed_link->link = phy->link;
11816 +		mac_dev->fixed_link->speed = phy->speed;
11817 +		mac_dev->fixed_link->duplex = phy->duplex;
11818 +		mac_dev->fixed_link->pause = phy->pause;
11819 +		mac_dev->fixed_link->asym_pause = phy->asym_pause;
11820 +		/* drop the reference taken by of_phy_find_device() */
11821 +		put_device(&phy->mdio.dev);
11818 + }
11819 +
11820 + _errno = mac_dev->init(mac_dev);
11821 + if (unlikely(_errno < 0)) {
11822 + dev_err(dev, "mac_dev->init() = %d\n", _errno);
11823 + goto _return_dev_set_drvdata;
11824 + }
11825 +
11826 +	/* pause frame autonegotiation enabled */
11827 + mac_dev->autoneg_pause = true;
11828 +
11829 +	/* Request PAUSE on both RX and TX; starting with the active flags
11830 +	 * set to false forces FMD to actually configure PAUSE frames below.
11831 +	 */
11832 + mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
11833 + mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
11834 + _errno = set_mac_active_pause(mac_dev, true, true);
11835 + if (unlikely(_errno < 0))
11836 + dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
11837 +
11838 +	dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
11842 +
11843 + goto _return;
11844 +
11845 +_return_of_node_put:
11846 + of_node_put(dev_node);
11847 +_return_dev_set_drvdata:
11848 + dev_set_drvdata(dev, NULL);
11849 +_return:
11850 + return _errno;
11851 +}
11852 +
11853 +static int __cold mac_remove(struct platform_device *of_dev)
11854 +{
11855 + int i, _errno;
11856 + struct device *dev;
11857 + struct mac_device *mac_dev;
11858 +
11859 + dev = &of_dev->dev;
11860 + mac_dev = (struct mac_device *)dev_get_drvdata(dev);
11861 +
11862 + for_each_port_device(i, mac_dev->port_dev)
11863 + fm_port_unbind(mac_dev->port_dev[i]);
11864 +
11865 + fm_unbind(mac_dev->fm_dev);
11866 +
11867 + _errno = free_macdev(mac_dev);
11868 +
11869 + return _errno;
11870 +}
11871 +
11872 +static struct platform_driver mac_driver = {
11873 + .driver = {
11874 + .name = KBUILD_MODNAME,
11875 + .of_match_table = mac_match,
11876 + .owner = THIS_MODULE,
11877 + },
11878 + .probe = mac_probe,
11879 + .remove = mac_remove
11880 +};
11881 +
11882 +static int __init __cold mac_load(void)
11883 +{
11884 + int _errno;
11885 +
11886 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
11887 + KBUILD_BASENAME".c", __func__);
11888 +
11889 + pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
11890 +
11891 + _errno = platform_driver_register(&mac_driver);
11892 +	if (unlikely(_errno < 0)) {
11893 +		pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
11894 +		       KBUILD_BASENAME".c", __LINE__, __func__, _errno);
11895 +	}
11896 +
11901 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
11902 + KBUILD_BASENAME".c", __func__);
11903 +
11904 + return _errno;
11905 +}
11906 +module_init(mac_load);
11907 +
11908 +static void __exit __cold mac_unload(void)
11909 +{
11910 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
11911 + KBUILD_BASENAME".c", __func__);
11912 +
11913 + platform_driver_unregister(&mac_driver);
11914 +
11915 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
11916 + KBUILD_BASENAME".c", __func__);
11917 +}
11918 +module_exit(mac_unload);
11919 --- /dev/null
11920 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
11921 @@ -0,0 +1,135 @@
11922 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
11923 + *
11924 + * Redistribution and use in source and binary forms, with or without
11925 + * modification, are permitted provided that the following conditions are met:
11926 + * * Redistributions of source code must retain the above copyright
11927 + * notice, this list of conditions and the following disclaimer.
11928 + * * Redistributions in binary form must reproduce the above copyright
11929 + * notice, this list of conditions and the following disclaimer in the
11930 + * documentation and/or other materials provided with the distribution.
11931 + * * Neither the name of Freescale Semiconductor nor the
11932 + * names of its contributors may be used to endorse or promote products
11933 + * derived from this software without specific prior written permission.
11934 + *
11935 + *
11936 + * ALTERNATIVELY, this software may be distributed under the terms of the
11937 + * GNU General Public License ("GPL") as published by the Free Software
11938 + * Foundation, either version 2 of that License or (at your option) any
11939 + * later version.
11940 + *
11941 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11942 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11943 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11944 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11945 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11946 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11947 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11948 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11949 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11950 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11951 + */
11952 +
11953 +#ifndef __MAC_H
11954 +#define __MAC_H
11955 +
11956 +#include <linux/device.h>	/* struct device */
11957 +#include <linux/if_ether.h> /* ETH_ALEN */
11958 +#include <linux/phy.h> /* phy_interface_t, struct phy_device */
11959 +#include <linux/list.h>
11960 +
11961 +#include "lnxwrp_fsl_fman.h" /* struct port_device */
11962 +
11963 +enum {DTSEC, XGMAC, MEMAC};
11964 +
11965 +struct mac_device {
11966 + struct device *dev;
11967 + void *priv;
11968 + uint8_t cell_index;
11969 + struct resource *res;
11970 + void __iomem *vaddr;
11971 + uint8_t addr[ETH_ALEN];
11972 + bool promisc;
11973 +
11974 + struct fm *fm_dev;
11975 + struct fm_port *port_dev[2];
11976 +
11977 + phy_interface_t phy_if;
11978 + u32 if_support;
11979 + bool link;
11980 + bool half_duplex;
11981 + uint16_t speed;
11982 + uint16_t max_speed;
11983 + struct device_node *phy_node;
11984 + char fixed_bus_id[MII_BUS_ID_SIZE + 3];
11985 + struct device_node *tbi_node;
11986 + struct phy_device *phy_dev;
11987 + void *fm;
11988 + /* List of multicast addresses */
11989 + struct list_head mc_addr_list;
11990 + struct fixed_phy_status *fixed_link;
11991 +
11992 + bool autoneg_pause;
11993 + bool rx_pause_req;
11994 + bool tx_pause_req;
11995 + bool rx_pause_active;
11996 + bool tx_pause_active;
11997 +
11998 + struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
11999 + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
12000 + int (*init)(struct mac_device *mac_dev);
12001 + int (*start)(struct mac_device *mac_dev);
12002 + int (*stop)(struct mac_device *mac_dev);
12003 + int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
12004 + int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
12005 + int (*set_multi)(struct net_device *net_dev,
12006 + struct mac_device *mac_dev);
12007 + int (*uninit)(struct fm_mac_dev *fm_mac_dev);
12008 + int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
12009 + int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
12010 + int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
12011 + int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
12012 + int (*fm_rtc_enable)(struct fm *fm_dev);
12013 + int (*fm_rtc_disable)(struct fm *fm_dev);
12014 + int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
12015 + int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
12016 + int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
12017 + int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
12018 + int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
12019 + int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
12020 + uint64_t fiper);
12021 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
12022 + int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
12023 + int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
12024 +#endif
12025 + int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
12026 + bool en);
12027 + int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
12028 + int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
12029 + int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
12030 +};
12031 +
12032 +struct mac_address {
12033 + uint8_t addr[ETH_ALEN];
12034 + struct list_head list;
12035 +};
12036 +
12037 +#define get_fm_handle(net_dev) \
12038 + (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
12039 +
12040 +#define for_each_port_device(i, port_dev) \
12041 + for (i = 0; i < ARRAY_SIZE(port_dev); i++)
12042 +
12043 +static inline __attribute__((nonnull)) void *macdev_priv(
12044 +		const struct mac_device *mac_dev)
12045 +{
12046 +	return (void *)mac_dev + sizeof(*mac_dev);
12047 +}
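+/* This assumes the driver's allocation pattern: the per-MAC private area
+ * (mac_sizeof_priv[] bytes for the MAC type) is carved out in the same
+ * allocation, immediately behind struct mac_device.
+ */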
12048 +
12049 +extern const char *mac_driver_description;
12050 +extern const size_t mac_sizeof_priv[];
12051 +extern void (*const mac_setup[])(struct mac_device *mac_dev);
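+/* mac_setup[] is indexed by the MAC type enum above (DTSEC, XGMAC, MEMAC);
+ * each entry presumably fills in the corresponding function pointers of
+ * struct mac_device.
+ */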
12052 +
12053 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
12054 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
12055 +
12056 +#endif /* __MAC_H */
12057 --- /dev/null
12058 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
12059 @@ -0,0 +1,848 @@
12060 +/* Copyright 2011-2012 Freescale Semiconductor Inc.
12061 + *
12062 + * Redistribution and use in source and binary forms, with or without
12063 + * modification, are permitted provided that the following conditions are met:
12064 + * * Redistributions of source code must retain the above copyright
12065 + * notice, this list of conditions and the following disclaimer.
12066 + * * Redistributions in binary form must reproduce the above copyright
12067 + * notice, this list of conditions and the following disclaimer in the
12068 + * documentation and/or other materials provided with the distribution.
12069 + * * Neither the name of Freescale Semiconductor nor the
12070 + * names of its contributors may be used to endorse or promote products
12071 + * derived from this software without specific prior written permission.
12072 + *
12073 + *
12074 + * ALTERNATIVELY, this software may be distributed under the terms of the
12075 + * GNU General Public License ("GPL") as published by the Free Software
12076 + * Foundation, either version 2 of that License or (at your option) any
12077 + * later version.
12078 + *
12079 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12080 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12081 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12082 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12083 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12084 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12085 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12086 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12087 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12088 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12089 + */
12090 +
12091 +/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
12092 + * Validates device-tree configuration and sets up the offline ports.
12093 + */
12094 +
12095 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
12096 +#define pr_fmt(fmt) \
12097 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
12098 + KBUILD_BASENAME".c", __LINE__, __func__
12099 +#else
12100 +#define pr_fmt(fmt) \
12101 + KBUILD_MODNAME ": " fmt
12102 +#endif
12103 +
12104 +
12105 +#include <linux/init.h>
12106 +#include <linux/module.h>
12107 +#include <linux/of_platform.h>
12108 +#include <linux/fsl_qman.h>
12109 +
12110 +#include "offline_port.h"
12111 +#include "dpaa_eth.h"
12112 +#include "dpaa_eth_common.h"
12113 +
12114 +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
12115 +/* Manip extra space and data alignment for fragmentation */
12116 +#define FRAG_MANIP_SPACE 128
12117 +#define FRAG_DATA_ALIGN 64
12118 +
12119 +
12120 +MODULE_LICENSE("Dual BSD/GPL");
12121 +MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
12122 +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
12123 +
12124 +
12125 +static const struct of_device_id oh_port_match_table[] = {
12126 + {
12127 + .compatible = "fsl,dpa-oh"
12128 + },
12129 + {
12130 + .compatible = "fsl,dpa-oh-shared"
12131 + },
12132 + {}
12133 +};
12134 +MODULE_DEVICE_TABLE(of, oh_port_match_table);
12135 +
12136 +#ifdef CONFIG_PM
12137 +
12138 +static int oh_suspend(struct device *dev)
12139 +{
12140 + struct dpa_oh_config_s *oh_config;
12141 +
12142 + oh_config = dev_get_drvdata(dev);
12143 + return fm_port_suspend(oh_config->oh_port);
12144 +}
12145 +
12146 +static int oh_resume(struct device *dev)
12147 +{
12148 + struct dpa_oh_config_s *oh_config;
12149 +
12150 + oh_config = dev_get_drvdata(dev);
12151 + return fm_port_resume(oh_config->oh_port);
12152 +}
12153 +
12154 +static const struct dev_pm_ops oh_pm_ops = {
12155 + .suspend = oh_suspend,
12156 + .resume = oh_resume,
12157 +};
12158 +
12159 +#define OH_PM_OPS (&oh_pm_ops)
12160 +
12161 +#else /* CONFIG_PM */
12162 +
12163 +#define OH_PM_OPS NULL
12164 +
12165 +#endif /* CONFIG_PM */
12166 +
12167 +/* Creates Frame Queues */
12168 +static uint32_t oh_fq_create(struct qman_fq *fq,
12169 + uint32_t fq_id, uint16_t channel,
12170 + uint16_t wq_id)
12171 +{
12172 + struct qm_mcc_initfq fq_opts;
12173 + uint32_t create_flags, init_flags;
12174 + uint32_t ret = 0;
12175 +
12176 + if (fq == NULL)
12177 + return 1;
12178 +
12179 + /* Set flags for FQ create */
12180 + create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
12181 +
12182 + /* Create frame queue */
12183 + ret = qman_create_fq(fq_id, create_flags, fq);
12184 + if (ret != 0)
12185 + return 1;
12186 +
12187 + /* Set flags for FQ init */
12188 + init_flags = QMAN_INITFQ_FLAG_SCHED;
12189 +
12190 + /* Set FQ init options. Specify destination WQ ID and channel */
12191 + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
12192 + fq_opts.fqd.dest.wq = wq_id;
12193 + fq_opts.fqd.dest.channel = channel;
12194 +
12195 + /* Initialize frame queue */
12196 + ret = qman_init_fq(fq, init_flags, &fq_opts);
12197 + if (ret != 0) {
12198 + qman_destroy_fq(fq, 0);
12199 + return 1;
12200 + }
12201 +
12202 + return 0;
12203 +}
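+/* Typical use (hypothetical FQID value): one FQ is created per FQID read
+ * from the device tree and pointed at the OH port's QMan channel, work
+ * queue 3:
+ *
+ *	struct qman_fq fq;
+ *
+ *	if (oh_fq_create(&fq, 0x580, channel_id, 3))
+ *		return -EINVAL;
+ *
+ * QMAN_FQ_FLAG_TO_DCPORTAL marks the FQ as consumed by a direct-connect
+ * portal (the FMan), not by software.
+ */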
12204 +
12205 +static void dump_fq(struct device *dev, int fqid, uint16_t channel)
12206 +{
12207 + if (channel) {
12208 + /* display fqs with a valid (!= 0) destination channel */
12209 + dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
12210 + }
12211 +}
12212 +
12213 +static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
12214 + int fqs_count, uint16_t channel_id)
12215 +{
12216 + int i;
12217 + for (i = 0; i < fqs_count; i++)
12218 + dump_fq(dev, (fqs + i)->fqid, channel_id);
12219 +}
12220 +
12221 +static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
12222 +{
12223 + struct list_head *fq_list;
12224 + struct fq_duple *fqd;
12226 +
12227 + dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
12228 + dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
12229 +
12230 +	/* TX queues (old initialization); dump_fq_duple() walks the whole
12231 +	 * array itself, so one call suffices
12232 +	 */
12233 +	dev_info(dev, "Initialized queues:\n");
12234 +	dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
12235 +		      conf->channel);
12235 +
12236 + /* initialized ingress queues */
12237 + list_for_each(fq_list, &conf->fqs_ingress_list) {
12238 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
12239 + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
12240 + }
12241 +
12242 + /* initialized egress queues */
12243 + list_for_each(fq_list, &conf->fqs_egress_list) {
12244 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
12245 + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
12246 + }
12247 +}
12248 +
12249 +/* Destroys Frame Queues */
12250 +static void oh_fq_destroy(struct qman_fq *fq)
12251 +{
12252 + int _errno = 0;
12253 +
12254 + _errno = qman_retire_fq(fq, NULL);
12255 + if (unlikely(_errno < 0))
12256 + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
12257 + KBUILD_BASENAME".c", __LINE__, __func__,
12258 + qman_fq_fqid(fq), _errno);
12259 +
12260 + _errno = qman_oos_fq(fq);
12261 + if (unlikely(_errno < 0)) {
12262 + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
12263 + KBUILD_BASENAME".c", __LINE__, __func__,
12264 + qman_fq_fqid(fq), _errno);
12265 + }
12266 +
12267 + qman_destroy_fq(fq, 0);
12268 +}
12269 +
12270 +/* Allocation code for the OH port's PCD frame queues */
12271 +static int __cold oh_alloc_pcd_fqids(struct device *dev,
12272 + uint32_t num,
12273 + uint8_t alignment,
12274 + uint32_t *base_fqid)
12275 +{
12276 + dev_crit(dev, "callback not implemented!\n");
12277 + BUG();
12278 +
12279 + return 0;
12280 +}
12281 +
12282 +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
12283 +{
12284 + dev_crit(dev, "callback not implemented!\n");
12285 + BUG();
12286 +
12287 + return 0;
12288 +}
12289 +
12290 +static void oh_set_buffer_layout(struct fm_port *port,
12291 + struct dpa_buffer_layout_s *layout)
12292 +{
12293 + struct fm_port_params params;
12294 +
12295 + layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
12296 + layout->parse_results = true;
12297 + layout->hash_results = true;
12298 + layout->time_stamp = false;
12299 +
12300 + fm_port_get_buff_layout_ext_params(port, &params);
12301 + layout->manip_extra_space = params.manip_extra_space;
12302 + layout->data_align = params.data_align;
12303 +}
12304 +
12305 +static int
12306 +oh_port_probe(struct platform_device *_of_dev)
12307 +{
12308 + struct device *dpa_oh_dev;
12309 + struct device_node *dpa_oh_node;
12310 + int lenp, _errno = 0, fq_idx, duple_idx;
12311 + int n_size, i, j, ret, duples_count;
12312 + struct platform_device *oh_of_dev;
12313 + struct device_node *oh_node, *bpool_node = NULL, *root_node;
12314 + struct device *oh_dev;
12315 + struct dpa_oh_config_s *oh_config = NULL;
12316 + const __be32 *oh_all_queues;
12317 + const __be32 *channel_ids;
12318 + const __be32 *oh_tx_queues;
12319 + uint32_t queues_count;
12320 + uint32_t crt_fqid_base;
12321 + uint32_t crt_fq_count;
12322 + bool frag_enabled = false;
12323 + struct fm_port_params oh_port_tx_params;
12324 + struct fm_port_pcd_param oh_port_pcd_params;
12325 + struct dpa_buffer_layout_s buf_layout;
12326 +
12327 + /* True if the current partition owns the OH port. */
12328 + bool init_oh_port;
12329 +
12330 + const struct of_device_id *match;
12331 + int crt_ext_pools_count;
12332 + u32 ext_pool_size;
12333 + u32 port_id;
12334 + u32 channel_id;
12335 +
12336 + int channel_ids_count;
12337 + int channel_idx;
12338 + struct fq_duple *fqd;
12339 + struct list_head *fq_list, *fq_list_tmp;
12340 +
12341 + const __be32 *bpool_cfg;
12342 + uint32_t bpid;
12343 +
12344 + memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
12345 + dpa_oh_dev = &_of_dev->dev;
12346 + dpa_oh_node = dpa_oh_dev->of_node;
12347 + BUG_ON(dpa_oh_node == NULL);
12348 +
12349 + match = of_match_device(oh_port_match_table, dpa_oh_dev);
12350 + if (!match)
12351 + return -EINVAL;
12352 +
12353 + dev_dbg(dpa_oh_dev, "Probing OH port...\n");
12354 +
12355 + /* Find the referenced OH node */
12356 + oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
12357 + if (oh_node == NULL) {
12358 + dev_err(dpa_oh_dev,
12359 + "Can't find OH node referenced from node %s\n",
12360 + dpa_oh_node->full_name);
12361 + return -EINVAL;
12362 + }
12363 + dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
12364 + match->compatible);
12365 +
12366 + _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
12367 + if (_errno) {
12368 + dev_err(dpa_oh_dev, "No port id found in node %s\n",
12369 + dpa_oh_node->full_name);
12370 + goto return_kfree;
12371 + }
12372 +
12373 + _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
12374 + &channel_id);
12375 + if (_errno) {
12376 + dev_err(dpa_oh_dev, "No channel id found in node %s\n",
12377 + dpa_oh_node->full_name);
12378 + goto return_kfree;
12379 + }
12380 +
12381 + oh_of_dev = of_find_device_by_node(oh_node);
12382 + BUG_ON(oh_of_dev == NULL);
12383 + oh_dev = &oh_of_dev->dev;
12384 +
12385 + /* The OH port must be initialized exactly once.
12386 + * The following scenarios are of interest:
12387 + * - the node is Linux-private (will always initialize it);
12388 + * - the node is shared between two Linux partitions
12389 + * (only one of them will initialize it);
12390 + * - the node is shared between a Linux and a LWE partition
12391 + * (Linux will initialize it) - "fsl,dpa-oh-shared"
12392 + */
12393 +
12394 + /* Check if the current partition owns the OH port
12395 + * and ought to initialize it. It may be the case that we leave this
12396 + * to another (also Linux) partition.
12397 + */
12398 + init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
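+	/* i.e. strcmp() != 0: init_oh_port is true exactly when the matched
+	 * compatible is the plain "fsl,dpa-oh" one
+	 */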
12399 +
12400 + /* If we aren't the "owner" of the OH node, we're done here. */
12401 + if (!init_oh_port) {
12402 + dev_dbg(dpa_oh_dev,
12403 + "Not owning the shared OH port %s, will not initialize it.\n",
12404 + oh_node->full_name);
12405 + of_node_put(oh_node);
12406 + return 0;
12407 + }
12408 +
12409 + /* Allocate OH dev private data */
12410 + oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
12411 + if (oh_config == NULL) {
12412 + dev_err(dpa_oh_dev,
12413 + "Can't allocate private data for OH node %s referenced from node %s!\n",
12414 + oh_node->full_name, dpa_oh_node->full_name);
12415 + _errno = -ENOMEM;
12416 + goto return_kfree;
12417 + }
12418 +
12419 + INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
12420 + INIT_LIST_HEAD(&oh_config->fqs_egress_list);
12421 +
12422 + /* FQs that enter OH port */
12423 + lenp = 0;
12424 + oh_all_queues = of_get_property(dpa_oh_node,
12425 + "fsl,qman-frame-queues-ingress", &lenp);
12426 + if (lenp % (2 * sizeof(*oh_all_queues))) {
12427 + dev_warn(dpa_oh_dev,
12428 + "Wrong ingress queues format for OH node %s referenced from node %s!\n",
12429 + oh_node->full_name, dpa_oh_node->full_name);
12430 + /* just ignore the last unpaired value */
12431 + }
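+	/* the property is a list of (base, count) duples; e.g. a hypothetical
+	 * fsl,qman-frame-queues-ingress = <0x880 8 0x890 8>; describes two
+	 * duples of 8 consecutive FQIDs each
+	 */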
12432 +
12433 + duples_count = lenp / (2 * sizeof(*oh_all_queues));
12434 + dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
12435 + duples_count);
12436 + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
12437 + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
12438 + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
12439 +
12440 + fqd = devm_kzalloc(dpa_oh_dev,
12441 + sizeof(struct fq_duple), GFP_KERNEL);
12442 + if (!fqd) {
12443 + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
12444 + oh_node->full_name,
12445 + dpa_oh_node->full_name);
12446 + _errno = -ENOMEM;
12447 + goto return_kfree;
12448 + }
12449 +
12450 + fqd->fqs = devm_kzalloc(dpa_oh_dev,
12451 + crt_fq_count * sizeof(struct qman_fq),
12452 + GFP_KERNEL);
12453 + if (!fqd->fqs) {
12454 + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
12455 + oh_node->full_name,
12456 + dpa_oh_node->full_name);
12457 + _errno = -ENOMEM;
12458 + goto return_kfree;
12459 + }
12460 +
12461 + for (j = 0; j < crt_fq_count; j++)
12462 + (fqd->fqs + j)->fqid = crt_fqid_base + j;
12463 + fqd->fqs_count = crt_fq_count;
12464 + fqd->channel_id = (uint16_t)channel_id;
12465 + list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
12466 + }
12467 +
12468 + /* create the ingress queues */
12469 + list_for_each(fq_list, &oh_config->fqs_ingress_list) {
12470 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
12471 +
12472 + for (j = 0; j < fqd->fqs_count; j++) {
12473 + ret = oh_fq_create(fqd->fqs + j,
12474 + (fqd->fqs + j)->fqid,
12475 + fqd->channel_id, 3);
12476 + if (ret != 0) {
12477 + dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
12478 + (fqd->fqs + j)->fqid,
12479 + oh_node->full_name,
12480 + dpa_oh_node->full_name);
12481 + _errno = -EINVAL;
12482 + goto return_kfree;
12483 + }
12484 + }
12485 + }
12486 +
12487 + /* FQs that exit OH port */
12488 + lenp = 0;
12489 + oh_all_queues = of_get_property(dpa_oh_node,
12490 + "fsl,qman-frame-queues-egress", &lenp);
12491 + if (lenp % (2 * sizeof(*oh_all_queues))) {
12492 + dev_warn(dpa_oh_dev,
12493 + "Wrong egress queues format for OH node %s referenced from node %s!\n",
12494 + oh_node->full_name, dpa_oh_node->full_name);
12495 + /* just ignore the last unpaired value */
12496 + }
12497 +
12498 + duples_count = lenp / (2 * sizeof(*oh_all_queues));
12499 + dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
12500 + duples_count);
12501 + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
12502 + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
12503 + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
12504 +
12505 + fqd = devm_kzalloc(dpa_oh_dev,
12506 + sizeof(struct fq_duple), GFP_KERNEL);
12507 + if (!fqd) {
12508 + dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
12509 + oh_node->full_name,
12510 + dpa_oh_node->full_name);
12511 + _errno = -ENOMEM;
12512 + goto return_kfree;
12513 + }
12514 +
12515 + fqd->fqs = devm_kzalloc(dpa_oh_dev,
12516 + crt_fq_count * sizeof(struct qman_fq),
12517 + GFP_KERNEL);
12518 + if (!fqd->fqs) {
12519 + dev_err(dpa_oh_dev,
12520 + "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
12521 + oh_node->full_name,
12522 + dpa_oh_node->full_name);
12523 + _errno = -ENOMEM;
12524 + goto return_kfree;
12525 + }
12526 +
12527 + for (j = 0; j < crt_fq_count; j++)
12528 + (fqd->fqs + j)->fqid = crt_fqid_base + j;
12529 + fqd->fqs_count = crt_fq_count;
12530 + /* channel ID is specified in another attribute */
12531 + fqd->channel_id = 0;
12532 + list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
12533 +
12534 +		/* the FQs themselves are created below, once the egress
12535 +		 * channel ids have been read
12536 +		 */
12536 + }
12537 +
12538 + /* channel_ids for FQs that exit OH port */
12539 + lenp = 0;
12540 + channel_ids = of_get_property(dpa_oh_node,
12541 + "fsl,qman-channel-ids-egress", &lenp);
12542 +
12543 + channel_ids_count = lenp / (sizeof(*channel_ids));
12544 + if (channel_ids_count != duples_count) {
12545 + dev_warn(dpa_oh_dev,
12546 + "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
12547 + oh_node->full_name, dpa_oh_node->full_name);
12548 + /* just ignore the queues that do not have a Channel ID */
12549 + }
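+	/* entries pair up positionally with the egress duples above; e.g. a
+	 * hypothetical fsl,qman-channel-ids-egress = <0x42 0x43>; assigns one
+	 * channel per duple
+	 */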
12550 +
12551 + channel_idx = 0;
12552 + list_for_each(fq_list, &oh_config->fqs_egress_list) {
12553 + if (channel_idx + 1 > channel_ids_count)
12554 + break;
12555 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
12556 + fqd->channel_id =
12557 + (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
12558 + }
12559 +
12560 + /* create egress queues */
12561 + list_for_each(fq_list, &oh_config->fqs_egress_list) {
12562 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
12563 +
12564 + if (fqd->channel_id == 0) {
12565 + /* missing channel id in dts */
12566 + continue;
12567 + }
12568 +
12569 + for (j = 0; j < fqd->fqs_count; j++) {
12570 + ret = oh_fq_create(fqd->fqs + j,
12571 + (fqd->fqs + j)->fqid,
12572 + fqd->channel_id, 3);
12573 + if (ret != 0) {
12574 + dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
12575 + (fqd->fqs + j)->fqid,
12576 + oh_node->full_name,
12577 + dpa_oh_node->full_name);
12578 + _errno = -EINVAL;
12579 + goto return_kfree;
12580 + }
12581 + }
12582 + }
12583 +
12584 + /* Read FQ ids/nums for the DPA OH node */
12585 + oh_all_queues = of_get_property(dpa_oh_node,
12586 + "fsl,qman-frame-queues-oh", &lenp);
12587 + if (oh_all_queues == NULL) {
12588 + dev_err(dpa_oh_dev,
12589 + "No frame queues have been defined for OH node %s referenced from node %s\n",
12590 + oh_node->full_name, dpa_oh_node->full_name);
12591 + _errno = -EINVAL;
12592 + goto return_kfree;
12593 + }
12594 +
12595 + /* Check that the OH error and default FQs are there */
12596 + BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
12597 + queues_count = lenp / (2 * sizeof(*oh_all_queues));
12598 + if (queues_count != 2) {
12599 + dev_err(dpa_oh_dev,
12600 + "Error and Default queues must be defined for OH node %s referenced from node %s\n",
12601 + oh_node->full_name, dpa_oh_node->full_name);
12602 + _errno = -EINVAL;
12603 + goto return_kfree;
12604 + }
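+	/* i.e. exactly one error duple plus one default duple, e.g. a
+	 * hypothetical fsl,qman-frame-queues-oh = <0x60 1 0x61 1>;
+	 */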
12605 +
12606 + /* Read the FQIDs defined for this OH port */
12607 + dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
12608 + fq_idx = 0;
12609 +
12610 + /* Error FQID - must be present */
12611 + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
12612 + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
12613 + if (crt_fq_count != 1) {
12614 + dev_err(dpa_oh_dev,
12615 + "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
12616 + oh_node->full_name, dpa_oh_node->full_name,
12617 + crt_fq_count);
12618 + _errno = -EINVAL;
12619 + goto return_kfree;
12620 + }
12621 + oh_config->error_fqid = crt_fqid_base;
12622 + dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
12623 + oh_config->error_fqid, oh_node->full_name);
12624 +
12625 + /* Default FQID - must be present */
12626 + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
12627 + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
12628 + if (crt_fq_count != 1) {
12629 + dev_err(dpa_oh_dev,
12630 + "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
12631 + oh_node->full_name, dpa_oh_node->full_name,
12632 + crt_fq_count);
12633 + _errno = -EINVAL;
12634 + goto return_kfree;
12635 + }
12636 + oh_config->default_fqid = crt_fqid_base;
12637 + dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
12638 + oh_config->default_fqid, oh_node->full_name);
12639 +
12640 + /* TX FQID - presence is optional */
12641 + oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
12642 + &lenp);
12643 + if (oh_tx_queues == NULL) {
12644 + dev_dbg(dpa_oh_dev,
12645 + "No tx queues have been defined for OH node %s referenced from node %s\n",
12646 + oh_node->full_name, dpa_oh_node->full_name);
12647 + goto config_port;
12648 + }
12649 +
12650 + /* Check that queues-tx has only a base and a count defined */
12651 + BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
12652 + queues_count = lenp / (2 * sizeof(*oh_tx_queues));
12653 + if (queues_count != 1) {
12654 + dev_err(dpa_oh_dev,
12655 + "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
12656 + oh_node->full_name, dpa_oh_node->full_name);
12657 + _errno = -EINVAL;
12658 + goto return_kfree;
12659 + }
12660 +
12661 + fq_idx = 0;
12662 + crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
12663 + crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
12664 + oh_config->egress_cnt = crt_fq_count;
12665 +
12666 + /* Allocate TX queues */
12667 + dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
12668 + oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
12669 + crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
12670 + if (oh_config->egress_fqs == NULL) {
12671 + dev_err(dpa_oh_dev,
12672 + "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
12673 + oh_node->full_name, dpa_oh_node->full_name);
12674 + _errno = -ENOMEM;
12675 + goto return_kfree;
12676 + }
12677 +
12678 + /* Create TX queues */
12679 + for (i = 0; i < crt_fq_count; i++) {
12680 + ret = oh_fq_create(oh_config->egress_fqs + i,
12681 + crt_fqid_base + i, (uint16_t)channel_id, 3);
12682 + if (ret != 0) {
12683 + dev_err(dpa_oh_dev,
12684 + "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
12685 + crt_fqid_base + i, oh_node->full_name,
12686 + dpa_oh_node->full_name);
12687 + _errno = -EINVAL;
12688 + goto return_kfree;
12689 + }
12690 + }
12691 +
12692 +config_port:
12693 + /* Get a handle to the fm_port so we can set
12694 + * its configuration params
12695 + */
12696 + oh_config->oh_port = fm_port_bind(oh_dev);
12697 + if (oh_config->oh_port == NULL) {
12698 + dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
12699 + oh_node->full_name);
12700 + _errno = -EINVAL;
12701 + goto return_kfree;
12702 + }
12703 +
12704 + oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
12705 +
12706 + /* read the pool handlers */
12707 + crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
12708 + "fsl,bman-buffer-pools", NULL);
12709 + if (crt_ext_pools_count <= 0) {
12710 + dev_info(dpa_oh_dev,
12711 + "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
12712 + oh_node->full_name);
12713 + goto init_port;
12714 + }
12715 +
12716 + /* used for reading ext_pool_size*/
12717 + root_node = of_find_node_by_path("/");
12718 + if (root_node == NULL) {
12719 + dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
12720 + _errno = -EINVAL;
12721 + goto return_kfree;
12722 + }
12723 +
12724 + n_size = of_n_size_cells(root_node);
12725 + of_node_put(root_node);
12726 +
12727 + dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
12728 + crt_ext_pools_count);
12729 +
12730 + oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
12731 +
12732 + for (i = 0; i < crt_ext_pools_count; i++) {
12733 + bpool_node = of_parse_phandle(dpa_oh_node,
12734 + "fsl,bman-buffer-pools", i);
12735 + if (bpool_node == NULL) {
12736 + dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
12737 + _errno = -EINVAL;
12738 + goto return_kfree;
12739 + }
12740 +
12741 + _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
12742 + if (_errno) {
12743 + dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
12744 + _errno = -EINVAL;
12745 + goto return_kfree;
12746 + }
12747 +
12748 + oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
12749 + dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
12750 +
12751 + bpool_cfg = of_get_property(bpool_node,
12752 + "fsl,bpool-ethernet-cfg", &lenp);
12753 + if (bpool_cfg == NULL) {
12754 + dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
12755 + _errno = -EINVAL;
12756 + goto return_kfree;
12757 + }
12758 +
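+		/* fsl,bpool-ethernet-cfg is assumed to hold <count size base>,
+		 * each field n_size cells wide; skipping the first n_size
+		 * cells (the count) lands on the buffer size
+		 */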
12759 + ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
12760 + oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
12761 + dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
12762 + ext_pool_size);
12763 +		of_node_put(bpool_node);
12764 +		/* avoid a double put on later error paths */
12765 +		bpool_node = NULL;
12764 +
12765 + }
12766 +
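+	/* fragmentation needs the exact header-manipulation space and data
+	 * alignment reserved by FMD (FRAG_MANIP_SPACE / FRAG_DATA_ALIGN);
+	 * any other layout leaves it disabled
+	 */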
12767 + if (buf_layout.data_align != FRAG_DATA_ALIGN ||
12768 + buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
12769 + goto init_port;
12770 +
12771 + frag_enabled = true;
12772 + dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
12773 + port_id);
12774 +
12775 +init_port:
12777 + /* Set Tx params */
12778 + dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
12779 + oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
12780 + frag_enabled);
12781 + /* Set PCD params */
12782 + oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
12783 + oh_port_pcd_params.cbf = oh_free_pcd_fqids;
12784 + oh_port_pcd_params.dev = dpa_oh_dev;
12785 + fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
12786 +
12787 + dev_set_drvdata(dpa_oh_dev, oh_config);
12788 +
12789 + /* Enable the OH port */
12790 + _errno = fm_port_enable(oh_config->oh_port);
12791 + if (_errno)
12792 + goto return_kfree;
12793 +
12794 + dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
12795 +
12796 + /* print of all referenced & created queues */
12797 + dump_oh_config(dpa_oh_dev, oh_config);
12798 +
12799 + return 0;
12800 +
12801 +return_kfree:
12802 + if (bpool_node)
12803 + of_node_put(bpool_node);
12804 + if (oh_node)
12805 + of_node_put(oh_node);
12806 +	/* oh_config is NULL when we bail out before allocating it */
12807 +	if (oh_config == NULL)
12808 +		return _errno;
12809 +
12810 +	if (oh_config->egress_fqs)
12811 +		devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
12812 +
12813 +	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
12814 +		fqd = list_entry(fq_list, struct fq_duple, fq_list);
12815 +		list_del(fq_list);
12816 +		devm_kfree(dpa_oh_dev, fqd->fqs);
12817 +		devm_kfree(dpa_oh_dev, fqd);
12818 +	}
12819 +
12820 +	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
12821 +		fqd = list_entry(fq_list, struct fq_duple, fq_list);
12822 +		list_del(fq_list);
12823 +		devm_kfree(dpa_oh_dev, fqd->fqs);
12824 +		devm_kfree(dpa_oh_dev, fqd);
12825 +	}
12826 +
12827 +	devm_kfree(dpa_oh_dev, oh_config);
12824 + return _errno;
12825 +}
12826 +
12827 +static int __cold oh_port_remove(struct platform_device *_of_dev)
12828 +{
12829 + int _errno = 0, i;
12830 + struct dpa_oh_config_s *oh_config;
12831 +
12832 + pr_info("Removing OH port...\n");
12833 +
12834 + oh_config = dev_get_drvdata(&_of_dev->dev);
12835 + if (oh_config == NULL) {
12836 + pr_err(KBUILD_MODNAME
12837 + ": %s:%hu:%s(): No OH config in device private data!\n",
12838 + KBUILD_BASENAME".c", __LINE__, __func__);
12839 + _errno = -ENODEV;
12840 + goto return_error;
12841 + }
12842 +
12843 + if (oh_config->egress_fqs)
12844 + for (i = 0; i < oh_config->egress_cnt; i++)
12845 + oh_fq_destroy(oh_config->egress_fqs + i);
12846 +
12847 + if (oh_config->oh_port == NULL) {
12848 + pr_err(KBUILD_MODNAME
12849 + ": %s:%hu:%s(): No fm port in device private data!\n",
12850 + KBUILD_BASENAME".c", __LINE__, __func__);
12851 + _errno = -EINVAL;
12852 + goto free_egress_fqs;
12853 + }
12854 +
12855 + _errno = fm_port_disable(oh_config->oh_port);
12856 +
12857 +free_egress_fqs:
12858 + if (oh_config->egress_fqs)
12859 + devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
12860 + devm_kfree(&_of_dev->dev, oh_config);
12861 + dev_set_drvdata(&_of_dev->dev, NULL);
12862 +
12863 +return_error:
12864 + return _errno;
12865 +}
12866 +
12867 +static struct platform_driver oh_port_driver = {
12868 + .driver = {
12869 + .name = KBUILD_MODNAME,
12870 + .of_match_table = oh_port_match_table,
12871 + .owner = THIS_MODULE,
12872 + .pm = OH_PM_OPS,
12873 + },
12874 + .probe = oh_port_probe,
12875 + .remove = oh_port_remove
12876 +};
12877 +
12878 +static int __init __cold oh_port_load(void)
12879 +{
12880 + int _errno;
12881 +
12882 + pr_info(OH_MOD_DESCRIPTION "\n");
12883 +
12884 + _errno = platform_driver_register(&oh_port_driver);
12885 + if (_errno < 0) {
12886 + pr_err(KBUILD_MODNAME
12887 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
12888 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
12889 + }
12890 +
12891 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12892 + KBUILD_BASENAME".c", __func__);
12893 + return _errno;
12894 +}
12895 +module_init(oh_port_load);
12896 +
12897 +static void __exit __cold oh_port_unload(void)
12898 +{
12899 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
12900 + KBUILD_BASENAME".c", __func__);
12901 +
12902 + platform_driver_unregister(&oh_port_driver);
12903 +
12904 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12905 + KBUILD_BASENAME".c", __func__);
12906 +}
12907 +module_exit(oh_port_unload);
12908 --- /dev/null
12909 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
12910 @@ -0,0 +1,59 @@
12911 +/* Copyright 2011 Freescale Semiconductor Inc.
12912 + *
12913 + * Redistribution and use in source and binary forms, with or without
12914 + * modification, are permitted provided that the following conditions are met:
12915 + * * Redistributions of source code must retain the above copyright
12916 + * notice, this list of conditions and the following disclaimer.
12917 + * * Redistributions in binary form must reproduce the above copyright
12918 + * notice, this list of conditions and the following disclaimer in the
12919 + * documentation and/or other materials provided with the distribution.
12920 + * * Neither the name of Freescale Semiconductor nor the
12921 + * names of its contributors may be used to endorse or promote products
12922 + * derived from this software without specific prior written permission.
12923 + *
12924 + *
12925 + * ALTERNATIVELY, this software may be distributed under the terms of the
12926 + * GNU General Public License ("GPL") as published by the Free Software
12927 + * Foundation, either version 2 of that License or (at your option) any
12928 + * later version.
12929 + *
12930 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12931 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12932 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12933 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12934 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12935 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12936 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12937 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12938 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12939 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12940 + */
12941 +
12942 +#ifndef __OFFLINE_PORT_H
12943 +#define __OFFLINE_PORT_H
12944 +
12945 +struct fm_port;
12946 +struct qman_fq;
12947 +
12948 +/* fqs are defined in duples (base_fq, fq_count) */
12949 +struct fq_duple {
12950 + struct qman_fq *fqs;
12951 + int fqs_count;
12952 + uint16_t channel_id;
12953 + struct list_head fq_list;
12954 +};
12955 +
12956 +/* OH port configuration */
12957 +struct dpa_oh_config_s {
12958 + uint32_t error_fqid;
12959 + uint32_t default_fqid;
12960 + struct fm_port *oh_port;
12961 + uint32_t egress_cnt;
12962 + struct qman_fq *egress_fqs;
12963 + uint16_t channel;
12964 +
12965 + struct list_head fqs_ingress_list;
12966 + struct list_head fqs_egress_list;
12967 +};
12968 +
12969 +#endif /* __OFFLINE_PORT_H */