1 From f7f94b1e7e9c6044a23bab1c5e773f6259f2d3e0 Mon Sep 17 00:00:00 2001
2 From: Madalin Bucur <madalin.bucur@nxp.com>
3 Date: Wed, 10 May 2017 16:39:42 +0300
4 Subject: [PATCH] dpa: SDK DPAA 1.x Ethernet driver
6 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
8 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 ++
9 drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
10 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
11 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
12 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
13 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
14 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1210 ++++++++++++
15 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 697 +++++++
16 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
17 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
18 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1991 ++++++++++++++++++++
19 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 236 +++
20 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1812 ++++++++++++++++++
21 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 226 +++
22 .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
23 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1113 +++++++++++
24 .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
25 .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
26 .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++++++
27 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 290 +++
28 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 909 +++++++++
29 drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 +++++
30 drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 ++
31 .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++++++++
32 .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
33 25 files changed, 12835 insertions(+)
34 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
35 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
36 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
37 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
38 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
39 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
40 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
41 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
42 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
43 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
44 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
45 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
46 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
47 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
48 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
49 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
50 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
51 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
52 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
53 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
54 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
55 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
56 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
57 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
58 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
61 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
63 +menuconfig FSL_SDK_DPAA_ETH
64 + tristate "DPAA Ethernet"
65 + depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
68 + Data Path Acceleration Architecture Ethernet driver,
69 + supporting the Freescale QorIQ chips.
70 + Depends on Freescale Buffer Manager and Queue Manager
71 + driver and Frame Manager Driver.
75 +config FSL_DPAA_HOOKS
76 + bool "DPAA Ethernet driver hooks"
78 +config FSL_DPAA_CEETM
79 + bool "DPAA CEETM QoS"
80 + depends on NET_SCHED
83 + Enable QoS offloading support through the CEETM hardware block.
85 +config FSL_DPAA_OFFLINE_PORTS
86 + bool "Offline Ports support"
87 + depends on FSL_SDK_DPAA_ETH
90 +	  The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
91 + most of the functionality of the regular, online ports, except they receive their
92 + frames from a core or an accelerator on the SoC, via QMan frame queues,
93 + rather than directly from the network.
94 + Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
95 + any online FMan port. They deliver the processed frames to frame queues, according
96 + to the applied PCD configurations.
98 + Choosing this feature will not impact the functionality and/or performance of the system,
99 + so it is safe to have it.
101 +config FSL_DPAA_ADVANCED_DRIVERS
102 + bool "Advanced DPAA Ethernet drivers"
103 + depends on FSL_SDK_DPAA_ETH
106 + Besides the standard DPAA Ethernet driver the DPAA Proxy initialization driver
107 + is needed to support advanced scenarios. Select this to also build the advanced
110 +config FSL_DPAA_ETH_JUMBO_FRAME
111 + bool "Optimize for jumbo frames"
114 + Optimize the DPAA Ethernet driver throughput for large frames
115 + termination traffic (e.g. 4K and above).
116 + NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
117 + is set to 9600 bytes.
118 + Using this option in combination with small frames increases
119 + significantly the driver's memory footprint and may even deplete
120 + the system memory. Also, the skb truesize is altered and messages
121 + from the stack that warn against this are bypassed.
122 + This option is not available on LS1043.
125 + bool "Linux compliant timestamping"
126 + depends on FSL_SDK_DPAA_ETH
129 + Enable Linux API compliant timestamping support.
131 +config FSL_DPAA_1588
132 + bool "IEEE 1588-compliant timestamping"
133 + depends on FSL_SDK_DPAA_ETH
137 + Enable IEEE1588 support code.
139 +config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
140 + bool "Use driver's Tx queue selection mechanism"
142 + depends on FSL_SDK_DPAA_ETH
144 + The DPAA-Ethernet driver defines a ndo_select_queue() callback for optimal selection
145 + of the egress FQ. That will override the XPS support for this netdevice.
146 + If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
147 + or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
148 + and use the standard XPS support instead.
150 +config FSL_DPAA_ETH_MAX_BUF_COUNT
151 +	int "Maximum number of buffers in private bpool"
152 + depends on FSL_SDK_DPAA_ETH
156 + The maximum number of buffers to be by default allocated in the DPAA-Ethernet private port's
157 + buffer pool. One needn't normally modify this, as it has probably been tuned for performance
158 + already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
160 +config FSL_DPAA_ETH_REFILL_THRESHOLD
161 + int "Private bpool refill threshold"
162 + depends on FSL_SDK_DPAA_ETH
163 + range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
166 + The DPAA-Ethernet driver will start replenishing buffer pools whose count
167 + falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
168 + modify this value unless one has very specific performance reasons.
170 +config FSL_DPAA_CS_THRESHOLD_1G
171 + hex "Egress congestion threshold on 1G ports"
172 + depends on FSL_SDK_DPAA_ETH
173 + range 0x1000 0x10000000
174 + default "0x06000000"
176 + The size in bytes of the egress Congestion State notification threshold on 1G ports.
177 + The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
178 + (e.g. by sending UDP datagrams at "while(1) speed"),
179 + and the larger the frame size, the more acute the problem.
180 + So we have to find a balance between these factors:
181 + - avoiding the device staying congested for a prolonged time (risking
182 + the netdev watchdog to fire - see also the tx_timeout module param);
183 + - affecting performance of protocols such as TCP, which otherwise
184 + behave well under the congestion notification mechanism;
185 + - preventing the Tx cores from tightly-looping (as if the congestion
186 + threshold was too low to be effective);
187 + - running out of memory if the CS threshold is set too high.
189 +config FSL_DPAA_CS_THRESHOLD_10G
190 + hex "Egress congestion threshold on 10G ports"
191 + depends on FSL_SDK_DPAA_ETH
192 + range 0x1000 0x20000000
193 + default "0x10000000"
195 + The size in bytes of the egress Congestion State notification threshold on 10G ports.
197 +config FSL_DPAA_INGRESS_CS_THRESHOLD
198 + hex "Ingress congestion threshold on FMan ports"
199 + depends on FSL_SDK_DPAA_ETH
200 + default "0x10000000"
202 + The size in bytes of the ingress tail-drop threshold on FMan ports.
203 + Traffic piling up above this value will be rejected by QMan and discarded by FMan.
205 +config FSL_DPAA_ETH_DEBUGFS
206 + bool "DPAA Ethernet debugfs interface"
207 + depends on DEBUG_FS && FSL_SDK_DPAA_ETH
210 + This option compiles debugfs code for the DPAA Ethernet driver.
212 +config FSL_DPAA_ETH_DEBUG
213 + bool "DPAA Ethernet Debug Support"
214 + depends on FSL_SDK_DPAA_ETH
217 + This option compiles debug code for the DPAA Ethernet driver.
219 +config FSL_DPAA_DBG_LOOP
220 + bool "DPAA Ethernet Debug loopback"
221 + depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
224 + This option allows to divert all received traffic on a certain interface A towards a
225 + selected interface B. This option is used to benchmark the HW + Ethernet driver in
226 + isolation from the Linux networking stack. The loops are controlled by debugfs entries,
227 + one for each interface. By default all loops are disabled (target value is -1). I.e. to
228 + change the loop setting for interface 4 and divert all received traffic to interface 5
229 + write Tx interface number in the receive interface debugfs file:
230 + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
232 + # echo 5 > /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
233 + # cat /sys/kernel/debug/powerpc/fsl_dpa/eth4_loop
235 +endif # FSL_SDK_DPAA_ETH
237 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
240 +# Makefile for the Freescale Ethernet controllers
242 +ccflags-y += -DVERSION=\"\"
244 +# Include netcomm SW specific definitions
245 +include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
247 +ccflags-y += -I$(NET_DPA)
249 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
250 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
252 +fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
253 +ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
254 +fsl_dpa-objs += dpaa_debugfs.o
256 +ifeq ($(CONFIG_FSL_DPAA_1588),y)
257 +fsl_dpa-objs += dpaa_1588.o
259 +ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
260 +ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
261 +fsl_dpa-objs += dpaa_eth_ceetm.o
264 +fsl_mac-objs += mac.o mac-api.o
267 +ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
268 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
269 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
271 +fsl_advanced-objs += dpaa_eth_base.o
272 +# support for multiple drivers per kernel module comes in kernel 3.14
273 +# so we are forced to generate several modules for the advanced drivers
274 +fsl_proxy-objs += dpaa_eth_proxy.o
276 +ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
277 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
279 +fsl_oh-objs += offline_port.o
283 +# Needed by the tracing framework
284 +CFLAGS_dpaa_eth.o := -I$(src)
286 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
288 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
289 + * Copyright (C) 2009 IXXAT Automation, GmbH
291 + * DPAA Ethernet Driver -- IEEE 1588 interface functionality
293 + * This program is free software; you can redistribute it and/or modify
294 + * it under the terms of the GNU General Public License as published by
295 + * the Free Software Foundation; either version 2 of the License, or
296 + * (at your option) any later version.
298 + * This program is distributed in the hope that it will be useful,
299 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
300 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
301 + * GNU General Public License for more details.
303 + * You should have received a copy of the GNU General Public License along
304 + * with this program; if not, write to the Free Software Foundation, Inc.,
305 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
308 +#include <linux/io.h>
309 +#include <linux/device.h>
310 +#include <linux/fs.h>
311 +#include <linux/vmalloc.h>
312 +#include <linux/spinlock.h>
313 +#include <linux/ip.h>
314 +#include <linux/ipv6.h>
315 +#include <linux/udp.h>
316 +#include <asm/div64.h>
317 +#include "dpaa_eth.h"
318 +#include "dpaa_eth_common.h"
319 +#include "dpaa_1588.h"
322 +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
324 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
326 + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
327 + if (!circ_buf->buf)
330 + circ_buf->head = 0;
331 + circ_buf->tail = 0;
332 + ptp_buf->size = size;
333 + spin_lock_init(&ptp_buf->ptp_lock);
338 +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
340 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
342 + circ_buf->head = 0;
343 + circ_buf->tail = 0;
344 + ptp_buf->size = size;
347 +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
348 + struct dpa_ptp_data *data)
350 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
351 + int size = ptp_buf->size;
352 + struct dpa_ptp_data *tmp;
353 + unsigned long flags;
356 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
358 + head = circ_buf->head;
359 + tail = circ_buf->tail;
361 + if (CIRC_SPACE(head, tail, size) <= 0)
362 + circ_buf->tail = (tail + 1) & (size - 1);
364 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
365 + memcpy(tmp, data, sizeof(struct dpa_ptp_data));
367 + circ_buf->head = (head + 1) & (size - 1);
369 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
374 +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
375 + struct dpa_ptp_ident *src)
379 + if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
382 + if ((dst->netw_prot == src->netw_prot)
383 + || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
384 + if (dst->seq_id != src->seq_id)
387 + ret = memcmp(dst->snd_port_id, src->snd_port_id,
388 + DPA_PTP_SOURCE_PORT_LENGTH);
398 +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
399 + struct dpa_ptp_ident *ident,
400 + struct dpa_ptp_time *ts)
402 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
403 + int size = ptp_buf->size;
404 + int head, tail, idx;
405 + unsigned long flags;
406 + struct dpa_ptp_data *tmp, *tmp2;
407 + struct dpa_ptp_ident *tmp_ident;
409 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
411 + head = circ_buf->head;
412 + tail = idx = circ_buf->tail;
414 + if (CIRC_CNT(head, tail, size) == 0) {
415 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
419 + while (idx != head) {
420 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
421 + tmp_ident = &tmp->ident;
422 + if (dpa_ptp_is_ident_match(tmp_ident, ident))
424 + idx = (idx + 1) & (size - 1);
428 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
432 + ts->sec = tmp->ts.sec;
433 + ts->nsec = tmp->ts.nsec;
436 + if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
437 + tail = circ_buf->tail =
438 + (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
441 + while (CIRC_CNT(idx, tail, size) > 0) {
442 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
443 + idx = (idx - 1) & (size - 1);
444 + tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
448 + circ_buf->tail = (tail + 1) & (size - 1);
450 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
455 +/* Parse the PTP packets
457 + * The PTP header can be found in an IPv4 packet, IPv6 packet or in
458 + * an IEEE802.3 ethernet frame. This function returns the position of
459 + * the PTP packet or NULL if no PTP found
461 +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
463 + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
464 + u8 *ptp_loc = NULL;
466 + u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
468 + struct udphdr *udph;
469 + struct ipv6hdr *ipv6h;
471 + /* when we can receive S/G frames we need to check the data we want to
472 + * access is in the linear skb buffer
474 + if (!pskb_may_pull(skb, access_len))
477 + *eth_type = *((u16 *)pos);
479 + /* Check if inner tag is here */
480 + if (*eth_type == ETH_P_8021Q) {
481 + access_len += DPA_VLAN_TAG_LEN;
483 + if (!pskb_may_pull(skb, access_len))
486 + pos += DPA_VLAN_TAG_LEN;
487 + *eth_type = *((u16 *)pos);
490 + pos += DPA_ETYPE_LEN;
492 + switch (*eth_type) {
493 + /* Transport of PTP over Ethernet */
497 + if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
500 + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
501 + if ((msg_type == PTP_MSGTYPE_SYNC)
502 + || (msg_type == PTP_MSGTYPE_DELREQ)
503 + || (msg_type == PTP_MSGTYPE_PDELREQ)
504 + || (msg_type == PTP_MSGTYPE_PDELRESP))
507 + /* Transport of PTP over IPv4 */
509 + iph = (struct iphdr *)pos;
510 + access_len += sizeof(struct iphdr);
512 + if (!pskb_may_pull(skb, access_len))
515 + if (ntohs(iph->protocol) != IPPROTO_UDP)
518 + access_len += iph->ihl * 4 - sizeof(struct iphdr) +
519 + sizeof(struct udphdr);
521 + if (!pskb_may_pull(skb, access_len))
524 + pos += iph->ihl * 4;
525 + udph = (struct udphdr *)pos;
526 + if (ntohs(udph->dest) != 319)
528 + ptp_loc = pos + sizeof(struct udphdr);
530 + /* Transport of PTP over IPv6 */
532 + ipv6h = (struct ipv6hdr *)pos;
534 + access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
536 + if (ntohs(ipv6h->nexthdr) != IPPROTO_UDP)
539 + pos += sizeof(struct ipv6hdr);
540 + udph = (struct udphdr *)pos;
541 + if (ntohs(udph->dest) != 319)
543 + ptp_loc = pos + sizeof(struct udphdr);
552 +static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
553 + struct sk_buff *skb, void *data, enum port_type rx_tx,
554 + struct dpa_ptp_data *ptp_data)
561 + ptp_loc = dpa_ptp_parse_packet(skb, ð_type);
565 + switch (eth_type) {
567 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
570 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
573 + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
579 + if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
582 + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
583 + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
584 + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
585 + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
586 + DPA_PTP_SOURCE_PORT_LENGTH);
588 + nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
589 + mod = do_div(nsec, NANOSEC_PER_SECOND);
590 + ptp_data->ts.sec = nsec;
591 + ptp_data->ts.nsec = mod;
596 +void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
597 + struct sk_buff *skb, void *data)
599 + struct dpa_ptp_tsu *tsu = priv->tsu;
600 + struct dpa_ptp_data ptp_tx_data;
602 + if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
605 + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
608 +void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
609 + struct sk_buff *skb, void *data)
611 + struct dpa_ptp_tsu *tsu = priv->tsu;
612 + struct dpa_ptp_data ptp_rx_data;
614 + if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
617 + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
620 +static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
621 + struct dpa_ptp_ident *ident,
622 + struct dpa_ptp_time *ts)
624 + struct dpa_ptp_tsu *tsu = ptp_tsu;
625 + struct dpa_ptp_time tmp;
628 + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
631 + ts->nsec = tmp.nsec;
638 +static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
639 + struct dpa_ptp_ident *ident,
640 + struct dpa_ptp_time *ts)
642 + struct dpa_ptp_tsu *tsu = ptp_tsu;
643 + struct dpa_ptp_time tmp;
646 + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
649 + ts->nsec = tmp.nsec;
656 +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
657 + struct dpa_ptp_time *cnt_time)
659 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
662 + if (mac_dev->fm_rtc_disable)
663 + mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
665 + /* TMR_FIPER1 will pulse every second after ALARM1 expired */
666 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
667 + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
668 + if (mac_dev->fm_rtc_set_alarm)
669 + mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
671 + if (mac_dev->fm_rtc_set_fiper)
672 + mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
675 + if (mac_dev->fm_rtc_enable)
676 + mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
679 +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
680 + struct dpa_ptp_time *curr_time)
682 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
686 + if (mac_dev->fm_rtc_get_cnt)
687 + mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
690 + mod = do_div(tmp, NANOSEC_PER_SECOND);
691 + curr_time->sec = (u32)tmp;
692 + curr_time->nsec = mod;
695 +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
696 + struct dpa_ptp_time *cnt_time)
698 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
701 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
703 + if (mac_dev->fm_rtc_set_cnt)
704 + mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
707 + /* Restart fiper two seconds later */
708 + cnt_time->sec += 2;
709 + cnt_time->nsec = 0;
710 + dpa_set_fiper_alarm(tsu, cnt_time);
713 +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
715 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
718 + if (mac_dev->fm_rtc_get_drift)
719 + mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
725 +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
727 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
729 + if (mac_dev->fm_rtc_set_drift)
730 + mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
734 +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
736 + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
737 + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
740 +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
742 + struct dpa_priv_s *priv = netdev_priv(dev);
743 + struct dpa_ptp_tsu *tsu = priv->tsu;
744 + struct mac_device *mac_dev = priv->mac_dev;
745 + struct dpa_ptp_data ptp_data;
746 + struct dpa_ptp_data *ptp_data_user;
747 + struct dpa_ptp_time act_time;
751 + if (!tsu || !tsu->valid)
755 + case PTP_ENBL_TXTS_IOCTL:
756 + tsu->hwts_tx_en_ioctl = 1;
757 + if (mac_dev->fm_rtc_enable)
758 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
759 + if (mac_dev->ptp_enable)
760 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
762 + case PTP_DSBL_TXTS_IOCTL:
763 + tsu->hwts_tx_en_ioctl = 0;
764 + if (mac_dev->fm_rtc_disable)
765 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
766 + if (mac_dev->ptp_disable)
767 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
769 + case PTP_ENBL_RXTS_IOCTL:
770 + tsu->hwts_rx_en_ioctl = 1;
772 + case PTP_DSBL_RXTS_IOCTL:
773 + tsu->hwts_rx_en_ioctl = 0;
775 + case PTP_GET_RX_TIMESTAMP:
776 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
777 + if (copy_from_user(&ptp_data.ident,
778 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
781 + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
784 + if (copy_to_user((void __user *)&ptp_data_user->ts,
785 + &ptp_data.ts, sizeof(ptp_data.ts)))
788 + case PTP_GET_TX_TIMESTAMP:
789 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
790 + if (copy_from_user(&ptp_data.ident,
791 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
794 + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
797 + if (copy_to_user((void __user *)&ptp_data_user->ts,
798 + &ptp_data.ts, sizeof(ptp_data.ts)))
802 + dpa_get_curr_cnt(tsu, &act_time);
803 + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
807 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
809 + dpa_set_1588cnt(tsu, &act_time);
812 + dpa_get_drift(tsu, &addend);
813 + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
817 + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
819 + dpa_set_drift(tsu, addend);
821 + case PTP_SET_FIPER_ALARM:
822 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
824 + dpa_set_fiper_alarm(tsu, &act_time);
826 + case PTP_CLEANUP_TS:
827 + dpa_flush_timestamp(tsu);
836 +int dpa_ptp_init(struct dpa_priv_s *priv)
838 + struct dpa_ptp_tsu *tsu;
840 + /* Allocate memory for PTP structure */
841 + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
846 + tsu->dpa_priv = priv;
848 + dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
849 + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
855 +EXPORT_SYMBOL(dpa_ptp_init);
857 +void dpa_ptp_cleanup(struct dpa_priv_s *priv)
859 + struct dpa_ptp_tsu *tsu = priv->tsu;
861 + tsu->valid = FALSE;
862 + vfree(tsu->rx_timestamps.circ_buf.buf);
863 + vfree(tsu->tx_timestamps.circ_buf.buf);
867 +EXPORT_SYMBOL(dpa_ptp_cleanup);
869 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
871 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
873 + * This program is free software; you can redistribute it and/or modify
874 + * it under the terms of the GNU General Public License as published by
875 + * the Free Software Foundation; either version 2 of the License, or
876 + * (at your option) any later version.
878 + * This program is distributed in the hope that it will be useful,
879 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
880 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
881 + * GNU General Public License for more details.
883 + * You should have received a copy of the GNU General Public License along
884 + * with this program; if not, write to the Free Software Foundation, Inc.,
885 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
888 +#ifndef __DPAA_1588_H__
889 +#define __DPAA_1588_H__
891 +#include <linux/netdevice.h>
892 +#include <linux/etherdevice.h>
893 +#include <linux/circ_buf.h>
894 +#include <linux/fsl_qman.h>
896 +#define DEFAULT_PTP_RX_BUF_SZ 256
897 +#define DEFAULT_PTP_TX_BUF_SZ 256
899 +/* 1588 private ioctl calls */
900 +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
901 +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
902 +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
903 +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
904 +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
905 +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
906 +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
907 +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
908 +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
909 +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
910 +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
911 +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
913 +/* PTP V2 message type */
915 + PTP_MSGTYPE_SYNC = 0x0,
916 + PTP_MSGTYPE_DELREQ = 0x1,
917 + PTP_MSGTYPE_PDELREQ = 0x2,
918 + PTP_MSGTYPE_PDELRESP = 0x3,
919 + PTP_MSGTYPE_FLWUP = 0x8,
920 + PTP_MSGTYPE_DELRESP = 0x9,
921 + PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
922 + PTP_MSGTYPE_ANNOUNCE = 0xB,
923 + PTP_MSGTYPE_SGNLNG = 0xC,
924 + PTP_MSGTYPE_MNGMNT = 0xD,
927 +/* Byte offset of data in the PTP V2 headers */
928 +#define PTP_OFFS_MSG_TYPE 0
929 +#define PTP_OFFS_VER_PTP 1
930 +#define PTP_OFFS_MSG_LEN 2
931 +#define PTP_OFFS_DOM_NMB 4
932 +#define PTP_OFFS_FLAGS 6
933 +#define PTP_OFFS_CORFIELD 8
934 +#define PTP_OFFS_SRCPRTID 20
935 +#define PTP_OFFS_SEQ_ID 30
936 +#define PTP_OFFS_CTRL 32
937 +#define PTP_OFFS_LOGMEAN 33
939 +#define PTP_IP_OFFS 14
940 +#define PTP_UDP_OFFS 34
941 +#define PTP_HEADER_OFFS 42
942 +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
943 +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
944 +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
945 +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
947 +/* 1588-2008 network protocol enumeration values */
948 +#define DPA_PTP_PROT_IPV4 1
949 +#define DPA_PTP_PROT_IPV6 2
950 +#define DPA_PTP_PROT_802_3 3
951 +#define DPA_PTP_PROT_DONTCARE 0xFFFF
953 +#define DPA_PTP_SOURCE_PORT_LENGTH 10
954 +#define DPA_PTP_HEADER_SZE 34
955 +#define DPA_ETYPE_LEN 2
956 +#define DPA_VLAN_TAG_LEN 4
957 +#define NANOSEC_PER_SECOND 1000000000
959 +/* The threshold between the current found one and the oldest one */
960 +#define TS_ACCUMULATION_THRESHOLD 50
962 +/* Struct needed to identify a timestamp */
963 +struct dpa_ptp_ident {
968 + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
971 +/* Timestamp format in 1588-2008 */
972 +struct dpa_ptp_time {
973 + u64 sec; /* just 48 bit used */
977 +/* needed for timestamp data over ioctl */
978 +struct dpa_ptp_data {
979 + struct dpa_ptp_ident ident;
980 + struct dpa_ptp_time ts;
983 +struct dpa_ptp_circ_buf {
984 + struct circ_buf circ_buf;
986 + spinlock_t ptp_lock;
989 +/* PTP TSU control structure */
990 +struct dpa_ptp_tsu {
991 + struct dpa_priv_s *dpa_priv;
993 + struct dpa_ptp_circ_buf rx_timestamps;
994 + struct dpa_ptp_circ_buf tx_timestamps;
996 + /* HW timestamping over ioctl enabled flag */
997 + int hwts_tx_en_ioctl;
998 + int hwts_rx_en_ioctl;
1001 +extern int dpa_ptp_init(struct dpa_priv_s *priv);
1002 +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
1003 +extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
1004 + struct sk_buff *skb, void *data);
1005 +extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
1006 + struct sk_buff *skb, void *data);
1007 +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
1010 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
1012 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1014 + * Redistribution and use in source and binary forms, with or without
1015 + * modification, are permitted provided that the following conditions are met:
1016 + * * Redistributions of source code must retain the above copyright
1017 + * notice, this list of conditions and the following disclaimer.
1018 + * * Redistributions in binary form must reproduce the above copyright
1019 + * notice, this list of conditions and the following disclaimer in the
1020 + * documentation and/or other materials provided with the distribution.
1021 + * * Neither the name of Freescale Semiconductor nor the
1022 + * names of its contributors may be used to endorse or promote products
1023 + * derived from this software without specific prior written permission.
1026 + * ALTERNATIVELY, this software may be distributed under the terms of the
1027 + * GNU General Public License ("GPL") as published by the Free Software
1028 + * Foundation, either version 2 of that License or (at your option) any
1031 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1032 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1033 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1034 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1035 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1036 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1037 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1038 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1039 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1040 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1043 +#include <linux/module.h>
1044 +#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
1045 +#include <linux/debugfs.h>
1046 +#include "dpaa_debugfs.h"
1047 +#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
1049 +#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
1050 +#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
1052 +static struct dentry *dpa_debugfs_root;
1054 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
1055 +static ssize_t dpa_loop_write(struct file *f,
1056 + const char __user *buf, size_t count, loff_t *off);
1058 +static const struct file_operations dpa_debugfs_lp_fops = {
1059 + .open = dpa_debugfs_loop_open,
1060 + .write = dpa_loop_write,
1062 + .llseek = seq_lseek,
1063 + .release = single_release,
1066 +static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
1068 + struct dpa_priv_s *priv;
1070 + BUG_ON(offset == NULL);
1072 + priv = netdev_priv((struct net_device *)file->private);
1073 + seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
1078 +static int user_input_convert(const char __user *user_buf, size_t count,
1083 + if (count > sizeof(buf) - 1)
1085 + if (copy_from_user(buf, user_buf, count))
1087 + buf[count] = '\0';
1088 + if (kstrtol(buf, 0, val))
1093 +static ssize_t dpa_loop_write(struct file *f,
1094 + const char __user *buf, size_t count, loff_t *off)
1096 + struct dpa_priv_s *priv;
1097 + struct net_device *netdev;
1098 + struct seq_file *sf;
1102 + ret = user_input_convert(buf, count, &val);
1106 + sf = (struct seq_file *)f->private_data;
1107 + netdev = (struct net_device *)sf->private;
1108 + priv = netdev_priv(netdev);
1110 + priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
1115 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
1118 + const struct net_device *net_dev;
1120 + _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
1121 + if (unlikely(_errno < 0)) {
1122 + net_dev = (struct net_device *)inode->i_private;
1124 + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
1125 + netdev_err(net_dev, "single_open() = %d\n",
1133 +int dpa_netdev_debugfs_create(struct net_device *net_dev)
1135 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1137 + char loop_file_name[100];
1139 + if (unlikely(dpa_debugfs_root == NULL)) {
1140 + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
1141 + KBUILD_BASENAME".c", __LINE__, __func__,
1142 + "root debugfs missing, possible module ordering issue");
1146 + sprintf(loop_file_name, "eth%d_loop", ++cnt);
1147 + priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
1151 + &dpa_debugfs_lp_fops);
1152 + if (unlikely(priv->debugfs_loop_file == NULL)) {
1153 + netdev_err(net_dev, "debugfs_create_file(%s/%s)",
1154 + dpa_debugfs_root->d_iname,
1162 +void dpa_netdev_debugfs_remove(struct net_device *net_dev)
1164 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1166 + debugfs_remove(priv->debugfs_loop_file);
1169 +int __init dpa_debugfs_module_init(void)
1173 + pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
1175 + dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
1177 + if (unlikely(dpa_debugfs_root == NULL)) {
1179 + pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
1180 + KBUILD_BASENAME".c", __LINE__, __func__);
1181 + pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
1182 + DPA_ETH_DEBUGFS_ROOT, _errno);
1188 +void __exit dpa_debugfs_module_exit(void)
1190 + debugfs_remove(dpa_debugfs_root);
1193 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
1195 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1197 + * Redistribution and use in source and binary forms, with or without
1198 + * modification, are permitted provided that the following conditions are met:
1199 + * * Redistributions of source code must retain the above copyright
1200 + * notice, this list of conditions and the following disclaimer.
1201 + * * Redistributions in binary form must reproduce the above copyright
1202 + * notice, this list of conditions and the following disclaimer in the
1203 + * documentation and/or other materials provided with the distribution.
1204 + * * Neither the name of Freescale Semiconductor nor the
1205 + * names of its contributors may be used to endorse or promote products
1206 + * derived from this software without specific prior written permission.
1209 + * ALTERNATIVELY, this software may be distributed under the terms of the
1210 + * GNU General Public License ("GPL") as published by the Free Software
1211 + * Foundation, either version 2 of that License or (at your option) any
1214 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1215 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1216 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1217 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1218 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1219 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1220 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1221 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1222 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1223 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1226 +#ifndef DPAA_DEBUGFS_H_
1227 +#define DPAA_DEBUGFS_H_
1229 +#include <linux/netdevice.h>
1230 +#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
1232 +int dpa_netdev_debugfs_create(struct net_device *net_dev);
1233 +void dpa_netdev_debugfs_remove(struct net_device *net_dev);
1234 +int __init dpa_debugfs_module_init(void);
1235 +void __exit dpa_debugfs_module_exit(void);
1237 +#endif /* DPAA_DEBUGFS_H_ */
1239 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
1241 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1243 + * Redistribution and use in source and binary forms, with or without
1244 + * modification, are permitted provided that the following conditions are met:
1245 + * * Redistributions of source code must retain the above copyright
1246 + * notice, this list of conditions and the following disclaimer.
1247 + * * Redistributions in binary form must reproduce the above copyright
1248 + * notice, this list of conditions and the following disclaimer in the
1249 + * documentation and/or other materials provided with the distribution.
1250 + * * Neither the name of Freescale Semiconductor nor the
1251 + * names of its contributors may be used to endorse or promote products
1252 + * derived from this software without specific prior written permission.
1255 + * ALTERNATIVELY, this software may be distributed under the terms of the
1256 + * GNU General Public License ("GPL") as published by the Free Software
1257 + * Foundation, either version 2 of that License or (at your option) any
1260 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1261 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1262 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1263 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1264 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1265 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1266 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1267 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1268 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1269 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1272 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
1273 +#define pr_fmt(fmt) \
1274 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
1275 + KBUILD_BASENAME".c", __LINE__, __func__
1277 +#define pr_fmt(fmt) \
1278 + KBUILD_MODNAME ": " fmt
1281 +#include <linux/init.h>
1282 +#include <linux/module.h>
1283 +#include <linux/of_mdio.h>
1284 +#include <linux/of_net.h>
1285 +#include <linux/kthread.h>
1286 +#include <linux/io.h>
1287 +#include <linux/if_arp.h> /* arp_hdr_len() */
1288 +#include <linux/if_vlan.h> /* VLAN_HLEN */
1289 +#include <linux/icmp.h> /* struct icmphdr */
1290 +#include <linux/ip.h> /* struct iphdr */
1291 +#include <linux/ipv6.h> /* struct ipv6hdr */
1292 +#include <linux/udp.h> /* struct udphdr */
1293 +#include <linux/tcp.h> /* struct tcphdr */
1294 +#include <linux/net.h> /* net_ratelimit() */
1295 +#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
1296 +#include <linux/highmem.h>
1297 +#include <linux/percpu.h>
1298 +#include <linux/dma-mapping.h>
1299 +#include <linux/fsl_bman.h>
1300 +#ifdef CONFIG_SOC_BUS
1301 +#include <linux/sys_soc.h> /* soc_device_match */
1304 +#include "fsl_fman.h"
1305 +#include "fm_ext.h"
1306 +#include "fm_port_ext.h"
1309 +#include "dpaa_eth.h"
1310 +#include "dpaa_eth_common.h"
1311 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1312 +#include "dpaa_debugfs.h"
1313 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
1315 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1316 + * using trace events only need to #include <trace/events/sched.h>
1318 +#define CREATE_TRACE_POINTS
1319 +#include "dpaa_eth_trace.h"
1321 +#define DPA_NAPI_WEIGHT 64
1323 +/* Valid checksum indication */
1324 +#define DPA_CSUM_VALID 0xFFFF
1326 +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
1328 +MODULE_LICENSE("Dual BSD/GPL");
1330 +MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
1332 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
1334 +static uint8_t debug = -1;
1335 +module_param(debug, byte, S_IRUGO);
1336 +MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
1338 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
1339 +static uint16_t tx_timeout = 1000;
1340 +module_param(tx_timeout, ushort, S_IRUGO);
1341 +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
1343 +static const char rtx[][3] = {
1349 +bool dpaa_errata_a010022;
1350 +EXPORT_SYMBOL(dpaa_errata_a010022);
1355 +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
1357 +static uint8_t dpa_priv_common_bpid;
1359 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1360 +struct net_device *dpa_loop_netdevs[20];
1365 +static int dpaa_suspend(struct device *dev)
1367 + struct net_device *net_dev;
1368 + struct dpa_priv_s *priv;
1369 + struct mac_device *mac_dev;
1372 + net_dev = dev_get_drvdata(dev);
1374 + if (net_dev->flags & IFF_UP) {
1375 + priv = netdev_priv(net_dev);
1376 + mac_dev = priv->mac_dev;
1378 + if (priv->wol & DPAA_WOL_MAGIC) {
1379 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1380 + priv->mac_dev->get_mac_handle(mac_dev), true);
1382 + netdev_err(net_dev, "set_wol() = %d\n", err);
1383 + goto set_wol_failed;
1387 + err = fm_port_suspend(mac_dev->port_dev[RX]);
1389 + netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
1390 + goto rx_port_suspend_failed;
1393 + err = fm_port_suspend(mac_dev->port_dev[TX]);
1395 + netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
1396 + goto tx_port_suspend_failed;
1402 +tx_port_suspend_failed:
1403 + fm_port_resume(mac_dev->port_dev[RX]);
1404 +rx_port_suspend_failed:
1405 + if (priv->wol & DPAA_WOL_MAGIC) {
1406 + priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1407 + priv->mac_dev->get_mac_handle(mac_dev), false);
1413 +static int dpaa_resume(struct device *dev)
1415 + struct net_device *net_dev;
1416 + struct dpa_priv_s *priv;
1417 + struct mac_device *mac_dev;
1420 + net_dev = dev_get_drvdata(dev);
1422 + if (net_dev->flags & IFF_UP) {
1423 + priv = netdev_priv(net_dev);
1424 + mac_dev = priv->mac_dev;
1426 + err = fm_mac_resume(mac_dev->get_mac_handle(mac_dev));
1428 + netdev_err(net_dev, "fm_mac_resume = %d\n", err);
1429 + goto resume_failed;
1432 + err = fm_port_resume(mac_dev->port_dev[TX]);
1434 + netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
1435 + goto resume_failed;
1438 + err = fm_port_resume(mac_dev->port_dev[RX]);
1440 + netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
1441 + goto resume_failed;
1444 + if (priv->wol & DPAA_WOL_MAGIC) {
1445 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1446 + priv->mac_dev->get_mac_handle(mac_dev), false);
1448 + netdev_err(net_dev, "set_wol() = %d\n", err);
1449 + goto resume_failed;
1460 +static const struct dev_pm_ops dpaa_pm_ops = {
1461 + .suspend = dpaa_suspend,
1462 + .resume = dpaa_resume,
1465 +#define DPAA_PM_OPS (&dpaa_pm_ops)
1467 +#else /* CONFIG_PM */
1469 +#define DPAA_PM_OPS NULL
1471 +#endif /* CONFIG_PM */
1473 +/* Checks whether the checksum field in Parse Results array is valid
1474 + * (equals 0xFFFF) and increments the .cse counter otherwise
1477 +dpa_csum_validation(const struct dpa_priv_s *priv,
1478 + struct dpa_percpu_priv_s *percpu_priv,
1479 + const struct qm_fd *fd)
1481 + dma_addr_t addr = qm_fd_addr(fd);
1482 + struct dpa_bp *dpa_bp = priv->dpa_bp;
1483 + void *frm = phys_to_virt(addr);
1484 + fm_prs_result_t *parse_result;
1486 + if (unlikely(!frm))
1489 + dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
1490 + DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
1492 + parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
1494 + if (parse_result->cksum != DPA_CSUM_VALID)
1495 + percpu_priv->rx_errors.cse++;
1498 +static void _dpa_rx_error(struct net_device *net_dev,
1499 + const struct dpa_priv_s *priv,
1500 + struct dpa_percpu_priv_s *percpu_priv,
1501 + const struct qm_fd *fd,
1504 + /* limit common, possibly innocuous Rx FIFO Overflow errors'
1505 + * interference with zero-loss convergence benchmark results.
1507 + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
1508 + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
1510 + if (netif_msg_hw(priv) && net_ratelimit())
1511 + netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
1512 + fd->status & FM_FD_STAT_RX_ERRORS);
1513 +#ifdef CONFIG_FSL_DPAA_HOOKS
1514 + if (dpaa_eth_hooks.rx_error &&
1515 + dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1516 + /* it's up to the hook to perform resource cleanup */
1519 + percpu_priv->stats.rx_errors++;
1521 + if (fd->status & FM_PORT_FRM_ERR_DMA)
1522 + percpu_priv->rx_errors.dme++;
1523 + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
1524 + percpu_priv->rx_errors.fpe++;
1525 + if (fd->status & FM_PORT_FRM_ERR_SIZE)
1526 + percpu_priv->rx_errors.fse++;
1527 + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
1528 + percpu_priv->rx_errors.phe++;
1529 + if (fd->status & FM_FD_STAT_L4CV)
1530 + dpa_csum_validation(priv, percpu_priv, fd);
1532 + dpa_fd_release(net_dev, fd);
1535 +static void _dpa_tx_error(struct net_device *net_dev,
1536 + const struct dpa_priv_s *priv,
1537 + struct dpa_percpu_priv_s *percpu_priv,
1538 + const struct qm_fd *fd,
1541 + struct sk_buff *skb;
1543 + if (netif_msg_hw(priv) && net_ratelimit())
1544 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1545 + fd->status & FM_FD_STAT_TX_ERRORS);
1546 +#ifdef CONFIG_FSL_DPAA_HOOKS
1547 + if (dpaa_eth_hooks.tx_error &&
1548 + dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1549 + /* now the hook must ensure proper cleanup */
1552 + percpu_priv->stats.tx_errors++;
1554 + /* If we intended the buffers from this frame to go into the bpools
1555 + * when the FMan transmit was done, we need to put it in manually.
1557 + if (fd->bpid != 0xff) {
1558 + dpa_fd_release(net_dev, fd);
1562 + skb = _dpa_cleanup_tx_fd(priv, fd);
1563 + dev_kfree_skb(skb);
1566 +/* Helper function to factor out frame validation logic on all Rx paths. Its
1567 + * purpose is to extract from the Parse Results structure information about
1568 + * the integrity of the frame, its checksum, the length of the parsed headers
1569 + * and whether the frame is suitable for GRO.
1571 + * Assumes no parser errors, since any error frame is dropped before this
1572 + * function is called.
1574 + * @skb will have its ip_summed field overwritten;
1575 + * @use_gro will only be written with 0, if the frame is definitely not
1576 + * GRO-able; otherwise, it will be left unchanged;
1577 + * @hdr_size will be written with a safe value, at least the size of the
1578 + * headers' length.
1580 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
1581 + const struct qm_fd *fd,
1582 + struct sk_buff *skb, int *use_gro)
1584 + if (fd->status & FM_FD_STAT_L4CV) {
1585 + /* The parser has run and performed L4 checksum validation.
1586 + * We know there were no parser errors (and implicitly no
1587 + * L4 csum error), otherwise we wouldn't be here.
1589 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1591 + /* Don't go through GRO for certain types of traffic that
1592 + * we know are not GRO-able, such as dgram-based protocols.
1593 + * In the worst-case scenarios, such as small-pkt terminating
1594 + * UDP, the extra GRO processing would be overkill.
1596 + * The only protocol the Parser supports that is also GRO-able
1597 + * is currently TCP.
1599 + if (!fm_l4_frame_is_tcp(parse_results))
1605 + /* We're here because either the parser didn't run or the L4 checksum
1606 + * was not verified. This may include the case of a UDP frame with
1607 + * checksum zero or an L4 proto other than TCP/UDP
1609 + skb->ip_summed = CHECKSUM_NONE;
1611 + /* Bypass GRO for unknown traffic or if no PCDs are applied */
1615 +int dpaa_eth_poll(struct napi_struct *napi, int budget)
1617 + struct dpa_napi_portal *np =
1618 + container_of(napi, struct dpa_napi_portal, napi);
1620 + int cleaned = qman_p_poll_dqrr(np->p, budget);
1622 + if (cleaned < budget) {
1624 + napi_complete(napi);
1625 + tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1631 +EXPORT_SYMBOL(dpaa_eth_poll);
1633 +static void __hot _dpa_tx_conf(struct net_device *net_dev,
1634 + const struct dpa_priv_s *priv,
1635 + struct dpa_percpu_priv_s *percpu_priv,
1636 + const struct qm_fd *fd,
1639 + struct sk_buff *skb;
1641 + /* do we need the timestamp for the error frames? */
1643 + if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
1644 + if (netif_msg_hw(priv) && net_ratelimit())
1645 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1646 + fd->status & FM_FD_STAT_TX_ERRORS);
1648 + percpu_priv->stats.tx_errors++;
1651 + /* hopefully we need not get the timestamp before the hook */
1652 +#ifdef CONFIG_FSL_DPAA_HOOKS
1653 + if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
1654 + fd, fqid) == DPAA_ETH_STOLEN)
1655 + /* it's the hook that must now perform cleanup */
1658 + /* This might not perfectly reflect the reality, if the core dequeuing
1659 + * the Tx confirmation is different from the one that did the enqueue,
1660 + * but at least it'll show up in the total count.
1662 + percpu_priv->tx_confirm++;
1664 + skb = _dpa_cleanup_tx_fd(priv, fd);
1666 + dev_kfree_skb(skb);
1669 +enum qman_cb_dqrr_result
1670 +priv_rx_error_dqrr(struct qman_portal *portal,
1671 + struct qman_fq *fq,
1672 + const struct qm_dqrr_entry *dq)
1674 + struct net_device *net_dev;
1675 + struct dpa_priv_s *priv;
1676 + struct dpa_percpu_priv_s *percpu_priv;
1679 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1680 + priv = netdev_priv(net_dev);
1682 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1683 + count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
1685 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1686 + return qman_cb_dqrr_stop;
1688 + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
1689 + /* Unable to refill the buffer pool due to insufficient
1690 + * system memory. Just release the frame back into the pool,
1691 + * otherwise we'll soon end up with an empty buffer pool.
1693 + dpa_fd_release(net_dev, &dq->fd);
1695 + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1697 + return qman_cb_dqrr_consume;
1701 +enum qman_cb_dqrr_result __hot
1702 +priv_rx_default_dqrr(struct qman_portal *portal,
1703 + struct qman_fq *fq,
1704 + const struct qm_dqrr_entry *dq)
1706 + struct net_device *net_dev;
1707 + struct dpa_priv_s *priv;
1708 + struct dpa_percpu_priv_s *percpu_priv;
1710 + struct dpa_bp *dpa_bp;
1712 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1713 + priv = netdev_priv(net_dev);
1714 + dpa_bp = priv->dpa_bp;
1716 + /* Trace the Rx fd */
1717 + trace_dpa_rx_fd(net_dev, fq, &dq->fd);
1719 + /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
1720 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1721 + count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
1723 + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
1724 + return qman_cb_dqrr_stop;
1726 + /* Vale of plenty: make sure we didn't run out of buffers */
1728 + if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
1729 + /* Unable to refill the buffer pool due to insufficient
1730 + * system memory. Just release the frame back into the pool,
1731 + * otherwise we'll soon end up with an empty buffer pool.
1733 + dpa_fd_release(net_dev, &dq->fd);
1735 + _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
1738 + return qman_cb_dqrr_consume;
1741 +enum qman_cb_dqrr_result
1742 +priv_tx_conf_error_dqrr(struct qman_portal *portal,
1743 + struct qman_fq *fq,
1744 + const struct qm_dqrr_entry *dq)
1746 + struct net_device *net_dev;
1747 + struct dpa_priv_s *priv;
1748 + struct dpa_percpu_priv_s *percpu_priv;
1750 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1751 + priv = netdev_priv(net_dev);
1753 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1755 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1756 + return qman_cb_dqrr_stop;
1758 + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1760 + return qman_cb_dqrr_consume;
1763 +enum qman_cb_dqrr_result __hot
1764 +priv_tx_conf_default_dqrr(struct qman_portal *portal,
1765 + struct qman_fq *fq,
1766 + const struct qm_dqrr_entry *dq)
1768 + struct net_device *net_dev;
1769 + struct dpa_priv_s *priv;
1770 + struct dpa_percpu_priv_s *percpu_priv;
1772 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1773 + priv = netdev_priv(net_dev);
1775 + /* Trace the fd */
1776 + trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
1778 + /* Non-migratable context, safe to use raw_cpu_ptr */
1779 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1781 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1782 + return qman_cb_dqrr_stop;
1784 + _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1786 + return qman_cb_dqrr_consume;
1789 +void priv_ern(struct qman_portal *portal,
1790 + struct qman_fq *fq,
1791 + const struct qm_mr_entry *msg)
1793 + struct net_device *net_dev;
1794 + const struct dpa_priv_s *priv;
1795 + struct sk_buff *skb;
1796 + struct dpa_percpu_priv_s *percpu_priv;
1797 + struct qm_fd fd = msg->ern.fd;
1799 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1800 + priv = netdev_priv(net_dev);
1801 + /* Non-migratable context, safe to use raw_cpu_ptr */
1802 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1804 + percpu_priv->stats.tx_dropped++;
1805 + percpu_priv->stats.tx_fifo_errors++;
1806 + count_ern(percpu_priv, msg);
1808 + /* If we intended this buffer to go into the pool
1809 + * when the FM was done, we need to put it in
1812 + if (msg->ern.fd.bpid != 0xff) {
1813 + dpa_fd_release(net_dev, &fd);
1817 + skb = _dpa_cleanup_tx_fd(priv, &fd);
1818 + dev_kfree_skb_any(skb);
1821 +const struct dpa_fq_cbs_t private_fq_cbs = {
1822 + .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
1823 + .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
1824 + .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
1825 + .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
1826 + .egress_ern = { .cb = { .ern = priv_ern } }
1828 +EXPORT_SYMBOL(private_fq_cbs);
1830 +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
1832 + struct dpa_percpu_priv_s *percpu_priv;
1835 + for_each_possible_cpu(i) {
1836 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1838 + for (j = 0; j < qman_portal_max; j++)
1839 + napi_enable(&percpu_priv->np[j].napi);
1843 +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
1845 + struct dpa_percpu_priv_s *percpu_priv;
1848 + for_each_possible_cpu(i) {
1849 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1851 + for (j = 0; j < qman_portal_max; j++)
1852 + napi_disable(&percpu_priv->np[j].napi);
1856 +static int __cold dpa_eth_priv_start(struct net_device *net_dev)
1859 + struct dpa_priv_s *priv;
1861 + priv = netdev_priv(net_dev);
1863 + dpaa_eth_napi_enable(priv);
1865 + err = dpa_start(net_dev);
1867 + dpaa_eth_napi_disable(priv);
1874 +static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
1877 + struct dpa_priv_s *priv;
1879 + _errno = dpa_stop(net_dev);
1880 + /* Allow NAPI to consume any frame still in the Rx/TxConfirm
1881 + * ingress queues. This is to avoid a race between the current
1882 + * context and ksoftirqd which could leave NAPI disabled while
1883 + * in fact there's still Rx traffic to be processed.
1885 + usleep_range(5000, 10000);
1887 + priv = netdev_priv(net_dev);
1888 + dpaa_eth_napi_disable(priv);
1893 +#ifdef CONFIG_NET_POLL_CONTROLLER
1894 +static void dpaa_eth_poll_controller(struct net_device *net_dev)
1896 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1897 + struct dpa_percpu_priv_s *percpu_priv =
1898 + raw_cpu_ptr(priv->percpu_priv);
1899 + struct qman_portal *p;
1900 + const struct qman_portal_config *pc;
1901 + struct dpa_napi_portal *np;
1903 + p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
1904 + pc = qman_p_get_portal_config(p);
1905 + np = &percpu_priv->np[pc->index];
1907 + qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
1908 + qman_p_poll_dqrr(np->p, np->napi.weight);
1909 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1913 +static const struct net_device_ops dpa_private_ops = {
1914 + .ndo_open = dpa_eth_priv_start,
1915 + .ndo_start_xmit = dpa_tx,
1916 + .ndo_stop = dpa_eth_priv_stop,
1917 + .ndo_tx_timeout = dpa_timeout,
1918 + .ndo_get_stats64 = dpa_get_stats64,
1919 + .ndo_set_mac_address = dpa_set_mac_address,
1920 + .ndo_validate_addr = eth_validate_addr,
1921 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
1922 + .ndo_select_queue = dpa_select_queue,
1924 + .ndo_change_mtu = dpa_change_mtu,
1925 + .ndo_set_rx_mode = dpa_set_rx_mode,
1926 + .ndo_init = dpa_ndo_init,
1927 + .ndo_set_features = dpa_set_features,
1928 + .ndo_fix_features = dpa_fix_features,
1929 + .ndo_do_ioctl = dpa_ioctl,
1930 +#ifdef CONFIG_NET_POLL_CONTROLLER
1931 + .ndo_poll_controller = dpaa_eth_poll_controller,
1935 +static int dpa_private_napi_add(struct net_device *net_dev)
1937 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1938 + struct dpa_percpu_priv_s *percpu_priv;
1941 + for_each_possible_cpu(cpu) {
1942 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
1944 + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
1945 + qman_portal_max * sizeof(struct dpa_napi_portal),
1948 + if (unlikely(percpu_priv->np == NULL)) {
1949 + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
1953 + for (i = 0; i < qman_portal_max; i++)
1954 + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
1955 + dpaa_eth_poll, DPA_NAPI_WEIGHT);
1961 +void dpa_private_napi_del(struct net_device *net_dev)
1963 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1964 + struct dpa_percpu_priv_s *percpu_priv;
1967 + for_each_possible_cpu(cpu) {
1968 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
1970 + if (percpu_priv->np) {
1971 + for (i = 0; i < qman_portal_max; i++)
1972 + netif_napi_del(&percpu_priv->np[i].napi);
1974 + devm_kfree(net_dev->dev.parent, percpu_priv->np);
1978 +EXPORT_SYMBOL(dpa_private_napi_del);
1980 +static int dpa_private_netdev_init(struct net_device *net_dev)
1983 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1984 + struct dpa_percpu_priv_s *percpu_priv;
1985 + const uint8_t *mac_addr;
1987 + /* Although we access another CPU's private data here
1988 + * we do it at initialization so it is safe
1990 + for_each_possible_cpu(i) {
1991 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1992 + percpu_priv->net_dev = net_dev;
1995 + net_dev->netdev_ops = &dpa_private_ops;
1996 + mac_addr = priv->mac_dev->addr;
1998 + net_dev->mem_start = priv->mac_dev->res->start;
1999 + net_dev->mem_end = priv->mac_dev->res->end;
2001 + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2004 + /* Advertise S/G and HIGHDMA support for private interfaces */
2005 + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
2006 + /* Recent kernels enable GSO automatically, if
2007 + * we declare NETIF_F_SG. For conformity, we'll
2008 + * still declare GSO explicitly.
2010 + net_dev->features |= NETIF_F_GSO;
2012 + /* Advertise GRO support */
2013 + net_dev->features |= NETIF_F_GRO;
2015 + return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
2018 +static struct dpa_bp * __cold
2019 +dpa_priv_bp_probe(struct device *dev)
2021 + struct dpa_bp *dpa_bp;
2023 + dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
2024 + if (unlikely(dpa_bp == NULL)) {
2025 + dev_err(dev, "devm_kzalloc() failed\n");
2026 + return ERR_PTR(-ENOMEM);
2029 + dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
2030 + dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
2032 + dpa_bp->seed_cb = dpa_bp_priv_seed;
2033 + dpa_bp->free_buf_cb = _dpa_bp_free_pf;
2038 +/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
2039 + * We won't be sending congestion notifications to FMan; for now, we just use
2040 + * this CGR to generate enqueue rejections to FMan in order to drop the frames
2041 + * before they reach our ingress queues and eat up memory.
2043 +static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
2045 + struct qm_mcc_initcgr initcgr;
2049 + err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2051 + pr_err("Error %d allocating CGR ID\n", err);
2055 + /* Enable CS TD, but disable Congestion State Change Notifications. */
2056 + initcgr.we_mask = QM_CGR_WE_CS_THRES;
2057 + initcgr.cgr.cscn_en = QM_CGR_EN;
2058 + cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
2059 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2061 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2062 + initcgr.cgr.cstd_en = QM_CGR_EN;
2064 + /* This is actually a hack, because this CGR will be associated with
2065 + * our affine SWP. However, we'll place our ingress FQs in it.
2067 + err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2070 + pr_err("Error %d creating ingress CGR with ID %d\n", err,
2071 + priv->ingress_cgr.cgrid);
2072 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2075 + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2076 + priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2078 + /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
2079 + * range), but we have no common initialization path between the
2080 + * different variants of the DPAA Eth driver, so we do it here rather
2081 + * than modifying every other variant than "private Eth".
2083 + priv->use_ingress_cgr = true;
2089 +static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
2092 + struct dpa_priv_s *priv = netdev_priv(net_dev);
2095 + if (netif_msg_probe(priv))
2096 + dev_dbg(net_dev->dev.parent,
2097 + "Using private BM buffer pools\n");
2099 + priv->bp_count = count;
2101 + for (i = 0; i < count; i++) {
2103 + err = dpa_bp_alloc(&dpa_bp[i]);
2105 + dpa_bp_free(priv);
2106 + priv->dpa_bp = NULL;
2110 + priv->dpa_bp = &dpa_bp[i];
2113 + dpa_priv_common_bpid = priv->dpa_bp->bpid;
2117 +static const struct of_device_id dpa_match[];
2119 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2120 +static int dpa_new_loop_id(void)
2129 +dpaa_eth_priv_probe(struct platform_device *_of_dev)
2131 + int err = 0, i, channel;
2132 + struct device *dev;
2133 + struct device_node *dpa_node;
2134 + struct dpa_bp *dpa_bp;
2136 + struct net_device *net_dev = NULL;
2137 + struct dpa_priv_s *priv = NULL;
2138 + struct dpa_percpu_priv_s *percpu_priv;
2139 + struct fm_port_fqs port_fqs;
2140 + struct dpa_buffer_layout_s *buf_layout = NULL;
2141 + struct mac_device *mac_dev;
2143 + dev = &_of_dev->dev;
2145 + dpa_node = dev->of_node;
2147 + if (!of_device_is_available(dpa_node))
2150 + /* Get the buffer pools assigned to this interface;
2151 + * run only once the default pool probing code
2153 + dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
2154 + dpa_priv_bp_probe(dev);
2155 + if (IS_ERR(dpa_bp))
2156 + return PTR_ERR(dpa_bp);
2158 + /* Allocate this early, so we can store relevant information in
2159 + * the private area (needed by 1588 code in dpa_mac_probe)
2161 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
2163 + dev_err(dev, "alloc_etherdev_mq() failed\n");
2164 + goto alloc_etherdev_mq_failed;
2167 + /* Do this here, so we can be verbose early */
2168 + SET_NETDEV_DEV(net_dev, dev);
2169 + dev_set_drvdata(dev, net_dev);
2171 + priv = netdev_priv(net_dev);
2172 + priv->net_dev = net_dev;
2173 + strcpy(priv->if_type, "private");
2175 + priv->msg_enable = netif_msg_init(debug, -1);
2177 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2178 + priv->loop_id = dpa_new_loop_id();
2179 + priv->loop_to = -1; /* disabled by default */
2180 + dpa_loop_netdevs[priv->loop_id] = net_dev;
2183 + mac_dev = dpa_mac_probe(_of_dev);
2184 + if (IS_ERR(mac_dev) || !mac_dev) {
2185 + err = PTR_ERR(mac_dev);
2186 + goto mac_probe_failed;
2189 + /* We have physical ports, so we need to establish
2190 + * the buffer layout.
2192 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
2194 + if (!buf_layout) {
2195 + dev_err(dev, "devm_kzalloc() failed\n");
2196 + goto alloc_failed;
2198 + dpa_set_buffers_layout(mac_dev, buf_layout);
2200 + /* For private ports, need to compute the size of the default
2201 + * buffer pool, based on FMan port buffer layout; also update
2202 + * the maximum buffer size for private ports if necessary
2204 + dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
2206 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2207 + /* We only want to use jumbo frame optimization if we actually have
2208 + * L2 MAX FRM set for jumbo frames as well.
2211 + if (likely(!dpaa_errata_a010022))
2213 + if(fm_get_max_frm() < 9600)
2215 + "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
2218 + INIT_LIST_HEAD(&priv->dpa_fq_list);
2220 + memset(&port_fqs, 0, sizeof(port_fqs));
2222 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
2224 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
2225 + &port_fqs, true, TX);
2228 + goto fq_probe_failed;
2232 + err = dpa_priv_bp_create(net_dev, dpa_bp, count);
2235 + goto bp_create_failed;
2237 + priv->mac_dev = mac_dev;
2239 + channel = dpa_get_channel();
2241 + if (channel < 0) {
2243 + goto get_channel_failed;
2246 + priv->channel = (uint16_t)channel;
2247 + dpaa_eth_add_channel(priv->channel);
2249 + dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
2251 + /* Create a congestion group for this netdev, with
2252 + * dynamically-allocated CGR ID.
2253 + * Must be executed after probing the MAC, but before
2254 + * assigning the egress FQs to the CGRs.
2256 + err = dpaa_eth_cgr_init(priv);
2258 + dev_err(dev, "Error initializing CGR\n");
2259 + goto tx_cgr_init_failed;
2261 + err = dpaa_eth_priv_ingress_cgr_init(priv);
2263 + dev_err(dev, "Error initializing ingress CGR\n");
2264 + goto rx_cgr_init_failed;
2267 + /* Add the FQs to the interface, and make them active */
2268 + err = dpa_fqs_init(dev, &priv->dpa_fq_list, false);
2270 + goto fq_alloc_failed;
2272 + priv->buf_layout = buf_layout;
2273 + priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
2274 + priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
2276 + /* All real interfaces need their ports initialized */
2277 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
2280 +#ifdef CONFIG_FMAN_PFC
2281 + for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
2282 + err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
2283 + mac_dev->port_dev[TX], i, i);
2284 + if (unlikely(err != 0)) {
2285 + dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
2286 + goto pfc_mapping_failed;
2291 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2293 + if (priv->percpu_priv == NULL) {
2294 + dev_err(dev, "devm_alloc_percpu() failed\n");
2296 + goto alloc_percpu_failed;
2298 + for_each_possible_cpu(i) {
2299 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2300 + memset(percpu_priv, 0, sizeof(*percpu_priv));
2303 + /* Initialize NAPI */
2304 + err = dpa_private_napi_add(net_dev);
2307 + goto napi_add_failed;
2309 + err = dpa_private_netdev_init(net_dev);
2312 + goto netdev_init_failed;
2314 + dpaa_eth_sysfs_init(&net_dev->dev);
2317 + device_set_wakeup_capable(dev, true);
2320 + pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
2324 +netdev_init_failed:
2326 + dpa_private_napi_del(net_dev);
2327 +alloc_percpu_failed:
2328 +#ifdef CONFIG_FMAN_PFC
2329 +pfc_mapping_failed:
2331 + dpa_fq_free(dev, &priv->dpa_fq_list);
2333 + qman_delete_cgr_safe(&priv->ingress_cgr);
2334 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2335 +rx_cgr_init_failed:
2336 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
2337 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
2338 +tx_cgr_init_failed:
2339 +get_channel_failed:
2340 + dpa_bp_free(priv);
2345 + dev_set_drvdata(dev, NULL);
2346 + free_netdev(net_dev);
2347 +alloc_etherdev_mq_failed:
2348 + if (atomic_read(&dpa_bp->refs) == 0)
2349 + devm_kfree(dev, dpa_bp);
2354 +static const struct of_device_id dpa_match[] = {
2356 + .compatible = "fsl,dpa-ethernet"
2360 +MODULE_DEVICE_TABLE(of, dpa_match);
2362 +static struct platform_driver dpa_driver = {
2364 + .name = KBUILD_MODNAME,
2365 + .of_match_table = dpa_match,
2366 + .owner = THIS_MODULE,
2367 + .pm = DPAA_PM_OPS,
2369 + .probe = dpaa_eth_priv_probe,
2370 + .remove = dpa_remove
2374 +static bool __init __cold soc_has_errata_a010022(void)
2376 +#ifdef CONFIG_SOC_BUS
2377 + const struct soc_device_attribute soc_msi_matches[] = {
2378 + { .family = "QorIQ LS1043A",
2383 + if (soc_device_match(soc_msi_matches))
2388 + return true; /* cannot identify SoC */
2393 +static int __init __cold dpa_load(void)
2397 + pr_info(DPA_DESCRIPTION "\n");
2399 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2400 + dpa_debugfs_module_init();
2401 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2403 + /* initialise dpaa_eth mirror values */
2404 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
2405 + dpa_max_frm = fm_get_max_frm();
2406 + dpa_num_cpus = num_possible_cpus();
2409 + /* Detect if the current SoC requires the 4K alignment workaround */
2410 + dpaa_errata_a010022 = soc_has_errata_a010022();
2413 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2414 + memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
2417 + _errno = platform_driver_register(&dpa_driver);
2418 + if (unlikely(_errno < 0)) {
2419 + pr_err(KBUILD_MODNAME
2420 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
2421 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
2424 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2425 + KBUILD_BASENAME".c", __func__);
2429 +module_init(dpa_load);
2431 +static void __exit __cold dpa_unload(void)
2433 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
2434 + KBUILD_BASENAME".c", __func__);
2436 + platform_driver_unregister(&dpa_driver);
2438 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2439 + dpa_debugfs_module_exit();
2440 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2442 + /* Only one channel is used and needs to be released after all
2443 + * interfaces are removed
2445 + dpa_release_channel();
2447 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2448 + KBUILD_BASENAME".c", __func__);
2450 +module_exit(dpa_unload);
2452 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
2454 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
2456 + * Redistribution and use in source and binary forms, with or without
2457 + * modification, are permitted provided that the following conditions are met:
2458 + * * Redistributions of source code must retain the above copyright
2459 + * notice, this list of conditions and the following disclaimer.
2460 + * * Redistributions in binary form must reproduce the above copyright
2461 + * notice, this list of conditions and the following disclaimer in the
2462 + * documentation and/or other materials provided with the distribution.
2463 + * * Neither the name of Freescale Semiconductor nor the
2464 + * names of its contributors may be used to endorse or promote products
2465 + * derived from this software without specific prior written permission.
2468 + * ALTERNATIVELY, this software may be distributed under the terms of the
2469 + * GNU General Public License ("GPL") as published by the Free Software
2470 + * Foundation, either version 2 of that License or (at your option) any
2473 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2474 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2475 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2476 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2477 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2478 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2479 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2480 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2481 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2482 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2488 +#include <linux/netdevice.h>
2489 +#include <linux/fsl_qman.h> /* struct qman_fq */
2491 +#include "fm_ext.h"
2492 +#include "dpaa_eth_trace.h"
2494 +extern int dpa_rx_extra_headroom;
2495 +extern int dpa_max_frm;
2496 +extern int dpa_num_cpus;
2498 +#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
2499 +#define dpa_get_max_frm() dpa_max_frm
2501 +#define dpa_get_max_mtu() \
2502 + (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
2506 +/* Simple enum of FQ types - used for array indexing */
2507 +enum port_type {RX, TX};
2509 +/* TODO: This structure should be renamed & moved to the FMD wrapper */
2510 +struct dpa_buffer_layout_s {
2511 + uint16_t priv_data_size;
2512 + bool parse_results;
2514 + bool hash_results;
2515 + uint8_t manip_extra_space;
2516 + uint16_t data_align;
2519 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
2520 +#define DPA_BUG_ON(cond) BUG_ON(cond)
2522 +#define DPA_BUG_ON(cond)
2525 +#define DPA_TX_PRIV_DATA_SIZE 16
2526 +#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
2527 +#define DPA_TIME_STAMP_SIZE 8
2528 +#define DPA_HASH_RESULTS_SIZE 8
2529 +#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
2530 + dpa_get_rx_extra_headroom())
2532 +#define FM_FD_STAT_RX_ERRORS \
2533 + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
2534 + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
2535 + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
2536 + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
2537 + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
2539 +#define FM_FD_STAT_TX_ERRORS \
2540 + (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
2541 + FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
2543 +#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2544 +/* The raw buffer size must be cacheline aligned.
2545 + * Normally we use 2K buffers.
2547 +#define DPA_BP_RAW_SIZE 2048
2549 +/* For jumbo frame optimizations, use buffers large enough to accommodate
2550 + * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
2551 + * space to account for further alignments.
2553 +#define DPA_MAX_FRM_SIZE 9600
2555 +#define DPA_BP_RAW_SIZE \
2556 + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
2557 + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
2558 +#else /* CONFIG_PPC */
2559 +#define DPA_BP_RAW_SIZE ((unlikely(dpaa_errata_a010022)) ? 2048 : \
2560 + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
2561 + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1)))
2562 +#endif /* CONFIG_PPC */
2563 +#endif /* CONFIG_FSL_DPAA_ETH_JUMBO_FRAME */
2565 +/* This is what FMan is ever allowed to use.
2566 + * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
2567 + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
2568 + * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
2569 + * half-page-aligned buffers (can we?), so we reserve some more space
2570 + * for start-of-buffer alignment.
2572 +#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
2574 +/* We must ensure that skb_shinfo is always cacheline-aligned. */
2575 +#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
2577 +/* Maximum size of a buffer for which recycling is allowed.
2578 + * We need an upper limit such that forwarded skbs that get reallocated on Tx
2579 + * aren't allowed to grow unboundedly. On the other hand, we need to make sure
2580 + * that skbs allocated by us will not fail to be recycled due to their size.
2582 + * For a requested size, the kernel allocator provides the next power of two
2583 + * sized block, which the stack will use as is, regardless of the actual size
2584 + * it required; since we must accommodate at most 9.6K buffers (L2 maximum
2585 + * supported frame size), set the recycling upper limit to 16K.
2587 +#define DPA_RECYCLE_MAX_SIZE 16384
2589 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2590 +/*TODO: temporary for fman pcd testing */
2591 +#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
2594 +#define DPAA_ETH_FQ_DELTA 0x10000
2596 +#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
2597 + (((device_addr) & 0x1fffff) >> 6)
2599 +#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
2600 + (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
2602 +/* Largest value that the FQD's OAL field can hold.
2603 + * This is DPAA-1.x specific.
2604 + * TODO: This rather belongs in fsl_qman.h
2606 +#define FSL_QMAN_MAX_OAL 127
2608 +/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
2609 +#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
2611 +/* Default alignment for start of data in an Rx FD */
2612 +#define DPA_FD_DATA_ALIGNMENT 16
2614 +/* Values for the L3R field of the FM Parse Results
2616 +/* L3 Type field: First IP Present IPv4 */
2617 +#define FM_L3_PARSE_RESULT_IPV4 0x8000
2618 +/* L3 Type field: First IP Present IPv6 */
2619 +#define FM_L3_PARSE_RESULT_IPV6 0x4000
2621 +/* Values for the L4R field of the FM Parse Results
2622 + * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
2624 +/* L4 Type field: UDP */
2625 +#define FM_L4_PARSE_RESULT_UDP 0x40
2626 +/* L4 Type field: TCP */
2627 +#define FM_L4_PARSE_RESULT_TCP 0x20
2628 +/* FD status field indicating whether the FM Parser has attempted to validate
2629 + * the L4 csum of the frame.
2630 + * Note that having this bit set doesn't necessarily imply that the checksum
2631 + * is valid. One would have to check the parse results to find that out.
2633 +#define FM_FD_STAT_L4CV 0x00000004
2636 +#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
2638 +/* Check if the parsed frame was found to be a TCP segment.
2640 + * @parse_result_ptr must be of type (fm_prs_result_t *).
2642 +#define fm_l4_frame_is_tcp(parse_result_ptr) \
2643 + ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
2645 +/* number of Tx queues to FMan */
2646 +#ifdef CONFIG_FMAN_PFC
2647 +#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
2649 +#define DPAA_ETH_TX_QUEUES NR_CPUS
2652 +#define DPAA_ETH_RX_QUEUES 128
2654 +/* Convenience macros for storing/retrieving the skb back-pointers. They must
2655 + * accommodate both recycling and confirmation paths - i.e. cases when the buf
2656 + * was allocated by ourselves, respectively by the stack. In the former case,
2657 + * we could store the skb at negative offset; in the latter case, we can't,
2658 + * so we'll use 0 as offset.
2660 + * NB: @off is an offset from a (struct sk_buff **) pointer!
2662 +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
2664 + skbh = (struct sk_buff **)addr; \
2665 + *(skbh + (off)) = skb; \
2667 +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
2669 + skbh = (struct sk_buff **)addr; \
2670 + skb = *(skbh + (off)); \
2674 +/* Magic Packet wakeup */
2675 +#define DPAA_WOL_MAGIC 0x00000001
2678 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2685 +/* More detailed FQ types - used for fine-grained WQ assignments */
2687 + FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
2688 + FQ_TYPE_RX_ERROR, /* Rx Error FQs */
2689 + FQ_TYPE_RX_PCD, /* User-defined PCDs */
2690 + FQ_TYPE_TX, /* "Real" Tx FQs */
2691 + FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
2692 + FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
2693 + FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
2694 + FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
2698 + struct qman_fq fq_base;
2699 + struct list_head list;
2700 + struct net_device *net_dev;
2706 + enum dpa_fq_type fq_type;
2709 +struct dpa_fq_cbs_t {
2710 + struct qman_fq rx_defq;
2711 + struct qman_fq tx_defq;
2712 + struct qman_fq rx_errq;
2713 + struct qman_fq tx_errq;
2714 + struct qman_fq egress_ern;
2723 + struct bman_pool *pool;
2725 + struct device *dev;
2727 + /* The buffer pools used for the private ports are initialized
2728 + * with target_count buffers for each CPU; at runtime the
2729 + * number of buffers per CPU is constantly brought back to this
2733 + /* The configured value for the number of buffers in the pool,
2734 + * used for shared port buffer pools
2740 + /* physical address of the contiguous memory used by the pool to store
2744 + /* virtual address of the contiguous memory used by the pool to store
2747 + void __iomem *vaddr;
2748 + /* current number of buffers in the bpool allotted to this CPU */
2749 + int __percpu *percpu_count;
2751 + /* some bpools need to be seeded before use by this cb */
2752 + int (*seed_cb)(struct dpa_bp *);
2753 + /* some bpools need to be emptied before freeing; this cb is used
2754 + * for freeing of individual buffers taken from the pool
2756 + void (*free_buf_cb)(void *addr);
2759 +struct dpa_rx_errors {
2760 + u64 dme; /* DMA Error */
2761 + u64 fpe; /* Frame Physical Error */
2762 + u64 fse; /* Frame Size Error */
2763 + u64 phe; /* Header Error */
2764 + u64 cse; /* Checksum Validation Error */
2767 +/* Counters for QMan ERN frames - one counter per rejection code */
2768 +struct dpa_ern_cnt {
2769 + u64 cg_tdrop; /* Congestion group taildrop */
2770 + u64 wred; /* WRED congestion */
2771 + u64 err_cond; /* Error condition */
2772 + u64 early_window; /* Order restoration, frame too early */
2773 + u64 late_window; /* Order restoration, frame too late */
2774 + u64 fq_tdrop; /* FQ taildrop */
2775 + u64 fq_retired; /* FQ is retired */
2776 + u64 orp_zero; /* ORP disabled */
2779 +struct dpa_napi_portal {
2780 + struct napi_struct napi;
2781 + struct qman_portal *p;
2784 +struct dpa_percpu_priv_s {
2785 + struct net_device *net_dev;
2786 + struct dpa_napi_portal *np;
2790 + /* fragmented (non-linear) skbuffs received from the stack */
2791 + u64 tx_frag_skbuffs;
2792 + /* number of S/G frames received */
2795 + struct rtnl_link_stats64 stats;
2796 + struct dpa_rx_errors rx_errors;
2797 + struct dpa_ern_cnt ern_cnt;
2800 +struct dpa_priv_s {
2801 + struct dpa_percpu_priv_s __percpu *percpu_priv;
2802 + struct dpa_bp *dpa_bp;
2803 + /* Store here the needed Tx headroom for convenience and speed
2804 + * (even though it can be computed based on the fields of buf_layout)
2806 + uint16_t tx_headroom;
2807 + struct net_device *net_dev;
2808 + struct mac_device *mac_dev;
2809 + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
2810 + struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
2814 + uint16_t channel; /* "fsl,qman-channel-id" */
2815 + struct list_head dpa_fq_list;
2817 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2818 + struct dentry *debugfs_loop_file;
2821 + uint32_t msg_enable; /* net_device message level */
2822 +#ifdef CONFIG_FSL_DPAA_1588
2823 + struct dpa_ptp_tsu *tsu;
2826 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2827 +/* TODO: this is temporary until pcd support is implemented in dpaa */
2828 + int priv_pcd_num_ranges;
2829 + struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
2834 + * All egress queues to a given net device belong to one
2835 + * (and the same) congestion group.
2837 + struct qman_cgr cgr;
2838 + /* If congested, when it began. Used for performance stats. */
2839 + u32 congestion_start_jiffies;
2840 + /* Number of jiffies the Tx port was congested. */
2841 + u32 congested_jiffies;
2843 + * Counter for the number of times the CGR
2844 + * entered congestion state
2846 + u32 cgr_congested_count;
2848 + /* Use a per-port CGR for ingress traffic. */
2849 + bool use_ingress_cgr;
2850 + struct qman_cgr ingress_cgr;
2852 +#ifdef CONFIG_FSL_DPAA_TS
2853 + bool ts_tx_en; /* Tx timestamping enabled */
2854 + bool ts_rx_en; /* Rx timestamping enabled */
2855 +#endif /* CONFIG_FSL_DPAA_TS */
2857 + struct dpa_buffer_layout_s *buf_layout;
2858 + uint16_t rx_headroom;
2865 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2869 +#ifdef CONFIG_FSL_DPAA_CEETM
2870 + bool ceetm_en; /* CEETM QoS enabled */
2874 +struct fm_port_fqs {
2875 + struct dpa_fq *tx_defq;
2876 + struct dpa_fq *tx_errq;
2877 + struct dpa_fq *rx_defq;
2878 + struct dpa_fq *rx_errq;
2882 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2883 +extern struct net_device *dpa_loop_netdevs[20];
2886 +/* functions with different implementation for SG and non-SG: */
2887 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
2888 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
2889 +void __hot _dpa_rx(struct net_device *net_dev,
2890 + struct qman_portal *portal,
2891 + const struct dpa_priv_s *priv,
2892 + struct dpa_percpu_priv_s *percpu_priv,
2893 + const struct qm_fd *fd,
2896 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
2897 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
2898 + struct qman_fq *egress_fq, struct qman_fq *conf_fq);
2899 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
2900 + const struct qm_fd *fd);
2901 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
2902 + const struct qm_fd *fd,
2903 + struct sk_buff *skb,
2905 +#ifndef CONFIG_FSL_DPAA_TS
2906 +bool dpa_skb_is_recyclable(struct sk_buff *skb);
2907 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
2908 + uint32_t min_size,
2909 + uint16_t min_offset,
2910 + unsigned char **new_buf_start);
2912 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
2913 + struct sk_buff *skb, struct qm_fd *fd,
2914 + int *count_ptr, int *offset);
2915 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
2916 + struct sk_buff *skb, struct qm_fd *fd);
2917 +int __cold __attribute__((nonnull))
2918 + _dpa_fq_free(struct device *dev, struct qman_fq *fq);
2920 +/* Turn on HW checksum computation for this outgoing frame.
2921 + * If the current protocol is not something we support in this regard
2922 + * (or if the stack has already computed the SW checksum), we do nothing.
2924 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
2927 + * Note that this function may modify the fd->cmd field and the skb data buffer
2928 + * (the Parse Results area).
2930 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
2931 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
2933 +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
2934 + struct qman_portal *portal)
2936 + /* In case of threaded ISR for RT enable kernel,
2937 + * in_irq() does not return appropriate value, so use
2938 + * in_serving_softirq to distinguish softirq or irq context.
2940 + if (unlikely(in_irq() || !in_serving_softirq())) {
2941 + /* Disable QMan IRQ and invoke NAPI */
2942 + int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2943 + if (likely(!ret)) {
2944 + const struct qman_portal_config *pc =
2945 + qman_p_get_portal_config(portal);
2946 + struct dpa_napi_portal *np =
2947 + &percpu_priv->np[pc->index];
2950 + napi_schedule(&np->napi);
2951 + percpu_priv->in_interrupt++;
2958 +static inline ssize_t __const __must_check __attribute__((nonnull))
2959 +dpa_fd_length(const struct qm_fd *fd)
2961 + return fd->length20;
2964 +static inline ssize_t __const __must_check __attribute__((nonnull))
2965 +dpa_fd_offset(const struct qm_fd *fd)
2967 + return fd->offset;
2970 +/* Verifies if the skb length is below the interface MTU */
2971 +static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
2973 + if (unlikely(skb->len > mtu))
2974 + if ((skb->protocol != htons(ETH_P_8021Q))
2975 + || (skb->len > mtu + 4))
2981 +static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
2983 + uint16_t headroom;
2984 + /* The frame headroom must accommodate:
2985 + * - the driver private data area
2986 + * - parse results, hash results, timestamp if selected
2987 + * - manip extra space
2988 + * If either hash results or time stamp are selected, both will
2989 + * be copied to/from the frame headroom, as TS is located between PR and
2990 + * HR in the IC and IC copy size has a granularity of 16bytes
2991 + * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
2993 + * Also make sure the headroom is a multiple of data_align bytes
2995 + headroom = (uint16_t)(bl->priv_data_size +
2996 + (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
2997 + (bl->hash_results || bl->time_stamp ?
2998 + DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
2999 + bl->manip_extra_space);
3001 + return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
3004 +int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
3005 +int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
3006 +int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
3008 +void dpaa_eth_sysfs_remove(struct device *dev);
3009 +void dpaa_eth_sysfs_init(struct device *dev);
3010 +int dpaa_eth_poll(struct napi_struct *napi, int budget);
3012 +void dpa_private_napi_del(struct net_device *net_dev);
3014 +/* Equivalent to a memset(0), but works faster */
3015 +static inline void clear_fd(struct qm_fd *fd)
3017 + fd->opaque_addr = 0;
3022 +static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
3023 + struct qman_fq *tx_fq)
3027 + for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
3028 + if (priv->egress_fqs[i] == tx_fq)
3034 +static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
3035 + struct rtnl_link_stats64 *percpu_stats,
3036 + struct qm_fd *fd, struct qman_fq *egress_fq,
3037 + struct qman_fq *conf_fq)
3041 + if (fd->bpid == 0xff)
3042 + fd->cmd |= qman_fq_fqid(conf_fq);
3044 + /* Trace this Tx fd */
3045 + trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
3047 + for (i = 0; i < 100000; i++) {
3048 + err = qman_enqueue(egress_fq, fd, 0);
3049 + if (err != -EBUSY)
3053 + if (unlikely(err < 0)) {
3054 + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
3055 + percpu_stats->tx_errors++;
3056 + percpu_stats->tx_fifo_errors++;
3060 + percpu_stats->tx_packets++;
3061 + percpu_stats->tx_bytes += dpa_fd_length(fd);
3066 +/* Use multiple WQs for FQ assignment:
3067 + * - Tx Confirmation queues go to WQ1.
3068 + * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
3069 + * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
3070 + * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
3071 + * to be scheduled, in case there are many more FQs in WQ3).
3072 + * This ensures that Tx-confirmed buffers are timely released. In particular,
3073 + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
3074 + * are greatly outnumbered by other FQs in the system (usually PCDs), while
3075 + * dequeue scheduling is round-robin.
3077 +static inline void _dpa_assign_wq(struct dpa_fq *fq)
3079 + switch (fq->fq_type) {
3080 + case FQ_TYPE_TX_CONFIRM:
3081 + case FQ_TYPE_TX_CONF_MQ:
3084 + case FQ_TYPE_RX_DEFAULT:
3088 + case FQ_TYPE_RX_ERROR:
3089 + case FQ_TYPE_TX_ERROR:
3090 + case FQ_TYPE_RX_PCD_HI_PRIO:
3093 + case FQ_TYPE_RX_PCD:
3097 + WARN(1, "Invalid FQ type %d for FQID %d!\n",
3098 + fq->fq_type, fq->fqid);
3102 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
3103 +/* Use in lieu of skb_get_queue_mapping() */
3104 +#ifdef CONFIG_FMAN_PFC
3105 +#define dpa_get_queue_mapping(skb) \
3106 + (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
3107 + ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
3108 + ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
3109 + dpa_num_cpus + smp_processor_id()));
3112 +#define dpa_get_queue_mapping(skb) \
3113 + raw_smp_processor_id()
3116 +/* Use the queue selected by XPS */
3117 +#define dpa_get_queue_mapping(skb) \
3118 + skb_get_queue_mapping(skb)
3121 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
3122 +struct ptp_priv_s {
3123 + struct device_node *node;
3124 + struct platform_device *of_dev;
3125 + struct mac_device *mac_dev;
3127 +extern struct ptp_priv_s ptp_priv;
3130 +static inline void _dpa_bp_free_pf(void *addr)
3132 + put_page(virt_to_head_page(addr));
3135 +/* TODO: LS1043A SoC has a HW issue regarding FMan DMA transactions; the issue
3136 + * manifests itself at high traffic rates when frames exceed 4K memory
3137 + * boundaries; For the moment, we use a SW workaround to avoid frames larger
3138 + * than 4K or that exceed 4K alignments.
3142 +extern bool dpaa_errata_a010022; /* SoC affected by A010022 errata */
3144 +#define HAS_DMA_ISSUE(start, size) \
3145 + (((u64)(start) + (size)) > (((u64)(start) + 0x1000) & ~0xFFF))
3146 +#define BOUNDARY_4K(start, size) (((u64)(start) + (u64)(size)) & ~0xFFF)
3148 +#endif /* !CONFIG_PPC */
3150 +#endif /* __DPA_H */
3152 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
3154 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3156 + * Redistribution and use in source and binary forms, with or without
3157 + * modification, are permitted provided that the following conditions are met:
3158 + * * Redistributions of source code must retain the above copyright
3159 + * notice, this list of conditions and the following disclaimer.
3160 + * * Redistributions in binary form must reproduce the above copyright
3161 + * notice, this list of conditions and the following disclaimer in the
3162 + * documentation and/or other materials provided with the distribution.
3163 + * * Neither the name of Freescale Semiconductor nor the
3164 + * names of its contributors may be used to endorse or promote products
3165 + * derived from this software without specific prior written permission.
3168 + * ALTERNATIVELY, this software may be distributed under the terms of the
3169 + * GNU General Public License ("GPL") as published by the Free Software
3170 + * Foundation, either version 2 of that License or (at your option) any
3173 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3174 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3175 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3176 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3177 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3178 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3179 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3180 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3181 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3182 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3185 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
3186 +#define pr_fmt(fmt) \
3187 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
3188 + KBUILD_BASENAME".c", __LINE__, __func__
3190 +#define pr_fmt(fmt) \
3191 + KBUILD_MODNAME ": " fmt
3194 +#include <linux/init.h>
3195 +#include <linux/module.h>
3196 +#include <linux/io.h>
3197 +#include <linux/of_platform.h>
3198 +#include <linux/of_net.h>
3199 +#include <linux/etherdevice.h>
3200 +#include <linux/kthread.h>
3201 +#include <linux/percpu.h>
3202 +#include <linux/highmem.h>
3203 +#include <linux/sort.h>
3204 +#include <linux/fsl_qman.h>
3205 +#include "dpaa_eth.h"
3206 +#include "dpaa_eth_common.h"
3207 +#include "dpaa_eth_base.h"
3209 +#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
3211 +MODULE_LICENSE("Dual BSD/GPL");
3213 +uint8_t advanced_debug = -1;
3214 +module_param(advanced_debug, byte, S_IRUGO);
3215 +MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
3216 +EXPORT_SYMBOL(advanced_debug);
3218 +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
3220 + return ((struct dpa_bp *)dpa_bp0)->size -
3221 + ((struct dpa_bp *)dpa_bp1)->size;
3224 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3225 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
3227 + int i, lenp, na, ns, err;
3228 + struct device *dev;
3229 + struct device_node *dev_node;
3230 + const __be32 *bpool_cfg;
3231 + struct dpa_bp *dpa_bp;
3234 + dev = &_of_dev->dev;
3236 + *count = of_count_phandle_with_args(dev->of_node,
3237 + "fsl,bman-buffer-pools", NULL);
3239 + dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
3240 + return ERR_PTR(-EINVAL);
3243 + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
3244 + if (dpa_bp == NULL) {
3245 + dev_err(dev, "devm_kzalloc() failed\n");
3246 + return ERR_PTR(-ENOMEM);
3249 + dev_node = of_find_node_by_path("/");
3250 + if (unlikely(dev_node == NULL)) {
3251 + dev_err(dev, "of_find_node_by_path(/) failed\n");
3252 + return ERR_PTR(-EINVAL);
3255 + na = of_n_addr_cells(dev_node);
3256 + ns = of_n_size_cells(dev_node);
3258 + for (i = 0; i < *count; i++) {
3259 + of_node_put(dev_node);
3261 + dev_node = of_parse_phandle(dev->of_node,
3262 + "fsl,bman-buffer-pools", i);
3263 + if (dev_node == NULL) {
3264 + dev_err(dev, "of_find_node_by_phandle() failed\n");
3265 + return ERR_PTR(-EFAULT);
3268 + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
3270 + "!of_device_is_compatible(%s, fsl,bpool)\n",
3271 + dev_node->full_name);
3272 + dpa_bp = ERR_PTR(-EINVAL);
3273 + goto _return_of_node_put;
3276 + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
3278 + dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
3279 + dpa_bp = ERR_PTR(-EINVAL);
3280 + goto _return_of_node_put;
3282 + dpa_bp[i].bpid = (uint8_t)bpid;
3284 + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
3286 + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
3287 + const uint32_t *seed_pool;
3289 + dpa_bp[i].config_count =
3290 + (int)of_read_number(bpool_cfg, ns);
3292 + (size_t)of_read_number(bpool_cfg + ns, ns);
3294 + of_read_number(bpool_cfg + 2 * ns, na);
3296 + seed_pool = of_get_property(dev_node,
3297 + "fsl,bpool-ethernet-seeds", &lenp);
3298 + dpa_bp[i].seed_pool = !!seed_pool;
3302 + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
3303 + dev_node->full_name);
3304 + dpa_bp = ERR_PTR(-EINVAL);
3305 + goto _return_of_node_put;
3309 + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
3313 +_return_of_node_put:
3315 + of_node_put(dev_node);
3319 +EXPORT_SYMBOL(dpa_bp_probe);
3321 +int dpa_bp_shared_port_seed(struct dpa_bp *bp)
3323 + void __iomem **ptr;
3325 + /* In MAC-less and Shared-MAC scenarios the physical
3326 + * address of the buffer pool in device tree is set
3327 + * to 0 to specify that another entity (USDPAA) will
3328 + * allocate and seed the buffers
3333 + /* allocate memory region for buffers */
3334 + devm_request_mem_region(bp->dev, bp->paddr,
3335 + bp->size * bp->config_count, KBUILD_MODNAME);
3336 + /* managed ioremap unmapping */
3337 + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3341 + bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
3343 + bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
3345 + if (bp->vaddr == NULL) {
3346 + pr_err("Could not map memory for pool %d\n", bp->bpid);
3351 + devres_add(bp->dev, ptr);
3353 + /* seed pool with buffers from that memory region */
3354 + if (bp->seed_pool) {
3355 + int count = bp->target_count;
3356 + dma_addr_t addr = bp->paddr;
3359 + struct bm_buffer bufs[8];
3360 + uint8_t num_bufs = 0;
3363 + BUG_ON(addr > 0xffffffffffffull);
3364 + bufs[num_bufs].bpid = bp->bpid;
3365 + bm_buffer_set64(&bufs[num_bufs++], addr);
3368 + } while (--count && (num_bufs < 8));
3370 + while (bman_release(bp->pool, bufs, num_bufs, 0))
3377 +EXPORT_SYMBOL(dpa_bp_shared_port_seed);
3379 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3382 + struct dpa_priv_s *priv = netdev_priv(net_dev);
3385 + priv->dpa_bp = dpa_bp;
3386 + priv->bp_count = count;
3388 + for (i = 0; i < count; i++) {
3390 + err = dpa_bp_alloc(&dpa_bp[i]);
3392 + dpa_bp_free(priv);
3393 + priv->dpa_bp = NULL;
3400 +EXPORT_SYMBOL(dpa_bp_create);
3402 +static int __init __cold dpa_advanced_load(void)
3404 + pr_info(DPA_DESCRIPTION "\n");
3408 +module_init(dpa_advanced_load);
3410 +static void __exit __cold dpa_advanced_unload(void)
3412 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
3413 + KBUILD_BASENAME".c", __func__);
3416 +module_exit(dpa_advanced_unload);
3418 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
3420 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3422 + * Redistribution and use in source and binary forms, with or without
3423 + * modification, are permitted provided that the following conditions are met:
3424 + * * Redistributions of source code must retain the above copyright
3425 + * notice, this list of conditions and the following disclaimer.
3426 + * * Redistributions in binary form must reproduce the above copyright
3427 + * notice, this list of conditions and the following disclaimer in the
3428 + * documentation and/or other materials provided with the distribution.
3429 + * * Neither the name of Freescale Semiconductor nor the
3430 + * names of its contributors may be used to endorse or promote products
3431 + * derived from this software without specific prior written permission.
3434 + * ALTERNATIVELY, this software may be distributed under the terms of the
3435 + * GNU General Public License ("GPL") as published by the Free Software
3436 + * Foundation, either version 2 of that License or (at your option) any
3439 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3440 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3441 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3442 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3443 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3444 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3445 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3446 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3447 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3448 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3451 +#ifndef __DPAA_ETH_BASE_H
3452 +#define __DPAA_ETH_BASE_H
3454 +#include <linux/etherdevice.h> /* struct net_device */
3455 +#include <linux/fsl_bman.h> /* struct bm_buffer */
3456 +#include <linux/of_platform.h> /* struct platform_device */
3457 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
3459 +extern uint8_t advanced_debug;
3460 +extern const struct dpa_fq_cbs_t shared_fq_cbs;
3461 +extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
3463 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3464 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
3465 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3467 +int dpa_bp_shared_port_seed(struct dpa_bp *bp);
3469 +#endif /* __DPAA_ETH_BASE_H */
3471 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
3473 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
3475 + * Redistribution and use in source and binary forms, with or without
3476 + * modification, are permitted provided that the following conditions are met:
3477 + * * Redistributions of source code must retain the above copyright
3478 + * notice, this list of conditions and the following disclaimer.
3479 + * * Redistributions in binary form must reproduce the above copyright
3480 + * notice, this list of conditions and the following disclaimer in the
3481 + * documentation and/or other materials provided with the distribution.
3482 + * * Neither the name of Freescale Semiconductor nor the
3483 + * names of its contributors may be used to endorse or promote products
3484 + * derived from this software without specific prior written permission.
3487 + * ALTERNATIVELY, this software may be distributed under the terms of the
3488 + * GNU General Public License ("GPL") as published by the Free Software
3489 + * Foundation, either version 2 of that License or (at your option) any
3492 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3493 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3494 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3495 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3496 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3497 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3498 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3499 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3500 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3501 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3504 +#include <linux/init.h>
3505 +#include "dpaa_eth_ceetm.h"
3507 +#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
3509 +const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
3510 + [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
3511 + [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
3514 +struct Qdisc_ops ceetm_qdisc_ops;
3516 +/* Obtain the DCP and the SP ids from the FMan port */
3517 +static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
3518 + unsigned int *sp_id)
3521 + t_LnxWrpFmPortDev *port_dev;
3522 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3523 + struct mac_device *mac_dev = dpa_priv->mac_dev;
3525 + port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
3526 + channel = port_dev->txCh;
3528 + *sp_id = channel & CHANNEL_SP_MASK;
3529 + pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
3531 + if (channel < DCP0_MAX_CHANNEL) {
3532 + *dcp_id = qm_dc_portal_fman0;
3533 + pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
3535 + *dcp_id = qm_dc_portal_fman1;
3536 + pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
3540 +/* Enqueue Rejection Notification callback */
3541 +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
3542 + const struct qm_mr_entry *msg)
3544 + struct net_device *net_dev;
3545 + struct ceetm_class *cls;
3546 + struct ceetm_class_stats *cstats = NULL;
3547 + const struct dpa_priv_s *dpa_priv;
3548 + struct dpa_percpu_priv_s *dpa_percpu_priv;
3549 + struct sk_buff *skb;
3550 + struct qm_fd fd = msg->ern.fd;
3552 + net_dev = ((struct ceetm_fq *)fq)->net_dev;
3553 + dpa_priv = netdev_priv(net_dev);
3554 + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
3556 + /* Increment DPA counters */
3557 + dpa_percpu_priv->stats.tx_dropped++;
3558 + dpa_percpu_priv->stats.tx_fifo_errors++;
3560 + /* Increment CEETM counters */
3561 + cls = ((struct ceetm_fq *)fq)->ceetm_cls;
3562 + switch (cls->type) {
3564 + cstats = this_cpu_ptr(cls->prio.cstats);
3567 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3572 + cstats->ern_drop_count++;
3574 + if (fd.bpid != 0xff) {
3575 + dpa_fd_release(net_dev, &fd);
3579 + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
3580 + dev_kfree_skb_any(skb);
3583 +/* Congestion State Change Notification callback */
3584 +static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
3586 + struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
3587 + struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
3588 + struct ceetm_class *cls = ceetm_fq->ceetm_cls;
3589 + struct ceetm_class_stats *cstats = NULL;
3591 + switch (cls->type) {
3593 + cstats = this_cpu_ptr(cls->prio.cstats);
3596 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3601 + dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
3602 + netif_tx_stop_all_queues(dpa_priv->net_dev);
3603 + dpa_priv->cgr_data.cgr_congested_count++;
3605 + cstats->congested_count++;
3607 + dpa_priv->cgr_data.congested_jiffies +=
3608 + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
3609 + netif_tx_wake_all_queues(dpa_priv->net_dev);
3613 +/* Allocate a ceetm fq */
3614 +static int ceetm_alloc_fq(struct ceetm_fq **fq, struct net_device *dev,
3615 + struct ceetm_class *cls)
3617 + *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
3621 + (*fq)->net_dev = dev;
3622 + (*fq)->ceetm_cls = cls;
3626 +/* Configure a ceetm Class Congestion Group */
3627 +static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
3628 + struct qm_ceetm_channel *channel, unsigned int id,
3629 + struct ceetm_fq *fq, struct dpa_priv_s *dpa_priv)
3634 + struct qm_ceetm_ccg_params ccg_params;
3636 + err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
3640 + /* Configure the count mode (frames/bytes), enable congestion state
3641 + * notifications, configure the congestion entry and exit thresholds,
3642 + * enable tail-drop, configure the tail-drop mode, and set the
3643 + * overhead accounting limit
3645 + ccg_mask = QM_CCGR_WE_MODE |
3646 + QM_CCGR_WE_CSCN_EN |
3647 + QM_CCGR_WE_CS_THRES_IN | QM_CCGR_WE_CS_THRES_OUT |
3648 + QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
3651 + ccg_params.mode = 0; /* count bytes */
3652 + ccg_params.cscn_en = 1; /* generate notifications */
3653 + ccg_params.td_en = 1; /* enable tail-drop */
3654 + ccg_params.td_mode = 0; /* tail-drop on congestion state */
3655 + ccg_params.oal = (signed char)(min(sizeof(struct sk_buff) +
3656 + dpa_priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
3658 + /* Set the congestion state thresholds according to the link speed */
3659 + if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
3660 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
3662 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
3664 + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1);
3665 + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out,
3666 + cs_th * CEETM_CCGR_RATIO, 1);
3668 + err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
3675 +/* Configure a ceetm Logical Frame Queue */
3676 +static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
3677 + struct qm_ceetm_lfq **lfq)
3683 + err = qman_ceetm_lfq_claim(lfq, cq);
3687 + /* Get the former contexts in order to preserve context B */
3688 + err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
3692 + context_a = CEETM_CONTEXT_A;
3693 + err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
3697 + (*lfq)->ern = ceetm_ern;
3699 + err = qman_ceetm_create_fq(*lfq, &fq->fq);
3706 +/* Configure a prio ceetm class */
3707 +static int ceetm_config_prio_cls(struct ceetm_class *cls,
3708 + struct net_device *dev,
3709 + struct qm_ceetm_channel *channel,
3713 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3715 + err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
3719 + /* Claim and configure the CCG */
3720 + err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
3725 + /* Claim and configure the CQ */
3726 + err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
3730 + if (cls->shaped) {
3731 + err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
3735 + err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
3740 + /* Claim and configure a LFQ */
3741 + err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
3748 +/* Configure a wbfs ceetm class */
3749 +static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
3750 + struct net_device *dev,
3751 + struct qm_ceetm_channel *channel,
3752 + unsigned int id, int type)
3755 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3757 + err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
3761 + /* Claim and configure the CCG */
3762 + err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
3767 + /* Claim and configure the CQ */
3768 + if (type == WBFS_GRP_B)
3769 + err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
3772 + err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
3777 + /* Configure the CQ weight: real number multiplied by 100 to get rid
3780 + err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
3781 + cls->wbfs.weight * 100);
3785 + /* Claim and configure a LFQ */
3786 + err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
3793 +/* Find class in qdisc hash table using given handle */
3794 +static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
3796 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3797 + struct Qdisc_class_common *clc;
3799 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
3800 + __func__, handle, sch->handle);
3802 + clc = qdisc_class_find(&priv->clhash, handle);
3803 + return clc ? container_of(clc, struct ceetm_class, common) : NULL;
3806 +/* Insert a class in the qdisc's class hash */
3807 +static void ceetm_link_class(struct Qdisc *sch,
3808 + struct Qdisc_class_hash *clhash,
3809 + struct Qdisc_class_common *common)
3811 + sch_tree_lock(sch);
3812 + qdisc_class_hash_insert(clhash, common);
3813 + sch_tree_unlock(sch);
3814 + qdisc_class_hash_grow(sch, clhash);
3817 +/* Destroy a ceetm class */
3818 +static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
3823 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
3824 + __func__, cl->common.classid, sch->handle);
3826 + switch (cl->type) {
3828 + if (cl->root.child) {
3829 + qdisc_destroy(cl->root.child);
3830 + cl->root.child = NULL;
3833 + if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
3834 + pr_err(KBUILD_BASENAME
3835 + " : %s : error releasing the channel %d\n",
3836 + __func__, cl->root.ch->idx);
3841 + if (cl->prio.child) {
3842 + qdisc_destroy(cl->prio.child);
3843 + cl->prio.child = NULL;
3846 + if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
3847 + pr_err(KBUILD_BASENAME
3848 + " : %s : error releasing the LFQ %d\n",
3849 + __func__, cl->prio.lfq->idx);
3851 + if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
3852 + pr_err(KBUILD_BASENAME
3853 + " : %s : error releasing the CQ %d\n",
3854 + __func__, cl->prio.cq->idx);
3856 + if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
3857 + pr_err(KBUILD_BASENAME
3858 + " : %s : error releasing the CCG %d\n",
3859 + __func__, cl->prio.ccg->idx);
3861 + kfree(cl->prio.fq);
3863 + if (cl->prio.cstats)
3864 + free_percpu(cl->prio.cstats);
3869 + if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
3870 + pr_err(KBUILD_BASENAME
3871 + " : %s : error releasing the LFQ %d\n",
3872 + __func__, cl->wbfs.lfq->idx);
3874 + if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
3875 + pr_err(KBUILD_BASENAME
3876 + " : %s : error releasing the CQ %d\n",
3877 + __func__, cl->wbfs.cq->idx);
3879 + if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
3880 + pr_err(KBUILD_BASENAME
3881 + " : %s : error releasing the CCG %d\n",
3882 + __func__, cl->wbfs.ccg->idx);
3884 + kfree(cl->wbfs.fq);
3886 + if (cl->wbfs.cstats)
3887 + free_percpu(cl->wbfs.cstats);
3890 + tcf_destroy_chain(&cl->filter_list);
3894 +/* Destroy a ceetm qdisc */
3895 +static void ceetm_destroy(struct Qdisc *sch)
3897 + unsigned int ntx, i;
3898 + struct hlist_node *next;
3899 + struct ceetm_class *cl;
3900 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3901 + struct net_device *dev = qdisc_dev(sch);
3903 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
3904 + __func__, sch->handle);
3906 + /* All filters need to be removed before destroying the classes */
3907 + tcf_destroy_chain(&priv->filter_list);
3909 + for (i = 0; i < priv->clhash.hashsize; i++) {
3910 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
3911 + tcf_destroy_chain(&cl->filter_list);
3914 + for (i = 0; i < priv->clhash.hashsize; i++) {
3915 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
3917 + ceetm_cls_destroy(sch, cl);
3920 + qdisc_class_hash_destroy(&priv->clhash);
3922 + switch (priv->type) {
3924 + dpa_disable_ceetm(dev);
3926 + if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
3927 + pr_err(KBUILD_BASENAME
3928 + " : %s : error releasing the LNI %d\n",
3929 + __func__, priv->root.lni->idx);
3931 + if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
3932 + pr_err(KBUILD_BASENAME
3933 + " : %s : error releasing the SP %d\n",
3934 + __func__, priv->root.sp->idx);
3936 + if (priv->root.qstats)
3937 + free_percpu(priv->root.qstats);
3939 + if (!priv->root.qdiscs)
3942 + /* Remove the pfifo qdiscs */
3943 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
3944 + if (priv->root.qdiscs[ntx])
3945 + qdisc_destroy(priv->root.qdiscs[ntx]);
3947 + kfree(priv->root.qdiscs);
3951 + if (priv->prio.parent)
3952 + priv->prio.parent->root.child = NULL;
3956 + if (priv->wbfs.parent)
3957 + priv->wbfs.parent->prio.child = NULL;
3962 +static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
3964 + struct Qdisc *qdisc;
3965 + unsigned int ntx, i;
3966 + struct nlattr *nest;
3967 + struct tc_ceetm_qopt qopt;
3968 + struct ceetm_qdisc_stats *qstats;
3969 + struct net_device *dev = qdisc_dev(sch);
3970 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3972 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
3974 + sch_tree_lock(sch);
3975 + memset(&qopt, 0, sizeof(qopt));
3976 + qopt.type = priv->type;
3977 + qopt.shaped = priv->shaped;
3979 + switch (priv->type) {
3981 + /* Gather statistics from the underlying pfifo qdiscs */
3983 + memset(&sch->bstats, 0, sizeof(sch->bstats));
3984 + memset(&sch->qstats, 0, sizeof(sch->qstats));
3986 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
3987 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
3988 + sch->q.qlen += qdisc->q.qlen;
3989 + sch->bstats.bytes += qdisc->bstats.bytes;
3990 + sch->bstats.packets += qdisc->bstats.packets;
3991 + sch->qstats.qlen += qdisc->qstats.qlen;
3992 + sch->qstats.backlog += qdisc->qstats.backlog;
3993 + sch->qstats.drops += qdisc->qstats.drops;
3994 + sch->qstats.requeues += qdisc->qstats.requeues;
3995 + sch->qstats.overlimits += qdisc->qstats.overlimits;
3998 + for_each_online_cpu(i) {
3999 + qstats = per_cpu_ptr(priv->root.qstats, i);
4000 + sch->qstats.drops += qstats->drops;
4003 + qopt.rate = priv->root.rate;
4004 + qopt.ceil = priv->root.ceil;
4005 + qopt.overhead = priv->root.overhead;
4009 + qopt.qcount = priv->prio.qcount;
4013 + qopt.qcount = priv->wbfs.qcount;
4014 + qopt.cr = priv->wbfs.cr;
4015 + qopt.er = priv->wbfs.er;
4019 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4020 + sch_tree_unlock(sch);
4024 + nest = nla_nest_start(skb, TCA_OPTIONS);
4026 + goto nla_put_failure;
4027 + if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
4028 + goto nla_put_failure;
4029 + nla_nest_end(skb, nest);
4031 + sch_tree_unlock(sch);
4035 + sch_tree_unlock(sch);
4036 + nla_nest_cancel(skb, nest);
4040 +/* Configure a root ceetm qdisc */
4041 +static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
4042 + struct tc_ceetm_qopt *qopt)
4044 + struct netdev_queue *dev_queue;
4045 + struct Qdisc *qdisc;
4046 + enum qm_dc_portal dcp_id;
4047 + unsigned int i, sp_id, parent_id;
4050 + struct qm_ceetm_sp *sp;
4051 + struct qm_ceetm_lni *lni;
4052 + struct net_device *dev = qdisc_dev(sch);
4053 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
4054 + struct mac_device *mac_dev = dpa_priv->mac_dev;
4056 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4058 + /* Validate inputs */
4059 + if (sch->parent != TC_H_ROOT) {
4060 + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
4061 + tcf_destroy_chain(&priv->filter_list);
4062 + qdisc_class_hash_destroy(&priv->clhash);
4067 + pr_err("CEETM: the interface is lacking a mac\n");
4069 + goto err_init_root;
4072 + /* pre-allocate underlying pfifo qdiscs */
4073 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
4074 + sizeof(priv->root.qdiscs[0]),
4076 + if (!priv->root.qdiscs) {
4078 + goto err_init_root;
4081 + for (i = 0; i < dev->num_tx_queues; i++) {
4082 + dev_queue = netdev_get_tx_queue(dev, i);
4083 + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
4084 + TC_H_MIN(i + PFIFO_MIN_OFFSET));
4086 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
4090 + goto err_init_root;
4093 + priv->root.qdiscs[i] = qdisc;
4094 + qdisc->flags |= TCQ_F_ONETXQUEUE;
4097 + sch->flags |= TCQ_F_MQROOT;
4099 + priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
4100 + if (!priv->root.qstats) {
4101 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4104 + goto err_init_root;
4107 + priv->shaped = qopt->shaped;
4108 + priv->root.rate = qopt->rate;
4109 + priv->root.ceil = qopt->ceil;
4110 + priv->root.overhead = qopt->overhead;
4112 + /* Claim the SP */
4113 + get_dcp_and_sp(dev, &dcp_id, &sp_id);
4114 + err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
4116 + pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
4118 + goto err_init_root;
4121 + priv->root.sp = sp;
4123 + /* Claim the LNI - will use the same id as the SP id since SPs 0-7
4124 + * are connected to the TX FMan ports
4126 + err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
4128 + pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
4130 + goto err_init_root;
4133 + priv->root.lni = lni;
4135 + err = qman_ceetm_sp_set_lni(sp, lni);
4137 + pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
4139 + goto err_init_root;
4144 + /* Configure the LNI shaper */
4145 + if (priv->shaped) {
4146 + err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
4148 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4150 + goto err_init_root;
4153 + bps = priv->root.rate << 3; /* Bps -> bps */
4154 + err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
4156 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4158 + goto err_init_root;
4161 + bps = priv->root.ceil << 3; /* Bps -> bps */
4162 + err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
4164 + pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
4166 + goto err_init_root;
4170 + /* TODO default configuration */
4172 + dpa_enable_ceetm(dev);
4176 + ceetm_destroy(sch);
4180 +/* Configure a prio ceetm qdisc */
4181 +static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
4182 + struct tc_ceetm_qopt *qopt)
4186 + struct ceetm_class *parent_cl, *child_cl;
4187 + struct Qdisc *parent_qdisc;
4188 + struct net_device *dev = qdisc_dev(sch);
4190 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4192 + if (sch->parent == TC_H_ROOT) {
4193 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
4195 + goto err_init_prio;
4198 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4199 + if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4200 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
4202 + goto err_init_prio;
4205 + /* Obtain the parent root ceetm_class */
4206 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4208 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
4209 + pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
4211 + goto err_init_prio;
4214 + priv->prio.parent = parent_cl;
4215 + parent_cl->root.child = sch;
4217 + priv->shaped = parent_cl->shaped;
4218 + priv->prio.qcount = qopt->qcount;
4220 + /* Create and configure qcount child classes */
4221 + for (i = 0; i < priv->prio.qcount; i++) {
4222 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4224 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4227 + goto err_init_prio;
4230 + child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
4231 + if (!child_cl->prio.cstats) {
4232 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4235 + goto err_init_prio_cls;
4238 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4239 + child_cl->refcnt = 1;
4240 + child_cl->parent = sch;
4241 + child_cl->type = CEETM_PRIO;
4242 + child_cl->shaped = priv->shaped;
4243 + child_cl->prio.child = NULL;
4245 + /* All shaped CQs have CR and ER enabled by default */
4246 + child_cl->prio.cr = child_cl->shaped;
4247 + child_cl->prio.er = child_cl->shaped;
4248 + child_cl->prio.fq = NULL;
4249 + child_cl->prio.cq = NULL;
4251 + /* Configure the corresponding hardware CQ */
4252 + err = ceetm_config_prio_cls(child_cl, dev,
4253 + parent_cl->root.ch, i);
4255 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
4256 + __func__, child_cl->common.classid);
4257 + goto err_init_prio_cls;
4260 + /* Add class handle in Qdisc */
4261 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4262 + pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X associated with CQ %d and CCG %d\n",
4263 + __func__, child_cl->common.classid,
4264 + child_cl->prio.cq->idx, child_cl->prio.ccg->idx);
4270 + ceetm_cls_destroy(sch, child_cl);
4272 + ceetm_destroy(sch);
4276 +/* Configure a wbfs ceetm qdisc */
4277 +static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
4278 + struct tc_ceetm_qopt *qopt)
4280 + int err, group_b, small_group;
4281 + unsigned int i, id, prio_a, prio_b;
4282 + struct ceetm_class *parent_cl, *child_cl, *root_cl;
4283 + struct Qdisc *parent_qdisc;
4284 + struct ceetm_qdisc *parent_priv;
4285 + struct qm_ceetm_channel *channel;
4286 + struct net_device *dev = qdisc_dev(sch);
4288 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4290 + /* Validate inputs */
4291 + if (sch->parent == TC_H_ROOT) {
4292 + pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n");
4294 + goto err_init_wbfs;
4297 + /* Obtain the parent prio ceetm qdisc */
4298 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4299 + if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4300 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
4302 + goto err_init_wbfs;
4305 + /* Obtain the parent prio ceetm class */
4306 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4307 + parent_priv = qdisc_priv(parent_qdisc);
4309 + if (!parent_cl || parent_cl->type != CEETM_PRIO) {
4310 + pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a prio ceetm class\n");
4312 + goto err_init_wbfs;
4315 + if (!qopt->qcount || !qopt->qweight[0]) {
4316 + pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
4318 + goto err_init_wbfs;
4321 + priv->shaped = parent_cl->shaped;
4323 + if (!priv->shaped && (qopt->cr || qopt->er)) {
4324 + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
4326 + goto err_init_wbfs;
4329 + if (priv->shaped && !(qopt->cr || qopt->er)) {
4330 + pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
4332 + goto err_init_wbfs;
4335 + /* Obtain the parent root ceetm class */
4336 + root_cl = parent_priv->prio.parent;
4337 + if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
4338 + root_cl->root.wbfs_grp_large) {
4339 + pr_err("CEETM: no more wbfs classes are available\n");
4341 + goto err_init_wbfs;
4344 + if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
4345 + qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
4346 + pr_err("CEETM: only %d wbfs classes are available\n",
4347 + CEETM_MIN_WBFS_QCOUNT);
4349 + goto err_init_wbfs;
4352 + priv->wbfs.parent = parent_cl;
4353 + parent_cl->prio.child = sch;
4355 + priv->wbfs.qcount = qopt->qcount;
4356 + priv->wbfs.cr = qopt->cr;
4357 + priv->wbfs.er = qopt->er;
4359 + channel = root_cl->root.ch;
4361 + /* Configure the hardware wbfs channel groups */
4362 + if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
4363 + /* Configure the large group A */
4364 + priv->wbfs.group_type = WBFS_GRP_LARGE;
4365 + small_group = false;
4367 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4370 + } else if (root_cl->root.wbfs_grp_a) {
4371 + /* Configure the group B */
4372 + priv->wbfs.group_type = WBFS_GRP_B;
4374 + err = qman_ceetm_channel_get_group(channel, &small_group,
4375 + &prio_a, &prio_b);
4377 + pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
4379 + goto err_init_wbfs;
4382 + small_group = true;
4384 + prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
4385 + /* If group A isn't configured, configure it as group B */
4386 + prio_a = prio_a ? : prio_b;
4389 + /* Configure the small group A */
4390 + priv->wbfs.group_type = WBFS_GRP_A;
4392 + err = qman_ceetm_channel_get_group(channel, &small_group,
4393 + &prio_a, &prio_b);
4395 + pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
4397 + goto err_init_wbfs;
4400 + small_group = true;
4402 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4403 + /* If group B isn't configured, configure it as group A */
4404 + prio_b = prio_b ? : prio_a;
4407 + err = qman_ceetm_channel_set_group(channel, small_group, prio_a,
4410 + goto err_init_wbfs;
4412 + if (priv->shaped) {
4413 + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
4417 + pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
4419 + goto err_init_wbfs;
4422 + err = qman_ceetm_channel_set_group_er_eligibility(channel,
4426 + pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
4428 + goto err_init_wbfs;
4432 + /* Create qcount child classes */
4433 + for (i = 0; i < priv->wbfs.qcount; i++) {
4434 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4436 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4439 + goto err_init_wbfs;
4442 + child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
4443 + if (!child_cl->wbfs.cstats) {
4444 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4447 + goto err_init_wbfs_cls;
4450 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4451 + child_cl->refcnt = 1;
4452 + child_cl->parent = sch;
4453 + child_cl->type = CEETM_WBFS;
4454 + child_cl->shaped = priv->shaped;
4455 + child_cl->wbfs.fq = NULL;
4456 + child_cl->wbfs.cq = NULL;
4457 + child_cl->wbfs.weight = qopt->qweight[i];
4459 + if (priv->wbfs.group_type == WBFS_GRP_B)
4460 + id = WBFS_GRP_B_OFFSET + i;
4462 + id = WBFS_GRP_A_OFFSET + i;
4464 + err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
4465 + priv->wbfs.group_type);
4467 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
4468 + __func__, child_cl->common.classid);
4469 + goto err_init_wbfs_cls;
4472 + /* Add class handle in Qdisc */
4473 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4474 + pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X associated with CQ %d and CCG %d\n",
4475 + __func__, child_cl->common.classid,
4476 + child_cl->wbfs.cq->idx, child_cl->wbfs.ccg->idx);
4479 + /* Signal the root class that a group has been configured */
4480 + switch (priv->wbfs.group_type) {
4481 + case WBFS_GRP_LARGE:
4482 + root_cl->root.wbfs_grp_large = true;
4485 + root_cl->root.wbfs_grp_a = true;
4488 + root_cl->root.wbfs_grp_b = true;
4495 + ceetm_cls_destroy(sch, child_cl);
4497 + ceetm_destroy(sch);
4501 +/* Configure a generic ceetm qdisc */
4502 +static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
4504 + struct tc_ceetm_qopt *qopt;
4505 + struct nlattr *tb[TCA_CEETM_QOPS + 1];
4507 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4508 + struct net_device *dev = qdisc_dev(sch);
4510 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4512 + if (!netif_is_multiqueue(dev))
4513 + return -EOPNOTSUPP;
4516 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4520 + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
4522 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4526 + if (!tb[TCA_CEETM_QOPS]) {
4527 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4531 + if (TC_H_MIN(sch->handle)) {
4532 + pr_err("CEETM: a qdisc should not have a minor\n");
4536 + qopt = nla_data(tb[TCA_CEETM_QOPS]);
4538 + /* Initialize the class hash list. Each qdisc has its own class hash */
4539 + ret = qdisc_class_hash_init(&priv->clhash);
4541 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
4546 + priv->type = qopt->type;
4548 + switch (priv->type) {
4550 + ret = ceetm_init_root(sch, priv, qopt);
4553 + ret = ceetm_init_prio(sch, priv, qopt);
4556 + ret = ceetm_init_wbfs(sch, priv, qopt);
4559 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4560 + ceetm_destroy(sch);
4567 +/* Edit a root ceetm qdisc */
4568 +static int ceetm_change_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
4569 + struct net_device *dev,
4570 + struct tc_ceetm_qopt *qopt)
4575 + if (priv->shaped != (bool)qopt->shaped) {
4576 + pr_err("CEETM: qdisc %X is %s\n", sch->handle,
4577 + priv->shaped ? "shaped" : "unshaped");
4581 + /* Nothing to modify for unshaped qdiscs */
4582 + if (!priv->shaped)
4585 + /* Configure the LNI shaper */
4586 + if (priv->root.overhead != qopt->overhead) {
4587 + err = qman_ceetm_lni_enable_shaper(priv->root.lni, 1,
4591 + priv->root.overhead = qopt->overhead;
4594 + if (priv->root.rate != qopt->rate) {
4595 + bps = qopt->rate << 3; /* Bps -> bps */
4596 + err = qman_ceetm_lni_set_commit_rate_bps(priv->root.lni, bps,
4600 + priv->root.rate = qopt->rate;
4603 + if (priv->root.ceil != qopt->ceil) {
4604 + bps = qopt->ceil << 3; /* Bps -> bps */
4605 + err = qman_ceetm_lni_set_excess_rate_bps(priv->root.lni, bps,
4609 + priv->root.ceil = qopt->ceil;
4615 + pr_err(KBUILD_BASENAME " : %s : failed to configure the root ceetm qdisc %X\n",
4616 + __func__, sch->handle);
4620 +/* Edit a wbfs ceetm qdisc */
4621 +static int ceetm_change_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
4622 + struct tc_ceetm_qopt *qopt)
4626 + struct qm_ceetm_channel *channel;
4627 + struct ceetm_class *prio_class, *root_class;
4628 + struct ceetm_qdisc *prio_qdisc;
4630 + if (qopt->qcount) {
4631 + pr_err("CEETM: the qcount can not be modified\n");
4635 + if (qopt->qweight[0]) {
4636 + pr_err("CEETM: the qweight can be modified through the wbfs classes\n");
4640 + if (!priv->shaped && (qopt->cr || qopt->er)) {
4641 + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
4645 + if (priv->shaped && !(qopt->cr || qopt->er)) {
4646 + pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
4650 + /* Nothing to modify for unshaped qdiscs */
4651 + if (!priv->shaped)
4654 + prio_class = priv->wbfs.parent;
4655 + prio_qdisc = qdisc_priv(prio_class->parent);
4656 + root_class = prio_qdisc->prio.parent;
4657 + channel = root_class->root.ch;
4658 + group_b = priv->wbfs.group_type == WBFS_GRP_B;
4660 + if (qopt->cr != priv->wbfs.cr) {
4661 + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
4666 + priv->wbfs.cr = qopt->cr;
4669 + if (qopt->er != priv->wbfs.er) {
4670 + err = qman_ceetm_channel_set_group_er_eligibility(channel,
4675 + priv->wbfs.er = qopt->er;
4681 + pr_err(KBUILD_BASENAME " : %s : failed to configure the wbfs ceetm qdisc %X\n",
4682 + __func__, sch->handle);
4686 +/* Edit a ceetm qdisc */
4687 +static int ceetm_change(struct Qdisc *sch, struct nlattr *opt)
4689 + struct tc_ceetm_qopt *qopt;
4690 + struct nlattr *tb[TCA_CEETM_QOPS + 1];
4692 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4693 + struct net_device *dev = qdisc_dev(sch);
4695 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4697 + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
4699 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4703 + if (!tb[TCA_CEETM_QOPS]) {
4704 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4708 + if (TC_H_MIN(sch->handle)) {
4709 + pr_err("CEETM: a qdisc should not have a minor\n");
4713 + qopt = nla_data(tb[TCA_CEETM_QOPS]);
4715 + if (priv->type != qopt->type) {
4716 + pr_err("CEETM: qdisc %X is not of the provided type\n",
4721 + switch (priv->type) {
4723 + ret = ceetm_change_root(sch, priv, dev, qopt);
4726 + pr_err("CEETM: prio qdiscs can not be modified\n");
4730 + ret = ceetm_change_wbfs(sch, priv, qopt);
4733 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4740 +/* Attach the underlying pfifo qdiscs */
4741 +static void ceetm_attach(struct Qdisc *sch)
4743 + struct net_device *dev = qdisc_dev(sch);
4744 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4745 + struct Qdisc *qdisc, *old_qdisc;
4748 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4750 + for (i = 0; i < dev->num_tx_queues; i++) {
4751 + qdisc = priv->root.qdiscs[i];
4752 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
4754 + qdisc_destroy(old_qdisc);
4758 +static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
4760 + struct ceetm_class *cl;
4762 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4763 + __func__, classid, sch->handle);
4764 + cl = ceetm_find(classid, sch);
4767 + cl->refcnt++; /* Will decrement in put() */
4768 + return (unsigned long)cl;
4771 +static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
4773 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4775 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4776 + __func__, cl->common.classid, sch->handle);
4779 + if (cl->refcnt == 0)
4780 + ceetm_cls_destroy(sch, cl);
4783 +static int ceetm_cls_change_root(struct ceetm_class *cl,
4784 + struct tc_ceetm_copt *copt,
4785 + struct net_device *dev)
4790 + if ((bool)copt->shaped != cl->shaped) {
4791 + pr_err("CEETM: class %X is %s\n", cl->common.classid,
4792 + cl->shaped ? "shaped" : "unshaped");
4796 + if (cl->shaped && cl->root.rate != copt->rate) {
4797 + bps = copt->rate << 3; /* Bps -> bps */
4798 + err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps,
4801 + goto change_cls_err;
4802 + cl->root.rate = copt->rate;
4805 + if (cl->shaped && cl->root.ceil != copt->ceil) {
4806 + bps = copt->ceil << 3; /* Bps -> bps */
4807 + err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps,
4810 + goto change_cls_err;
4811 + cl->root.ceil = copt->ceil;
4814 + if (!cl->shaped && cl->root.tbl != copt->tbl) {
4815 + err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl);
4817 + goto change_cls_err;
4818 + cl->root.tbl = copt->tbl;
4824 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm root class %X\n",
4825 + __func__, cl->common.classid);
4829 +static int ceetm_cls_change_prio(struct ceetm_class *cl,
4830 + struct tc_ceetm_copt *copt)
4834 + if (!cl->shaped && (copt->cr || copt->er)) {
4835 + pr_err("CEETM: only shaped classes can have CR and ER enabled\n");
4839 + if (cl->prio.cr != (bool)copt->cr) {
4840 + err = qman_ceetm_channel_set_cq_cr_eligibility(
4841 + cl->prio.cq->parent,
4845 + goto change_cls_err;
4846 + cl->prio.cr = copt->cr;
4849 + if (cl->prio.er != (bool)copt->er) {
4850 + err = qman_ceetm_channel_set_cq_er_eligibility(
4851 + cl->prio.cq->parent,
4855 + goto change_cls_err;
4856 + cl->prio.er = copt->er;
4862 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
4863 + __func__, cl->common.classid);
4867 +static int ceetm_cls_change_wbfs(struct ceetm_class *cl,
4868 + struct tc_ceetm_copt *copt)
4872 + if (copt->weight != cl->wbfs.weight) {
4873 + /* Configure the CQ weight: real number multiplied by 100 to
4874 + * get rid of the fraction
4876 + err = qman_ceetm_set_queue_weight_in_ratio(cl->wbfs.cq,
4877 + copt->weight * 100);
4880 + pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
4881 + __func__, cl->common.classid);
4885 + cl->wbfs.weight = copt->weight;
4891 +/* Add a ceetm root class or configure a ceetm root/prio/wbfs class */
4892 +static int ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
4893 + struct nlattr **tca, unsigned long *arg)
4897 + struct ceetm_qdisc *priv;
4898 + struct ceetm_class *cl = (struct ceetm_class *)*arg;
4899 + struct nlattr *opt = tca[TCA_OPTIONS];
4900 + struct nlattr *tb[__TCA_CEETM_MAX];
4901 + struct tc_ceetm_copt *copt;
4902 + struct qm_ceetm_channel *channel;
4903 + struct net_device *dev = qdisc_dev(sch);
4905 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
4906 + __func__, classid, sch->handle);
4908 + if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
4909 + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
4913 + priv = qdisc_priv(sch);
4916 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4920 + if (!cl && sch->handle != parentid) {
4921 + pr_err("CEETM: classes can be attached to the root ceetm qdisc only\n");
4925 + if (!cl && priv->type != CEETM_ROOT) {
4926 + pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
4930 + err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
4932 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4936 + if (!tb[TCA_CEETM_COPT]) {
4937 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4941 + if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
4942 + pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm root classes\n");
4946 + copt = nla_data(tb[TCA_CEETM_COPT]);
4948 + /* Configure an existing ceetm class */
4950 + if (copt->type != cl->type) {
4951 + pr_err("CEETM: class %X is not of the provided type\n",
4952 + cl->common.classid);
4956 + switch (copt->type) {
4958 + return ceetm_cls_change_root(cl, copt, dev);
4961 + return ceetm_cls_change_prio(cl, copt);
4964 + return ceetm_cls_change_wbfs(cl, copt);
4967 + pr_err(KBUILD_BASENAME " : %s : invalid class\n",
4973 + /* Add a new root ceetm class */
4974 + if (copt->type != CEETM_ROOT) {
4975 + pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
4979 + if (copt->shaped && !priv->shaped) {
4980 + pr_err("CEETM: can not add a shaped ceetm root class under an unshaped ceetm root qdisc\n");
4984 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
4988 + cl->type = copt->type;
4989 + cl->shaped = copt->shaped;
4990 + cl->root.rate = copt->rate;
4991 + cl->root.ceil = copt->ceil;
4992 + cl->root.tbl = copt->tbl;
4994 + cl->common.classid = classid;
4997 + cl->root.child = NULL;
4998 + cl->root.wbfs_grp_a = false;
4999 + cl->root.wbfs_grp_b = false;
5000 + cl->root.wbfs_grp_large = false;
5002 + /* Claim a CEETM channel */
5003 + err = qman_ceetm_channel_claim(&channel, priv->root.lni);
5005 + pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
5010 + cl->root.ch = channel;
5013 + /* Configure the channel shaper */
5014 + err = qman_ceetm_channel_enable_shaper(channel, 1);
5018 + bps = cl->root.rate << 3; /* Bps -> bps */
5019 + err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
5024 + bps = cl->root.ceil << 3; /* Bps -> bps */
5025 + err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
5031 + /* Configure the uFQ algorithm */
5032 + err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
5037 + /* Add class handle in Qdisc */
5038 + ceetm_link_class(sch, &priv->clhash, &cl->common);
5040 + pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with channel %d\n",
5041 + __func__, classid, channel->idx);
5042 + *arg = (unsigned long)cl;
5046 + pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
5047 + __func__, channel->idx);
5048 + if (qman_ceetm_channel_release(channel))
5049 + pr_err(KBUILD_BASENAME " : %s : failed to release the channel %d\n",
5050 + __func__, channel->idx);
5056 +static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
5058 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5059 + struct ceetm_class *cl;
5062 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
5067 + for (i = 0; i < priv->clhash.hashsize; i++) {
5068 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
5069 + if (arg->count < arg->skip) {
5073 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
5082 +static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
5083 + struct sk_buff *skb, struct tcmsg *tcm)
5085 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5086 + struct nlattr *nest;
5087 + struct tc_ceetm_copt copt;
5089 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5090 + __func__, cl->common.classid, sch->handle);
5092 + sch_tree_lock(sch);
5094 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
5095 + tcm->tcm_handle = cl->common.classid;
5097 + memset(&copt, 0, sizeof(copt));
5099 + copt.shaped = cl->shaped;
5100 + copt.type = cl->type;
5102 + switch (cl->type) {
5104 + if (cl->root.child)
5105 + tcm->tcm_info = cl->root.child->handle;
5107 + copt.rate = cl->root.rate;
5108 + copt.ceil = cl->root.ceil;
5109 + copt.tbl = cl->root.tbl;
5113 + if (cl->prio.child)
5114 + tcm->tcm_info = cl->prio.child->handle;
5116 + copt.cr = cl->prio.cr;
5117 + copt.er = cl->prio.er;
5121 + copt.weight = cl->wbfs.weight;
5125 + nest = nla_nest_start(skb, TCA_OPTIONS);
5127 + goto nla_put_failure;
5128 + if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
5129 + goto nla_put_failure;
5130 + nla_nest_end(skb, nest);
5131 + sch_tree_unlock(sch);
5135 + sch_tree_unlock(sch);
5136 + nla_nest_cancel(skb, nest);
5140 +static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
5142 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5143 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5145 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5146 + __func__, cl->common.classid, sch->handle);
5148 + sch_tree_lock(sch);
5149 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
5152 + /* The refcnt should be at least 1 since we have incremented it in
5153 + * get(). Will decrement again in put() where we will call destroy()
5154 + * to actually free the memory if it reaches 0.
5156 + WARN_ON(cl->refcnt == 0);
5158 + sch_tree_unlock(sch);
5162 +/* Get the class' child qdisc, if any */
5163 +static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
5165 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5167 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
5168 + __func__, cl->common.classid, sch->handle);
5170 + switch (cl->type) {
5172 + return cl->root.child;
5175 + return cl->prio.child;
5181 +static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
5182 + struct Qdisc *new, struct Qdisc **old)
5184 + if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
5185 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
5186 + return -EOPNOTSUPP;
5192 +static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
5193 + struct gnet_dump *d)
5196 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5197 + struct gnet_stats_basic_packed tmp_bstats;
5198 + struct ceetm_class_stats *cstats = NULL;
5199 + struct qm_ceetm_cq *cq = NULL;
5200 + struct tc_ceetm_xstats xstats;
5202 + memset(&xstats, 0, sizeof(xstats));
5203 + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
5205 + switch (cl->type) {
5216 + for_each_online_cpu(i) {
5217 + switch (cl->type) {
5219 + cstats = per_cpu_ptr(cl->prio.cstats, i);
5222 + cstats = per_cpu_ptr(cl->wbfs.cstats, i);
5227 + xstats.ern_drop_count += cstats->ern_drop_count;
5228 + xstats.congested_count += cstats->congested_count;
5229 + tmp_bstats.bytes += cstats->bstats.bytes;
5230 + tmp_bstats.packets += cstats->bstats.packets;
5234 + if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
5237 + if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
5238 + &xstats.frame_count,
5239 + &xstats.byte_count))
5242 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
5245 +static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
5247 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5248 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5249 + struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
5251 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5252 + cl ? cl->common.classid : 0, sch->handle);
5256 +static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
5259 + struct ceetm_class *cl = ceetm_find(classid, sch);
5261 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5262 + cl ? cl->common.classid : 0, sch->handle);
5263 + return (unsigned long)cl;
5266 +static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
5268 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5270 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5271 + cl ? cl->common.classid : 0, sch->handle);
5274 +const struct Qdisc_class_ops ceetm_cls_ops = {
5275 + .graft = ceetm_cls_graft,
5276 + .leaf = ceetm_cls_leaf,
5277 + .get = ceetm_cls_get,
5278 + .put = ceetm_cls_put,
5279 + .change = ceetm_cls_change,
5280 + .delete = ceetm_cls_delete,
5281 + .walk = ceetm_cls_walk,
5282 + .tcf_chain = ceetm_tcf_chain,
5283 + .bind_tcf = ceetm_tcf_bind,
5284 + .unbind_tcf = ceetm_tcf_unbind,
5285 + .dump = ceetm_cls_dump,
5286 + .dump_stats = ceetm_cls_dump_stats,
5289 +struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
5291 + .priv_size = sizeof(struct ceetm_qdisc),
5292 + .cl_ops = &ceetm_cls_ops,
5293 + .init = ceetm_init,
5294 + .destroy = ceetm_destroy,
5295 + .change = ceetm_change,
5296 + .dump = ceetm_dump,
5297 + .attach = ceetm_attach,
5298 + .owner = THIS_MODULE,
5301 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
5302 +static struct ceetm_class *ceetm_classify(struct sk_buff *skb,
5303 + struct Qdisc *sch, int *qerr,
5306 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5307 + struct ceetm_class *cl = NULL, *wbfs_cl;
5308 + struct tcf_result res;
5309 + struct tcf_proto *tcf;
5312 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
5313 + tcf = priv->filter_list;
5314 + while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
5315 +#ifdef CONFIG_NET_CLS_ACT
5317 + case TC_ACT_QUEUED:
5318 + case TC_ACT_STOLEN:
5319 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
5321 + /* No valid class found due to action */
5326 + cl = (void *)res.class;
5328 + if (res.classid == sch->handle) {
5329 + /* The filter leads to the qdisc */
5330 + /* TODO default qdisc */
5334 + cl = ceetm_find(res.classid, sch);
5336 + /* The filter leads to an invalid class */
5340 + /* The class might have its own filters attached */
5341 + tcf = cl->filter_list;
5345 + /* No valid class found */
5346 + /* TODO default qdisc */
5350 + switch (cl->type) {
5352 + if (cl->root.child) {
5353 + /* Run the prio qdisc classifiers */
5354 + return ceetm_classify(skb, cl->root.child, qerr,
5357 + /* The root class does not have a child prio qdisc */
5358 + /* TODO default qdisc */
5362 + if (cl->prio.child) {
5363 + /* If filters lead to a wbfs class, return it.
5364 + * Otherwise, return the prio class
5366 + wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
5368 + /* A NULL result might indicate either an erroneous
5369 + * filter, or no filters at all. We will assume the
5372 + return wbfs_cl ? : cl;
5376 + /* For wbfs and childless prio classes, return the class directly */
5380 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
5383 + bool act_drop = false;
5384 + struct Qdisc *sch = net_dev->qdisc;
5385 + struct ceetm_class *cl;
5386 + struct dpa_priv_s *priv_dpa;
5387 + struct qman_fq *egress_fq, *conf_fq;
5388 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5389 + struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
5390 + struct ceetm_class_stats *cstats;
5391 + const int queue_mapping = dpa_get_queue_mapping(skb);
5392 + spinlock_t *root_lock = qdisc_lock(sch);
5394 + spin_lock(root_lock);
5395 + cl = ceetm_classify(skb, sch, &ret, &act_drop);
5396 + spin_unlock(root_lock);
5398 +#ifdef CONFIG_NET_CLS_ACT
5400 + if (ret & __NET_XMIT_BYPASS)
5405 + /* TODO default class */
5406 + if (unlikely(!cl)) {
5411 + priv_dpa = netdev_priv(net_dev);
5412 + conf_fq = priv_dpa->conf_fqs[queue_mapping];
5414 + /* Choose the proper tx fq and update the basic stats (bytes and
5415 + * packets sent by the class)
5417 + switch (cl->type) {
5419 + egress_fq = &cl->prio.fq->fq;
5420 + cstats = this_cpu_ptr(cl->prio.cstats);
5423 + egress_fq = &cl->wbfs.fq->fq;
5424 + cstats = this_cpu_ptr(cl->wbfs.cstats);
5431 + bstats_update(&cstats->bstats, skb);
5432 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
5435 + dev_kfree_skb_any(skb);
5436 + return NET_XMIT_SUCCESS;
5439 +static int __init ceetm_register(void)
5443 + pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
5445 + _errno = register_qdisc(&ceetm_qdisc_ops);
5446 + if (unlikely(_errno))
5447 + pr_err(KBUILD_MODNAME
5448 + ": %s:%hu:%s(): register_qdisc() = %d\n",
5449 + KBUILD_BASENAME ".c", __LINE__, __func__, _errno);
5454 +static void __exit ceetm_unregister(void)
5456 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
5457 + KBUILD_BASENAME ".c", __func__);
5459 + unregister_qdisc(&ceetm_qdisc_ops);
5462 +module_init(ceetm_register);
5463 +module_exit(ceetm_unregister);
5465 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
5467 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
5469 + * Redistribution and use in source and binary forms, with or without
5470 + * modification, are permitted provided that the following conditions are met:
5471 + * * Redistributions of source code must retain the above copyright
5472 + * notice, this list of conditions and the following disclaimer.
5473 + * * Redistributions in binary form must reproduce the above copyright
5474 + * notice, this list of conditions and the following disclaimer in the
5475 + * documentation and/or other materials provided with the distribution.
5476 + * * Neither the name of Freescale Semiconductor nor the
5477 + * names of its contributors may be used to endorse or promote products
5478 + * derived from this software without specific prior written permission.
5481 + * ALTERNATIVELY, this software may be distributed under the terms of the
5482 + * GNU General Public License ("GPL") as published by the Free Software
5483 + * Foundation, either version 2 of that License or (at your option) any
5486 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5487 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5488 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5489 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5490 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5491 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5492 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5493 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5494 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5495 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5498 +#ifndef __DPAA_ETH_CEETM_H
5499 +#define __DPAA_ETH_CEETM_H
5501 +#include <net/pkt_sched.h>
5502 +#include <net/netlink.h>
5503 +#include <lnxwrp_fm.h>
5506 +#include "dpaa_eth_common.h"
5508 +/* Mask to determine the sub-portal id from a channel number */
5509 +#define CHANNEL_SP_MASK 0x1f
5510 +/* The number of the last channel that services DCP0, connected to FMan 0.
5511 + * Value validated for B4 and T series platforms.
5513 +#define DCP0_MAX_CHANNEL 0x80f
5514 +/* A2V=1 - field A2 is valid
5515 + * A0V=1 - field A0 is valid - enables frame confirmation
5516 + * OVOM=1 - override operation mode bits with values from A2
5517 + * EBD=1 - external buffers are deallocated at the end of the FMan flow
5518 + * NL=0 - the BMI releases all the internal buffers
5520 +#define CEETM_CONTEXT_A 0x1a00000080000000
5521 +/* The ratio between the superior and inferior congestion state thresholds. The
5522 + * lower threshold is set to 7/8 of the superior one (as the default for WQ
5525 +#define CEETM_CCGR_RATIO 0.875
5526 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
5527 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
5528 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
5531 +#define PFIFO_MIN_OFFSET 0x21
5533 +/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
5534 +#define CEETM_MAX_PRIO_QCOUNT 8
5535 +#define CEETM_MAX_WBFS_QCOUNT 8
5536 +#define CEETM_MIN_WBFS_QCOUNT 4
5538 +/* The id offsets of the CQs belonging to WBFS groups (ids 8-11/15 for group A
5539 + * and/or 12-15 for group B).
5541 +#define WBFS_GRP_A_OFFSET 8
5542 +#define WBFS_GRP_B_OFFSET 12
5544 +#define WBFS_GRP_A 1
5545 +#define WBFS_GRP_B 2
5546 +#define WBFS_GRP_LARGE 3
5555 +/* CEETM configuration types */
5562 +#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
5563 +extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
5565 +struct ceetm_class;
5566 +struct ceetm_qdisc_stats;
5567 +struct ceetm_class_stats;
5570 + struct qman_fq fq;
5571 + struct net_device *net_dev;
5572 + struct ceetm_class *ceetm_cls;
5576 + struct Qdisc **qdiscs;
5580 + struct qm_ceetm_sp *sp;
5581 + struct qm_ceetm_lni *lni;
5582 + struct ceetm_qdisc_stats __percpu *qstats;
5587 + struct ceetm_class *parent;
5593 + struct ceetm_class *parent;
5598 +struct ceetm_qdisc {
5599 + int type; /* LNI/CHNL/WBFS */
5602 + struct root_q root;
5603 + struct prio_q prio;
5604 + struct wbfs_q wbfs;
5606 + struct Qdisc_class_hash clhash;
5607 + struct tcf_proto *filter_list; /* qdisc attached filters */
5610 +/* CEETM Qdisc configuration parameters */
5611 +struct tc_ceetm_qopt {
5620 + __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
5624 + unsigned int rate;
5625 + unsigned int ceil;
5629 + bool wbfs_grp_large;
5630 + struct Qdisc *child;
5631 + struct qm_ceetm_channel *ch;
5637 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5638 + struct qm_ceetm_lfq *lfq;
5639 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5640 + struct qm_ceetm_ccg *ccg;
5641 + /* only one wbfs can be linked to one priority CQ */
5642 + struct Qdisc *child;
5643 + struct ceetm_class_stats __percpu *cstats;
5647 + __u8 weight; /* The weight of the class between 1 and 248 */
5648 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5649 + struct qm_ceetm_lfq *lfq;
5650 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5651 + struct qm_ceetm_ccg *ccg;
5652 + struct ceetm_class_stats __percpu *cstats;
5655 +struct ceetm_class {
5656 + struct Qdisc_class_common common;
5657 + int refcnt; /* usage count of this class */
5658 + struct tcf_proto *filter_list; /* class attached filters */
5659 + struct Qdisc *parent;
5661 + int type; /* ROOT/PRIO/WBFS */
5663 + struct root_c root;
5664 + struct prio_c prio;
5665 + struct wbfs_c wbfs;
5669 +/* CEETM Class configuration parameters */
5670 +struct tc_ceetm_copt {
5682 +struct ceetm_qdisc_stats {
5686 +struct ceetm_class_stats {
5687 + /* Software counters */
5688 + struct gnet_stats_basic_packed bstats;
5689 + __u32 ern_drop_count;
5690 + __u32 congested_count;
5693 +struct tc_ceetm_xstats {
5694 + __u32 ern_drop_count;
5695 + __u32 congested_count;
5696 + /* Hardware counters */
5697 + __u64 frame_count;
5701 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
5704 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
5706 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
5708 + * Redistribution and use in source and binary forms, with or without
5709 + * modification, are permitted provided that the following conditions are met:
5710 + * * Redistributions of source code must retain the above copyright
5711 + * notice, this list of conditions and the following disclaimer.
5712 + * * Redistributions in binary form must reproduce the above copyright
5713 + * notice, this list of conditions and the following disclaimer in the
5714 + * documentation and/or other materials provided with the distribution.
5715 + * * Neither the name of Freescale Semiconductor nor the
5716 + * names of its contributors may be used to endorse or promote products
5717 + * derived from this software without specific prior written permission.
5720 + * ALTERNATIVELY, this software may be distributed under the terms of the
5721 + * GNU General Public License ("GPL") as published by the Free Software
5722 + * Foundation, either version 2 of that License or (at your option) any
5725 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5726 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5727 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5728 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5729 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5730 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5731 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5732 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5733 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5734 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5737 +#include <linux/init.h>
5738 +#include <linux/module.h>
5739 +#include <linux/of_platform.h>
5740 +#include <linux/of_net.h>
5741 +#include <linux/etherdevice.h>
5742 +#include <linux/kthread.h>
5743 +#include <linux/percpu.h>
5744 +#include <linux/highmem.h>
5745 +#include <linux/sort.h>
5746 +#include <linux/fsl_qman.h>
5747 +#include <linux/ip.h>
5748 +#include <linux/ipv6.h>
5749 +#include <linux/if_vlan.h> /* vlan_eth_hdr */
5750 +#include "dpaa_eth.h"
5751 +#include "dpaa_eth_common.h"
5752 +#ifdef CONFIG_FSL_DPAA_1588
5753 +#include "dpaa_1588.h"
5755 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5756 +#include "dpaa_debugfs.h"
5757 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5760 +/* Size in bytes of the FQ taildrop threshold */
5761 +#define DPA_FQ_TD 0x200000
5763 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
5764 +struct ptp_priv_s ptp_priv;
5767 +static struct dpa_bp *dpa_bp_array[64];
5770 +EXPORT_SYMBOL(dpa_max_frm);
5772 +int dpa_rx_extra_headroom;
5773 +EXPORT_SYMBOL(dpa_rx_extra_headroom);
5775 +int dpa_num_cpus = NR_CPUS;
5777 +static const struct fqid_cell tx_confirm_fqids[] = {
5778 + {0, DPAA_ETH_TX_QUEUES}
5781 +static struct fqid_cell default_fqids[][3] = {
5782 + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
5783 + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
5786 +static const char fsl_qman_frame_queues[][25] = {
5787 + [RX] = "fsl,qman-frame-queues-rx",
5788 + [TX] = "fsl,qman-frame-queues-tx"
5790 +#ifdef CONFIG_FSL_DPAA_HOOKS
5791 +/* A set of callbacks for hooking into the fastpath at different points. */
5792 +struct dpaa_eth_hooks_s dpaa_eth_hooks;
5793 +EXPORT_SYMBOL(dpaa_eth_hooks);
5794 +/* This function should only be called on the probe paths, since it makes no
5795 + * effort to guarantee consistency of the destination hooks structure.
5797 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
5800 + dpaa_eth_hooks = *hooks;
5802 + pr_err("NULL pointer to hooks!\n");
5804 +EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
5807 +int dpa_netdev_init(struct net_device *net_dev,
5808 + const uint8_t *mac_addr,
5809 + uint16_t tx_timeout)
5812 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5813 + struct device *dev = net_dev->dev.parent;
5815 + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5817 + net_dev->features |= net_dev->hw_features;
5818 + net_dev->vlan_features = net_dev->features;
5820 + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
5821 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
5823 + net_dev->ethtool_ops = &dpa_ethtool_ops;
5825 + net_dev->needed_headroom = priv->tx_headroom;
5826 + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
5828 + err = register_netdev(net_dev);
5830 + dev_err(dev, "register_netdev() = %d\n", err);
5834 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5835 + /* create debugfs entry for this net_device */
5836 + err = dpa_netdev_debugfs_create(net_dev);
5838 + unregister_netdev(net_dev);
5841 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5845 +EXPORT_SYMBOL(dpa_netdev_init);
5847 +int __cold dpa_start(struct net_device *net_dev)
5850 + struct dpa_priv_s *priv;
5851 + struct mac_device *mac_dev;
5853 + priv = netdev_priv(net_dev);
5854 + mac_dev = priv->mac_dev;
5856 + err = mac_dev->init_phy(net_dev, priv->mac_dev);
5858 + if (netif_msg_ifup(priv))
5859 + netdev_err(net_dev, "init_phy() = %d\n", err);
5863 + for_each_port_device(i, mac_dev->port_dev) {
5864 + err = fm_port_enable(mac_dev->port_dev[i]);
5866 + goto mac_start_failed;
5869 + err = priv->mac_dev->start(mac_dev);
5871 + if (netif_msg_ifup(priv))
5872 + netdev_err(net_dev, "mac_dev->start() = %d\n", err);
5873 + goto mac_start_failed;
5876 + netif_tx_start_all_queues(net_dev);
5881 + for_each_port_device(i, mac_dev->port_dev)
5882 + fm_port_disable(mac_dev->port_dev[i]);
5886 +EXPORT_SYMBOL(dpa_start);
5888 +int __cold dpa_stop(struct net_device *net_dev)
5890 + int _errno, i, err;
5891 + struct dpa_priv_s *priv;
5892 + struct mac_device *mac_dev;
5894 + priv = netdev_priv(net_dev);
5895 + mac_dev = priv->mac_dev;
5897 + netif_tx_stop_all_queues(net_dev);
5898 + /* Allow the Fman (Tx) port to process in-flight frames before we
5899 + * try switching it off.
5901 + usleep_range(5000, 10000);
5903 + _errno = mac_dev->stop(mac_dev);
5904 + if (unlikely(_errno < 0))
5905 + if (netif_msg_ifdown(priv))
5906 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
5909 + for_each_port_device(i, mac_dev->port_dev) {
5910 + err = fm_port_disable(mac_dev->port_dev[i]);
5911 + _errno = err ? err : _errno;
5914 + if (mac_dev->phy_dev)
5915 + phy_disconnect(mac_dev->phy_dev);
5916 + mac_dev->phy_dev = NULL;
5920 +EXPORT_SYMBOL(dpa_stop);
5922 +void __cold dpa_timeout(struct net_device *net_dev)
5924 + const struct dpa_priv_s *priv;
5925 + struct dpa_percpu_priv_s *percpu_priv;
5927 + priv = netdev_priv(net_dev);
5928 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
5930 + if (netif_msg_timer(priv))
5931 + netdev_crit(net_dev, "Transmit timeout!\n");
5933 + percpu_priv->stats.tx_errors++;
5935 +EXPORT_SYMBOL(dpa_timeout);
5940 + * @param net_dev the device for which statistics are calculated
5941 + * @param stats the function fills this structure with the device's statistics
5942 + * @return the address of the structure containing the statistics
5944 + * Calculates the statistics for the given device by adding the statistics
5945 + * collected by each CPU.
5948 +dpa_get_stats64(struct net_device *net_dev,
5949 + struct rtnl_link_stats64 *stats)
5951 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5953 + u64 *netstats = (u64 *)stats;
5955 + struct dpa_percpu_priv_s *percpu_priv;
5956 + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
5958 + for_each_possible_cpu(i) {
5959 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
5961 + cpustats = (u64 *)&percpu_priv->stats;
5963 + for (j = 0; j < numstats; j++)
5964 + netstats[j] += cpustats[j];
5967 +EXPORT_SYMBOL(dpa_get_stats64);
5969 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
5971 + const int max_mtu = dpa_get_max_mtu();
5973 + /* Make sure we don't exceed the Ethernet controller's MAXFRM */
5974 + if (new_mtu < 68 || new_mtu > max_mtu) {
5975 + netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
5976 + new_mtu, 68, max_mtu);
5979 + net_dev->mtu = new_mtu;
5983 +EXPORT_SYMBOL(dpa_change_mtu);
5985 +/* .ndo_init callback */
5986 +int dpa_ndo_init(struct net_device *net_dev)
5988 + /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
5989 + * we choose conservatively and let the user explicitly set a higher
5990 + * MTU via ifconfig. Otherwise, the user may end up with different MTUs
5991 + * in the same LAN.
5992 + * If on the other hand fsl_fm_max_frm has been chosen below 1500,
5993 + * start with the maximum allowed.
5995 + int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
5997 + pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
5998 + net_dev->mtu = init_mtu;
6002 +EXPORT_SYMBOL(dpa_ndo_init);
6004 +int dpa_set_features(struct net_device *dev, netdev_features_t features)
6006 + /* Not much to do here for now */
6007 + dev->features = features;
6010 +EXPORT_SYMBOL(dpa_set_features);
6012 +netdev_features_t dpa_fix_features(struct net_device *dev,
6013 + netdev_features_t features)
6015 + netdev_features_t unsupported_features = 0;
6017 + /* In theory we should never be requested to enable features that
6018 + * we didn't set in netdev->features and netdev->hw_features at probe
6019 + * time, but double check just to be on the safe side.
6020 + * We don't support enabling Rx csum through ethtool yet
6022 + unsupported_features |= NETIF_F_RXCSUM;
6024 + features &= ~unsupported_features;
6028 +EXPORT_SYMBOL(dpa_fix_features);
6030 +#ifdef CONFIG_FSL_DPAA_TS
6031 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
6036 + ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
6039 + if (!ts || *ts == 0)
6044 + /* multiple DPA_PTP_NOMINAL_FREQ_PERIOD_NS for case of non power of 2 */
6045 + ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
6050 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
6051 + struct skb_shared_hwtstamps *shhwtstamps, const void *data)
6055 + ns = dpa_get_timestamp_ns(priv, rx_tx, data);
6060 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
6061 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
6066 +static void dpa_ts_tx_enable(struct net_device *dev)
6068 + struct dpa_priv_s *priv = netdev_priv(dev);
6069 + struct mac_device *mac_dev = priv->mac_dev;
6071 + if (mac_dev->fm_rtc_enable)
6072 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
6073 + if (mac_dev->ptp_enable)
6074 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
6076 + priv->ts_tx_en = true;
6079 +static void dpa_ts_tx_disable(struct net_device *dev)
6081 + struct dpa_priv_s *priv = netdev_priv(dev);
6084 +/* the RTC might be needed by the Rx Ts, cannot disable here
6085 + * no separate ptp_disable API for Rx/Tx, cannot disable here
6087 + struct mac_device *mac_dev = priv->mac_dev;
6089 + if (mac_dev->fm_rtc_disable)
6090 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
6092 + if (mac_dev->ptp_disable)
6093 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
6096 + priv->ts_tx_en = false;
6099 +static void dpa_ts_rx_enable(struct net_device *dev)
6101 + struct dpa_priv_s *priv = netdev_priv(dev);
6102 + struct mac_device *mac_dev = priv->mac_dev;
6104 + if (mac_dev->fm_rtc_enable)
6105 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
6106 + if (mac_dev->ptp_enable)
6107 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
6109 + priv->ts_rx_en = true;
6112 +static void dpa_ts_rx_disable(struct net_device *dev)
6114 + struct dpa_priv_s *priv = netdev_priv(dev);
6117 +/* the RTC might be needed by the Tx Ts, cannot disable here
6118 + * no separate ptp_disable API for Rx/Tx, cannot disable here
6120 + struct mac_device *mac_dev = priv->mac_dev;
6122 + if (mac_dev->fm_rtc_disable)
6123 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
6125 + if (mac_dev->ptp_disable)
6126 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
6129 + priv->ts_rx_en = false;
6132 +static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6134 + struct hwtstamp_config config;
6136 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
6139 + switch (config.tx_type) {
6140 + case HWTSTAMP_TX_OFF:
6141 + dpa_ts_tx_disable(dev);
6143 + case HWTSTAMP_TX_ON:
6144 + dpa_ts_tx_enable(dev);
6150 + if (config.rx_filter == HWTSTAMP_FILTER_NONE)
6151 + dpa_ts_rx_disable(dev);
6153 + dpa_ts_rx_enable(dev);
6154 + /* TS is set for all frame types, not only those requested */
6155 + config.rx_filter = HWTSTAMP_FILTER_ALL;
6158 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
6161 +#endif /* CONFIG_FSL_DPAA_TS */
6163 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6165 +#ifdef CONFIG_FSL_DPAA_1588
6166 + struct dpa_priv_s *priv = netdev_priv(dev);
6170 + /* at least one timestamping feature must be enabled */
6171 +#ifdef CONFIG_FSL_DPAA_TS
6172 + if (!netif_running(dev))
6176 +#ifdef CONFIG_FSL_DPAA_TS
6177 + if (cmd == SIOCSHWTSTAMP)
6178 + return dpa_ts_ioctl(dev, rq, cmd);
6179 +#endif /* CONFIG_FSL_DPAA_TS */
6181 +#ifdef CONFIG_FSL_DPAA_1588
6182 + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
6183 + if (priv->tsu && priv->tsu->valid)
6184 + ret = dpa_ioctl_1588(dev, rq, cmd);
6192 +EXPORT_SYMBOL(dpa_ioctl);
6194 +int __cold dpa_remove(struct platform_device *of_dev)
6197 + struct device *dev;
6198 + struct net_device *net_dev;
6199 + struct dpa_priv_s *priv;
6201 + dev = &of_dev->dev;
6202 + net_dev = dev_get_drvdata(dev);
6204 + priv = netdev_priv(net_dev);
6206 + dpaa_eth_sysfs_remove(dev);
6208 + dev_set_drvdata(dev, NULL);
6209 + unregister_netdev(net_dev);
6211 + err = dpa_fq_free(dev, &priv->dpa_fq_list);
6213 + qman_delete_cgr_safe(&priv->ingress_cgr);
6214 + qman_release_cgrid(priv->ingress_cgr.cgrid);
6215 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
6216 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
6218 + dpa_private_napi_del(net_dev);
6220 + dpa_bp_free(priv);
6222 + if (priv->buf_layout)
6223 + devm_kfree(dev, priv->buf_layout);
6225 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
6226 + /* remove debugfs entry for this net_device */
6227 + dpa_netdev_debugfs_remove(net_dev);
6228 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
6230 +#ifdef CONFIG_FSL_DPAA_1588
6231 + if (priv->tsu && priv->tsu->valid)
6232 + dpa_ptp_cleanup(priv);
6235 + free_netdev(net_dev);
6239 +EXPORT_SYMBOL(dpa_remove);
6241 +struct mac_device * __cold __must_check
6242 +__attribute__((nonnull))
6243 +dpa_mac_probe(struct platform_device *_of_dev)
6245 + struct device *dpa_dev, *dev;
6246 + struct device_node *mac_node;
6247 + struct platform_device *of_dev;
6248 + struct mac_device *mac_dev;
6249 +#ifdef CONFIG_FSL_DPAA_1588
6251 + const phandle *phandle_prop;
6252 + struct net_device *net_dev = NULL;
6253 + struct dpa_priv_s *priv = NULL;
6254 + struct device_node *timer_node;
6256 + dpa_dev = &_of_dev->dev;
6258 + mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
6259 + if (unlikely(mac_node == NULL)) {
6260 + dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
6261 + return ERR_PTR(-EFAULT);
6264 + of_dev = of_find_device_by_node(mac_node);
6265 + if (unlikely(of_dev == NULL)) {
6266 + dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
6267 + mac_node->full_name);
6268 + of_node_put(mac_node);
6269 + return ERR_PTR(-EINVAL);
6271 + of_node_put(mac_node);
6273 + dev = &of_dev->dev;
6275 + mac_dev = dev_get_drvdata(dev);
6276 + if (unlikely(mac_dev == NULL)) {
6277 + dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
6279 + return ERR_PTR(-EINVAL);
6282 +#ifdef CONFIG_FSL_DPAA_1588
6283 + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
6284 + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6285 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6286 + (mac_dev->speed == SPEED_1000)))) {
6287 + timer_node = of_find_node_by_phandle(*phandle_prop);
6289 + net_dev = dev_get_drvdata(dpa_dev);
6290 + if (timer_node && net_dev) {
6291 + priv = netdev_priv(net_dev);
6292 + if (!dpa_ptp_init(priv))
6293 + dev_info(dev, "%s: ptp 1588 is initialized.\n",
6294 + mac_node->full_name);
6299 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
6300 + if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6301 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6302 + (mac_dev->speed == SPEED_1000))) {
6303 + ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
6304 + if (ptp_priv.node) {
6305 + ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
6306 + if (unlikely(ptp_priv.of_dev == NULL)) {
6308 + "Cannot find device represented by timer_node\n");
6309 + of_node_put(ptp_priv.node);
6310 + return ERR_PTR(-EINVAL);
6312 + ptp_priv.mac_dev = mac_dev;
6318 +EXPORT_SYMBOL(dpa_mac_probe);
6320 +int dpa_set_mac_address(struct net_device *net_dev, void *addr)
6322 + const struct dpa_priv_s *priv;
6324 + struct mac_device *mac_dev;
6326 + priv = netdev_priv(net_dev);
6328 + _errno = eth_mac_addr(net_dev, addr);
6330 + if (netif_msg_drv(priv))
6331 + netdev_err(net_dev,
6332 + "eth_mac_addr() = %d\n",
6337 + mac_dev = priv->mac_dev;
6339 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
6340 + net_dev->dev_addr);
6342 + if (netif_msg_drv(priv))
6343 + netdev_err(net_dev,
6344 + "mac_dev->change_addr() = %d\n",
6351 +EXPORT_SYMBOL(dpa_set_mac_address);
6353 +void dpa_set_rx_mode(struct net_device *net_dev)
6356 + const struct dpa_priv_s *priv;
6358 + priv = netdev_priv(net_dev);
6360 + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
6361 + priv->mac_dev->promisc = !priv->mac_dev->promisc;
6362 + _errno = priv->mac_dev->set_promisc(
6363 + priv->mac_dev->get_mac_handle(priv->mac_dev),
6364 + priv->mac_dev->promisc);
6365 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6366 + netdev_err(net_dev,
6367 + "mac_dev->set_promisc() = %d\n",
6371 + _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
6372 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6373 + netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
6375 +EXPORT_SYMBOL(dpa_set_rx_mode);
6377 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
6378 + struct dpa_buffer_layout_s *layout)
6380 + struct fm_port_params params;
6383 + layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
6384 + layout[RX].parse_results = true;
6385 + layout[RX].hash_results = true;
6386 +#ifdef CONFIG_FSL_DPAA_TS
6387 + layout[RX].time_stamp = true;
6389 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], ¶ms);
6390 + layout[RX].manip_extra_space = params.manip_extra_space;
6391 + /* a value of zero for data alignment means "don't care", so align to
6392 + * a non-zero value to prevent FMD from using its own default
6394 + layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6397 + layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
6398 + layout[TX].parse_results = true;
6399 + layout[TX].hash_results = true;
6400 +#ifdef CONFIG_FSL_DPAA_TS
6401 + layout[TX].time_stamp = true;
6403 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], ¶ms);
6404 + layout[TX].manip_extra_space = params.manip_extra_space;
6405 + layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6407 +EXPORT_SYMBOL(dpa_set_buffers_layout);
6409 +int __attribute__((nonnull))
6410 +dpa_bp_alloc(struct dpa_bp *dpa_bp)
6413 + struct bman_pool_params bp_params;
6414 + struct platform_device *pdev;
6416 + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
6417 + pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers");
6421 + memset(&bp_params, 0, sizeof(struct bman_pool_params));
6422 +#ifdef CONFIG_FMAN_PFC
6423 + bp_params.flags = BMAN_POOL_FLAG_THRESH;
6424 + bp_params.thresholds[0] = bp_params.thresholds[2] =
6425 + CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
6426 + bp_params.thresholds[1] = bp_params.thresholds[3] =
6427 + CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
6430 + /* If the pool is already specified, we only create one per bpid */
6431 + if (dpa_bpid2pool_use(dpa_bp->bpid))
6434 + if (dpa_bp->bpid == 0)
6435 + bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
6437 + bp_params.bpid = dpa_bp->bpid;
6439 + dpa_bp->pool = bman_new_pool(&bp_params);
6440 + if (unlikely(dpa_bp->pool == NULL)) {
6441 + pr_err("bman_new_pool() failed\n");
6445 + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
6447 + pdev = platform_device_register_simple("dpaa_eth_bpool",
6448 + dpa_bp->bpid, NULL, 0);
6449 + if (IS_ERR(pdev)) {
6450 + pr_err("platform_device_register_simple() failed\n");
6451 + err = PTR_ERR(pdev);
6452 + goto pdev_register_failed;
6455 + struct dma_map_ops *ops = get_dma_ops(&pdev->dev);
6456 + ops->dma_supported = NULL;
6458 + err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
6460 + pr_err("dma_coerce_mask_and_coherent() failed\n");
6461 + goto pdev_mask_failed;
6463 +#ifdef CONFIG_FMAN_ARM
6464 + /* force coherency */
6465 + pdev->dev.archdata.dma_coherent = true;
6466 + arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
6469 + dpa_bp->dev = &pdev->dev;
6471 + if (dpa_bp->seed_cb) {
6472 + err = dpa_bp->seed_cb(dpa_bp);
6474 + goto pool_seed_failed;
6477 + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
6483 + platform_device_unregister(pdev);
6484 +pdev_register_failed:
6485 + bman_free_pool(dpa_bp->pool);
6489 +EXPORT_SYMBOL(dpa_bp_alloc);
6491 +void dpa_bp_drain(struct dpa_bp *bp)
6496 + struct bm_buffer bmb[8];
6499 + ret = bman_acquire(bp->pool, bmb, num, 0);
6502 + /* we have less than 8 buffers left;
6503 + * drain them one by one
6509 + /* Pool is fully drained */
6514 + for (i = 0; i < num; i++) {
6515 + dma_addr_t addr = bm_buf_addr(&bmb[i]);
6517 + dma_unmap_single(bp->dev, addr, bp->size,
6518 + DMA_BIDIRECTIONAL);
6520 + bp->free_buf_cb(phys_to_virt(addr));
6522 + } while (ret > 0);
6524 +EXPORT_SYMBOL(dpa_bp_drain);
6526 +static void __cold __attribute__((nonnull))
6527 +_dpa_bp_free(struct dpa_bp *dpa_bp)
6529 + struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
6531 + /* the mapping between bpid and dpa_bp is done very late in the
6532 + * allocation procedure; if something failed before the mapping, the bp
6533 + * was not configured, therefore we don't need the below instructions
6538 + if (!atomic_dec_and_test(&bp->refs))
6541 + if (bp->free_buf_cb)
6544 + dpa_bp_array[bp->bpid] = NULL;
6545 + bman_free_pool(bp->pool);
6548 + platform_device_unregister(to_platform_device(bp->dev));
6551 +void __cold __attribute__((nonnull))
6552 +dpa_bp_free(struct dpa_priv_s *priv)
6557 + for (i = 0; i < priv->bp_count; i++)
6558 + _dpa_bp_free(&priv->dpa_bp[i]);
6560 +EXPORT_SYMBOL(dpa_bp_free);
6562 +struct dpa_bp *dpa_bpid2pool(int bpid)
6564 + return dpa_bp_array[bpid];
6566 +EXPORT_SYMBOL(dpa_bpid2pool);
6568 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
6570 + dpa_bp_array[bpid] = dpa_bp;
6571 + atomic_set(&dpa_bp->refs, 1);
6574 +bool dpa_bpid2pool_use(int bpid)
6576 + if (dpa_bpid2pool(bpid)) {
6577 + atomic_inc(&dpa_bp_array[bpid]->refs);
6584 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
6585 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
6586 + struct net_device *sb_dev,
6587 + select_queue_fallback_t fallback)
6589 + return dpa_get_queue_mapping(skb);
6591 +EXPORT_SYMBOL(dpa_select_queue);
6594 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
6597 + struct list_head *list,
6598 + enum dpa_fq_type fq_type)
6601 + struct dpa_fq *dpa_fq;
6603 + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
6604 + if (dpa_fq == NULL)
6607 + for (i = 0; i < fq_count; i++) {
6608 + dpa_fq[i].fq_type = fq_type;
6609 + if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
6610 + dpa_fq[i].fqid = fq_start ?
6611 + DPAA_ETH_FQ_DELTA + fq_start + i : 0;
6613 + dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
6615 + list_add_tail(&dpa_fq[i].list, list);
6618 +#ifdef CONFIG_FMAN_PFC
6619 + if (fq_type == FQ_TYPE_TX)
6620 + for (i = 0; i < fq_count; i++)
6621 + dpa_fq[i].wq = i / dpa_num_cpus;
6624 + for (i = 0; i < fq_count; i++)
6625 + _dpa_assign_wq(dpa_fq + i);
6629 +EXPORT_SYMBOL(dpa_fq_alloc);
6631 +/* Probing of FQs for MACful ports */
6632 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
6633 + struct fm_port_fqs *port_fqs,
6634 + bool alloc_tx_conf_fqs,
6635 + enum port_type ptype)
6637 + struct fqid_cell *fqids = NULL;
6638 + const void *fqids_off = NULL;
6639 + struct dpa_fq *dpa_fq = NULL;
6640 + struct device_node *np = dev->of_node;
6644 + if (ptype == TX && alloc_tx_conf_fqs) {
6645 + if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
6646 + tx_confirm_fqids->count, list,
6647 + FQ_TYPE_TX_CONF_MQ))
6648 + goto fq_alloc_failed;
6651 + fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
6652 + if (fqids_off == NULL) {
6653 + /* No dts definition, so use the defaults. */
6654 + fqids = default_fqids[ptype];
6657 + num_ranges = lenp / sizeof(*fqids);
6659 + fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
6661 + if (fqids == NULL)
6662 + goto fqids_alloc_failed;
6664 + /* convert to CPU endianess */
6665 + for (i = 0; i < num_ranges; i++) {
6666 + fqids[i].start = be32_to_cpup(fqids_off +
6667 + i * sizeof(*fqids));
6668 + fqids[i].count = be32_to_cpup(fqids_off +
6669 + i * sizeof(*fqids) + sizeof(__be32));
6673 + for (i = 0; i < num_ranges; i++) {
6676 + /* The first queue is the error queue */
6677 + if (fqids[i].count != 1)
6678 + goto invalid_error_queue;
6680 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6681 + fqids[i].count, list,
6683 + FQ_TYPE_RX_ERROR :
6684 + FQ_TYPE_TX_ERROR);
6685 + if (dpa_fq == NULL)
6686 + goto fq_alloc_failed;
6689 + port_fqs->rx_errq = &dpa_fq[0];
6691 + port_fqs->tx_errq = &dpa_fq[0];
6694 + /* the second queue is the default queue */
6695 + if (fqids[i].count != 1)
6696 + goto invalid_default_queue;
6698 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6699 + fqids[i].count, list,
6701 + FQ_TYPE_RX_DEFAULT :
6702 + FQ_TYPE_TX_CONFIRM);
6703 + if (dpa_fq == NULL)
6704 + goto fq_alloc_failed;
6707 + port_fqs->rx_defq = &dpa_fq[0];
6709 + port_fqs->tx_defq = &dpa_fq[0];
6712 + /* all subsequent queues are either RX* PCD or Tx */
6713 + if (ptype == RX) {
6714 + if (!dpa_fq_alloc(dev, fqids[i].start,
6715 + fqids[i].count, list,
6716 + FQ_TYPE_RX_PCD) ||
6717 + !dpa_fq_alloc(dev, fqids[i].start,
6718 + fqids[i].count, list,
6719 + FQ_TYPE_RX_PCD_HI_PRIO))
6720 + goto fq_alloc_failed;
6722 + if (!dpa_fq_alloc(dev, fqids[i].start,
6723 + fqids[i].count, list,
6725 + goto fq_alloc_failed;
6734 +fqids_alloc_failed:
6735 + dev_err(dev, "Cannot allocate memory for frame queues\n");
6738 +invalid_default_queue:
6739 +invalid_error_queue:
6740 + dev_err(dev, "Too many default or error queues\n");
6743 +EXPORT_SYMBOL(dpa_fq_probe_mac);
6745 +static u32 rx_pool_channel;
6746 +static DEFINE_SPINLOCK(rx_pool_channel_init);
6748 +int dpa_get_channel(void)
6750 + spin_lock(&rx_pool_channel_init);
6751 + if (!rx_pool_channel) {
6753 + int ret = qman_alloc_pool(&pool);
6755 + rx_pool_channel = pool;
6757 + spin_unlock(&rx_pool_channel_init);
6758 + if (!rx_pool_channel)
6760 + return rx_pool_channel;
6762 +EXPORT_SYMBOL(dpa_get_channel);
6764 +void dpa_release_channel(void)
6766 + qman_release_pool(rx_pool_channel);
6768 +EXPORT_SYMBOL(dpa_release_channel);
6770 +void dpaa_eth_add_channel(u16 channel)
6772 + const cpumask_t *cpus = qman_affine_cpus();
6773 + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
6775 + struct qman_portal *portal;
6777 + for_each_cpu(cpu, cpus) {
6778 + portal = (struct qman_portal *)qman_get_affine_portal(cpu);
6779 + qman_p_static_dequeue_add(portal, pool);
6782 +EXPORT_SYMBOL(dpaa_eth_add_channel);
6785 + * Congestion group state change notification callback.
6786 + * Stops the device's egress queues while they are congested and
6787 + * wakes them upon exiting congested state.
6788 + * Also updates some CGR-related stats.
6790 +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
6794 + struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
6795 + struct dpa_priv_s, cgr_data.cgr);
6798 + priv->cgr_data.congestion_start_jiffies = jiffies;
6799 + netif_tx_stop_all_queues(priv->net_dev);
6800 + priv->cgr_data.cgr_congested_count++;
6802 + priv->cgr_data.congested_jiffies +=
6803 + (jiffies - priv->cgr_data.congestion_start_jiffies);
6804 + netif_tx_wake_all_queues(priv->net_dev);
6808 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
6810 + struct qm_mcc_initcgr initcgr;
6814 + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
6816 + pr_err("Error %d allocating CGR ID\n", err);
6819 + priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
6821 + /* Enable Congestion State Change Notifications and CS taildrop */
6822 + initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
6823 + initcgr.cgr.cscn_en = QM_CGR_EN;
6825 + /* Set different thresholds based on the MAC speed.
6826 + * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
6827 + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
6828 + * In such cases, we ought to reconfigure the threshold, too.
6830 + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
6831 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
6833 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
6834 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
6836 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
6837 + initcgr.cgr.cstd_en = QM_CGR_EN;
6839 + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
6842 + pr_err("Error %d creating CGR with ID %d\n", err,
6843 + priv->cgr_data.cgr.cgrid);
6844 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
6847 + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
6848 + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
6849 + priv->cgr_data.cgr.chan);
6854 +EXPORT_SYMBOL(dpaa_eth_cgr_init);
6856 +static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
6857 + struct dpa_fq *fq,
6858 + const struct qman_fq *template)
6860 + fq->fq_base = *template;
6861 + fq->net_dev = priv->net_dev;
6863 + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
6864 + fq->channel = priv->channel;
6867 +static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
6868 + struct dpa_fq *fq,
6869 + struct fm_port *port,
6870 + const struct qman_fq *template)
6872 + fq->fq_base = *template;
6873 + fq->net_dev = priv->net_dev;
6876 + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
6877 + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
6879 + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+/* Walk the interface's FQ list and initialize each frame queue's software
+ * state (callbacks, channel, fqid) according to its type. PCD FQs are
+ * round-robined across the affine QMan portals; Tx FQs are collected into
+ * priv->egress_fqs and confirmation FQs into priv->conf_fqs. Finally, if
+ * fewer Tx FQs than DPAA_ETH_TX_QUEUES exist (static DT allocation), the
+ * existing ones are reused so every CPU still maps to a Tx queue.
+ * NOTE(review): switch-case `break` statements and some braces are elided
+ * in this extract; the visible fall-through is an artifact, not a bug.
+ */
6883 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
6884 +		struct fm_port *tx_port)
6886 +	struct dpa_fq *fq;
6887 +	uint16_t portals[NR_CPUS];
6888 +	int cpu, portal_cnt = 0, num_portals = 0;
6889 +	uint32_t pcd_fqid, pcd_fqid_hi_prio;
6890 +	const cpumask_t *affine_cpus = qman_affine_cpus();
6891 +	int egress_cnt = 0, conf_cnt = 0;
6893 +	/* Prepare for PCD FQs init */
6894 +	for_each_cpu(cpu, affine_cpus)
6895 +		portals[num_portals++] = qman_affine_channel(cpu);
6896 +	if (num_portals == 0)
6897 +		dev_err(priv->net_dev->dev.parent,
6898 +			"No Qman software (affine) channels found");
6900 +	pcd_fqid = (priv->mac_dev) ?
6901 +		DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
6902 +	pcd_fqid_hi_prio = (priv->mac_dev) ?
6903 +		DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
6905 +	/* Initialize each FQ in the list */
6906 +	list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6907 +		switch (fq->fq_type) {
6908 +		case FQ_TYPE_RX_DEFAULT:
6909 +			BUG_ON(!priv->mac_dev);
6910 +			dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6912 +		case FQ_TYPE_RX_ERROR:
6913 +			BUG_ON(!priv->mac_dev);
6914 +			dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
6916 +		case FQ_TYPE_RX_PCD:
6917 +			/* For MACless we can't have dynamic Rx queues */
6918 +			BUG_ON(!priv->mac_dev && !fq->fqid);
6919 +			dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6921 +				fq->fqid = pcd_fqid++;
6922 +			fq->channel = portals[portal_cnt];
6923 +			portal_cnt = (portal_cnt + 1) % num_portals;
6925 +		case FQ_TYPE_RX_PCD_HI_PRIO:
6926 +			/* For MACless we can't have dynamic Hi Pri Rx queues */
6927 +			BUG_ON(!priv->mac_dev && !fq->fqid);
6928 +			dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6930 +				fq->fqid = pcd_fqid_hi_prio++;
6931 +			fq->channel = portals[portal_cnt];
6932 +			portal_cnt = (portal_cnt + 1) % num_portals;
6935 +			dpa_setup_egress(priv, fq, tx_port,
6936 +					 &fq_cbs->egress_ern);
6937 +			/* If we have more Tx queues than the number of cores,
6938 +			 * just ignore the extra ones.
6940 +			if (egress_cnt < DPAA_ETH_TX_QUEUES)
6941 +				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6943 +		case FQ_TYPE_TX_CONFIRM:
6944 +			BUG_ON(!priv->mac_dev);
6945 +			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6947 +		case FQ_TYPE_TX_CONF_MQ:
6948 +			BUG_ON(!priv->mac_dev);
6949 +			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6950 +			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
6952 +		case FQ_TYPE_TX_ERROR:
6953 +			BUG_ON(!priv->mac_dev);
6954 +			dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
6957 +			dev_warn(priv->net_dev->dev.parent,
6958 +				 "Unknown FQ type detected!\n");
6963 +	/* The number of Tx queues may be smaller than the number of cores, if
6964 +	 * the Tx queue range is specified in the device tree instead of being
6965 +	 * dynamically allocated.
6966 +	 * Make sure all CPUs receive a corresponding Tx queue.
6968 +	while (egress_cnt < DPAA_ETH_TX_QUEUES) {
6969 +		list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6970 +			if (fq->fq_type != FQ_TYPE_TX)
6972 +			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6973 +			if (egress_cnt == DPAA_ETH_TX_QUEUES)
6978 +EXPORT_SYMBOL(dpa_fq_setup);
+/* Create one FQ in QMan (qman_create_fq) and, unless NO_MODIFY, build and
+ * commit its initfq descriptor: placement (channel/WQ), congestion-group
+ * membership with overhead accounting for egress FQs, optional taildrop,
+ * Tx->confirmation pairing via context_a/context_b, ingress CGR membership,
+ * and dequeue stashing for ingress FQs. On qman_init_fq() failure the FQ is
+ * destroyed again (except the tolerated hi-prio PCD case).
+ * Returns 0 on success, negative errno otherwise.
+ * NOTE(review): several hunk lines (declarations of _errno/queue_id, error
+ * checks, closing braces, taildrop threshold argument) are elided here.
+ */
6980 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
6983 +	const struct dpa_priv_s *priv;
6984 +	struct device *dev;
6985 +	struct qman_fq *fq;
6986 +	struct qm_mcc_initfq initfq;
6987 +	struct qman_fq *confq;
6990 +	priv = netdev_priv(dpa_fq->net_dev);
6991 +	dev = dpa_fq->net_dev->dev.parent;
6993 +	if (dpa_fq->fqid == 0)
6994 +		dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
6996 +	dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
6998 +	_errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
7000 +		dev_err(dev, "qman_create_fq() failed\n");
7003 +	fq = &dpa_fq->fq_base;
7005 +	if (dpa_fq->init) {
7006 +		memset(&initfq, 0, sizeof(initfq));
7008 +		initfq.we_mask = QM_INITFQ_WE_FQCTRL;
7009 +		/* FIXME: why would we want to keep an empty FQ in cache? */
7010 +		initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
7012 +		/* Try to reduce the number of portal interrupts for
7013 +		 * Tx Confirmation FQs.
7015 +		if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
7016 +			initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
7018 +		/* FQ placement */
7019 +		initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
7021 +		initfq.fqd.dest.channel = dpa_fq->channel;
7022 +		initfq.fqd.dest.wq = dpa_fq->wq;
7024 +		/* Put all egress queues in a congestion group of their own.
7025 +		 * Sensu stricto, the Tx confirmation queues are Rx FQs,
7026 +		 * rather than Tx - but they nonetheless account for the
7027 +		 * memory footprint on behalf of egress traffic. We therefore
7028 +		 * place them in the netdev's CGR, along with the Tx FQs.
7030 +		if (dpa_fq->fq_type == FQ_TYPE_TX ||
7031 +		    dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
7032 +		    dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
7033 +			initfq.we_mask |= QM_INITFQ_WE_CGID;
7034 +			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
7035 +			initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
7036 +			/* Set a fixed overhead accounting, in an attempt to
7037 +			 * reduce the impact of fixed-size skb shells and the
7038 +			 * driver's needed headroom on system memory. This is
7039 +			 * especially the case when the egress traffic is
7040 +			 * composed of small datagrams.
7041 +			 * Unfortunately, QMan's OAL value is capped to an
7042 +			 * insufficient value, but even that is better than
7043 +			 * no overhead accounting at all.
7045 +			initfq.we_mask |= QM_INITFQ_WE_OAC;
7046 +			initfq.fqd.oac_init.oac = QM_OAC_CG;
7047 +			initfq.fqd.oac_init.oal =
7048 +				(signed char)(min(sizeof(struct sk_buff) +
7049 +				priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
7053 +			initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
7054 +			qm_fqd_taildrop_set(&initfq.fqd.td,
7056 +			initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
7059 +		/* Configure the Tx confirmation queue, now that we know
7060 +		 * which Tx queue it pairs with.
7062 +		if (dpa_fq->fq_type == FQ_TYPE_TX) {
7063 +			queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
7064 +			if (queue_id >= 0) {
7065 +				confq = priv->conf_fqs[queue_id];
7067 +					initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
7068 +			/* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
7069 +			 * A2V=1 (contextA A2 field is valid)
7070 +			 * A0V=1 (contextA A0 field is valid)
7071 +			 * B0V=1 (contextB field is valid)
7072 +			 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
7073 +			 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
7075 +					initfq.fqd.context_a.hi = 0x1e000000;
7076 +					initfq.fqd.context_a.lo = 0x80000000;
7081 +		/* Put all *private* ingress queues in our "ingress CGR". */
7082 +		if (priv->use_ingress_cgr &&
7083 +		    (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
7084 +		     dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
7085 +		     dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
7086 +		     dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
7087 +			initfq.we_mask |= QM_INITFQ_WE_CGID;
7088 +			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
7089 +			initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
7090 +			/* Set a fixed overhead accounting, just like for the
7093 +			initfq.we_mask |= QM_INITFQ_WE_OAC;
7094 +			initfq.fqd.oac_init.oac = QM_OAC_CG;
7095 +			initfq.fqd.oac_init.oal =
7096 +				(signed char)(min(sizeof(struct sk_buff) +
7097 +				priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
7100 +		/* Initialization common to all ingress queues */
7101 +		if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
7102 +			initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
7103 +			initfq.fqd.fq_ctrl |=
7104 +				QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
7105 +			initfq.fqd.context_a.stashing.exclusive =
7106 +				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
7107 +				QM_STASHING_EXCL_ANNOTATION;
7108 +			initfq.fqd.context_a.stashing.data_cl = 2;
7109 +			initfq.fqd.context_a.stashing.annotation_cl = 1;
7110 +			initfq.fqd.context_a.stashing.context_cl =
7111 +				DIV_ROUND_UP(sizeof(struct qman_fq), 64);
7114 +		_errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
7116 +			if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno)) {
7119 +			dev_err(dev, "qman_init_fq(%u) = %d\n",
7120 +				qman_fq_fqid(fq), _errno);
7121 +			qman_destroy_fq(fq, 0);
7127 +	dpa_fq->fqid = qman_fq_fqid(fq);
7131 +EXPORT_SYMBOL(dpa_fq_init);
+/* Tear down a single FQ: if the driver initialized it, retire it and take
+ * it out-of-service in QMan (errors are logged but teardown continues),
+ * then destroy the software object and unlink it from the per-netdev list.
+ * Returns 0 on success, negative errno on the first failure encountered.
+ * NOTE(review): the return statement and some braces are elided here.
+ */
7133 +int __cold __attribute__((nonnull))
7134 +_dpa_fq_free(struct device *dev, struct qman_fq *fq)
7136 +	int _errno, __errno;
7137 +	struct dpa_fq *dpa_fq;
7138 +	const struct dpa_priv_s *priv;
7142 +	dpa_fq = container_of(fq, struct dpa_fq, fq_base);
7143 +	priv = netdev_priv(dpa_fq->net_dev);
7145 +	if (dpa_fq->init) {
7146 +		_errno = qman_retire_fq(fq, NULL);
7147 +		if (unlikely(_errno < 0) && netif_msg_drv(priv))
7148 +			dev_err(dev, "qman_retire_fq(%u) = %d\n",
7149 +				qman_fq_fqid(fq), _errno);
7151 +		__errno = qman_oos_fq(fq);
7152 +		if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
7153 +			dev_err(dev, "qman_oos_fq(%u) = %d\n",
7154 +				qman_fq_fqid(fq), __errno);
7160 +	qman_destroy_fq(fq, 0);
7161 +	list_del(&dpa_fq->list);
7165 +EXPORT_SYMBOL(_dpa_fq_free);
+/* Free every FQ on the given list via _dpa_fq_free(), remembering the first
+ * error while still attempting to free the remaining FQs.
+ * NOTE(review): _errno initialization/update and return are elided here.
+ */
7167 +int __cold __attribute__((nonnull))
7168 +dpa_fq_free(struct device *dev, struct list_head *list)
7170 +	int _errno, __errno;
7171 +	struct dpa_fq *dpa_fq, *tmp;
7174 +	list_for_each_entry_safe(dpa_fq, tmp, list, list) {
7175 +		__errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
7176 +		if (unlikely(__errno < 0) && _errno >= 0)
7182 +EXPORT_SYMBOL(dpa_fq_free);
+/* Initialize every FQ on the list via dpa_fq_init(). A failure on an RX PCD
+ * high-priority FQ is tolerated (logged once, the FQ is freed and skipped);
+ * other failures are recorded and propagated.
+ * NOTE(review): print_msg's initial-value line, list_del/continue handling
+ * and the return path are elided in this extract.
+ */
7184 +int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable)
7186 +	int _errno, __errno;
7187 +	struct dpa_fq *dpa_fq, *tmp;
7188 +	static bool print_msg __read_mostly;
7192 +	list_for_each_entry_safe(dpa_fq, tmp, list, list) {
7193 +		__errno = dpa_fq_init(dpa_fq, td_enable);
7194 +		if (unlikely(__errno < 0) && _errno >= 0) {
7195 +			if (DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, __errno)) {
7198 +					"Skip RX PCD High Priority FQs initialization\n");
7199 +				print_msg = false;
7201 +				if (_dpa_fq_free(dev, (struct qman_fq *)dpa_fq))
7203 +					"Error freeing frame queues\n");
7213 +EXPORT_SYMBOL(dpa_fqs_init);
+/* Configure an FMan Tx port: zero the parameter block and hand the error /
+ * default FQ ids plus buffer layout to fm_set_tx_port_params() via the
+ * dpaa_eth_init_port() macro. Fragmentation is disabled for Tx.
+ * NOTE(review): the return-type line of this function is elided here.
+ */
7215 +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
7216 +		struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
7218 +	struct fm_port_params tx_port_param;
7219 +	bool frag_enabled = false;
7221 +	memset(&tx_port_param, 0, sizeof(tx_port_param));
7222 +	dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
7223 +			   buf_layout, frag_enabled);
+/* Configure an FMan Rx port: attach up to ARRAY_SIZE(pool_param) buffer
+ * pools (id + size per pool), then apply error/default FQ ids and buffer
+ * layout through the dpaa_eth_init_port() macro.
+ * NOTE(review): the return-type line and the loop variable declaration are
+ * elided in this extract.
+ */
7227 +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
7228 +		struct dpa_fq *errq, struct dpa_fq *defq,
7229 +		struct dpa_buffer_layout_s *buf_layout)
7231 +	struct fm_port_params rx_port_param;
7233 +	bool frag_enabled = false;
7235 +	memset(&rx_port_param, 0, sizeof(rx_port_param));
7236 +	count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
7237 +	rx_port_param.num_pools = (uint8_t)count;
7238 +	for (i = 0; i < count; i++) {
7239 +		if (i >= rx_port_param.num_pools)
7241 +		rx_port_param.pool_param[i].id = bp[i].bpid;
7242 +		rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
7245 +	dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
7246 +			   buf_layout, frag_enabled);
+/* PCD FQ-id allocation callbacks: declared weak when the FMan PCD tester is
+ * enabled (so the tester can override them), otherwise defined locally as
+ * stubs that log a critical message. The stubs are bound to the Rx port in
+ * dpaa_eth_init_ports() below.
+ * NOTE(review): the #else line and the stubs' return statements are elided
+ * in this extract.
+ */
7249 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
7250 +/* Defined as weak, to be implemented by fman pcd tester. */
7251 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
7252 +__attribute__((weak));
7254 +int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
7256 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
7258 +int dpa_free_pcd_fqids(struct device *, uint32_t);
7260 +#endif /* CONFIG_FSL_SDK_FMAN_TEST */
7263 +int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
7264 +			uint8_t alignment, uint32_t *base_fqid)
7266 +	dev_crit(dev, "callback not implemented!\n");
7271 +int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
7274 +	dev_crit(dev, "callback not implemented!\n");
+/* Initialize both FMan ports of a MAC device (Tx first, then Rx with its
+ * buffer pools) and register the PCD FQ-id alloc/free callbacks on the Rx
+ * port via fm_port_pcd_bind().
+ */
7279 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7280 +		struct dpa_bp *bp, size_t count,
7281 +		struct fm_port_fqs *port_fqs,
7282 +		struct dpa_buffer_layout_s *buf_layout,
7283 +		struct device *dev)
7285 +	struct fm_port_pcd_param rx_port_pcd_param;
7286 +	struct fm_port *rxport = mac_dev->port_dev[RX];
7287 +	struct fm_port *txport = mac_dev->port_dev[TX];
7289 +	dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
7290 +			      port_fqs->tx_defq, &buf_layout[TX]);
7291 +	dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
7292 +			      port_fqs->rx_defq, &buf_layout[RX]);
7294 +	rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
7295 +	rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
7296 +	rx_port_pcd_param.dev = dev;
7297 +	fm_port_pcd_bind(rxport, &rx_port_pcd_param);
+/* Release all buffers described by a scatter-gather table back to BMan,
+ * batching up to DPA_BUFF_RELEASE_MAX entries per bman_release() call and
+ * flushing a batch whenever the pool id changes or the final SG entry is
+ * reached. bman_release() is retried (spun) until it succeeds.
+ * NOTE(review): loop-variable declarations, do/while openers and the
+ * cpu_relax()-style retry body are elided in this extract.
+ */
7301 +void dpa_release_sgt(struct qm_sg_entry *sgt)
7303 +	struct dpa_bp *dpa_bp;
7304 +	struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
7307 +	memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
7310 +		dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
7311 +		DPA_BUG_ON(!dpa_bp);
7315 +			DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
7316 +			bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
7319 +		} while (j < ARRAY_SIZE(bmb) &&
7320 +				!qm_sg_entry_get_final(&sgt[i-1]) &&
7321 +				qm_sg_entry_get_bpid(&sgt[i-1]) ==
7322 +				qm_sg_entry_get_bpid(&sgt[i]));
7324 +		while (bman_release(dpa_bp->pool, bmb, j, 0))
7326 +	} while (!qm_sg_entry_get_final(&sgt[i-1]));
+/* Return a frame descriptor's buffer(s) to BMan. For S/G frames the buffer
+ * is unmapped, the SG table's buffers are released via dpa_release_sgt(),
+ * and the head buffer is re-mapped before being released itself; a DMA
+ * mapping failure is logged and the release of that buffer is abandoned.
+ * bman_release() is retried until it succeeds.
+ * NOTE(review): local declarations (vaddr/addr), braces and the early
+ * return after the mapping error are elided in this extract.
+ */
7330 +void __attribute__((nonnull))
7331 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
7333 +	struct qm_sg_entry *sgt;
7334 +	struct dpa_bp *dpa_bp;
7335 +	struct bm_buffer bmb;
7340 +	bm_buffer_set64(&bmb, qm_fd_addr(fd));
7342 +	dpa_bp = dpa_bpid2pool(fd->bpid);
7343 +	DPA_BUG_ON(!dpa_bp);
7345 +	if (fd->format == qm_fd_sg) {
7346 +		vaddr = phys_to_virt(qm_fd_addr(fd));
7347 +		sgt = vaddr + dpa_fd_offset(fd);
7349 +		dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
7350 +				 DMA_BIDIRECTIONAL);
7352 +		dpa_release_sgt(sgt);
7353 +		addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
7354 +				      DMA_BIDIRECTIONAL);
7355 +		if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
7356 +			dev_err(dpa_bp->dev, "DMA mapping failed");
7359 +		bm_buffer_set64(&bmb, addr);
7362 +	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+/* Bump the per-CPU Enqueue Rejection Notification counter matching the
+ * rejection code carried in the message (CGR/WRED/taildrop/ORP conditions).
+ * NOTE(review): switch-case `break` statements are elided in this extract;
+ * the visible fall-through is an artifact of the sampled patch.
+ */
7367 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7368 +	       const struct qm_mr_entry *msg)
7370 +	switch (msg->ern.rc & QM_MR_RC_MASK) {
7371 +	case QM_MR_RC_CGR_TAILDROP:
7372 +		percpu_priv->ern_cnt.cg_tdrop++;
7374 +	case QM_MR_RC_WRED:
7375 +		percpu_priv->ern_cnt.wred++;
7377 +	case QM_MR_RC_ERROR:
7378 +		percpu_priv->ern_cnt.err_cond++;
7380 +	case QM_MR_RC_ORPWINDOW_EARLY:
7381 +		percpu_priv->ern_cnt.early_window++;
7383 +	case QM_MR_RC_ORPWINDOW_LATE:
7384 +		percpu_priv->ern_cnt.late_window++;
7386 +	case QM_MR_RC_FQ_TAILDROP:
7387 +		percpu_priv->ern_cnt.fq_tdrop++;
7389 +	case QM_MR_RC_ORPWINDOW_RETIRED:
7390 +		percpu_priv->ern_cnt.fq_retired++;
7392 +	case QM_MR_RC_ORP_ZERO:
7393 +		percpu_priv->ern_cnt.orp_zero++;
7397 +EXPORT_SYMBOL(count_ern);
+/* NOTE(review): the comment opener and some interior lines (l4_proto
+ * declaration, case labels for ETH_P_IP/ETH_P_IPV6 and IPPROTO_UDP/TCP,
+ * breaks, the success/error return paths) are elided in this extract.
+ */
7400 + * Turn on HW checksum computation for this outgoing frame.
7401 + * If the current protocol is not something we support in this regard
7402 + * (or if the stack has already computed the SW checksum), we do nothing.
7404 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
7407 + * Note that this function may modify the fd->cmd field and the skb data buffer
7408 + * (the Parse Results area).
7410 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7411 +	struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
7413 +	fm_prs_result_t *parse_result;
7414 +	struct iphdr *iph;
7415 +	struct ipv6hdr *ipv6h = NULL;
7417 +	u16 ethertype = ntohs(skb->protocol);
7420 +	if (skb->ip_summed != CHECKSUM_PARTIAL)
7423 +	/* Note: L3 csum seems to be already computed in sw, but we can't choose
7424 +	 * L4 alone from the FM configuration anyway.
7427 +	/* Fill in some fields of the Parse Results array, so the FMan
7428 +	 * can find them as if they came from the FMan Parser.
7430 +	parse_result = (fm_prs_result_t *)parse_results;
7432 +	/* If we're dealing with VLAN, get the real Ethernet type */
7433 +	if (ethertype == ETH_P_8021Q) {
7434 +		/* We can't always assume the MAC header is set correctly
7435 +		 * by the stack, so reset to beginning of skb->data
7437 +		skb_reset_mac_header(skb);
7438 +		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
7441 +	/* Fill in the relevant L3 parse result fields
7442 +	 * and read the L4 protocol type
7444 +	switch (ethertype) {
7446 +		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
7447 +		iph = ip_hdr(skb);
7448 +		DPA_BUG_ON(iph == NULL);
7449 +		l4_proto = iph->protocol;
7452 +		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
7453 +		ipv6h = ipv6_hdr(skb);
7454 +		DPA_BUG_ON(ipv6h == NULL);
7455 +		l4_proto = ipv6h->nexthdr;
7458 +		/* We shouldn't even be here */
7459 +		if (netif_msg_tx_err(priv) && net_ratelimit())
7460 +			netdev_alert(priv->net_dev,
7461 +				     "Can't compute HW csum for L3 proto 0x%x\n",
7462 +				     ntohs(skb->protocol));
7464 +		goto return_error;
7467 +	/* Fill in the relevant L4 parse result fields */
7468 +	switch (l4_proto) {
7470 +		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
7473 +		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
7476 +		/* This can as well be a BUG() */
7477 +		if (netif_msg_tx_err(priv) && net_ratelimit())
7478 +			netdev_alert(priv->net_dev,
7479 +				     "Can't compute HW csum for L4 proto 0x%x\n",
7482 +		goto return_error;
7485 +	/* At index 0 is IPOffset_1 as defined in the Parse Results */
7486 +	parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
7487 +	parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
7489 +	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
7490 +	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
7492 +	/* On P1023 and similar platforms fd->cmd interpretation could
7493 +	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
7494 +	 * is not set so we do not need to check; in the future, if/when
7495 +	 * using context_a we need to check this bit
7501 +EXPORT_SYMBOL(dpa_enable_tx_csum);
+/* Simple toggles for the CEETM (Customer Edge Egress Traffic Management)
+ * egress path: set/clear the per-netdev ceetm_en flag consulted on Tx.
+ */
7503 +#ifdef CONFIG_FSL_DPAA_CEETM
7504 +void dpa_enable_ceetm(struct net_device *dev)
7506 +	struct dpa_priv_s *priv = netdev_priv(dev);
7507 +	priv->ceetm_en = true;
7509 +EXPORT_SYMBOL(dpa_enable_ceetm);
7511 +void dpa_disable_ceetm(struct net_device *dev)
7513 +	struct dpa_priv_s *priv = netdev_priv(dev);
7514 +	priv->ceetm_en = false;
7516 +EXPORT_SYMBOL(dpa_disable_ceetm);
7519 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
7521 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
7523 + * Redistribution and use in source and binary forms, with or without
7524 + * modification, are permitted provided that the following conditions are met:
7525 + * * Redistributions of source code must retain the above copyright
7526 + * notice, this list of conditions and the following disclaimer.
7527 + * * Redistributions in binary form must reproduce the above copyright
7528 + * notice, this list of conditions and the following disclaimer in the
7529 + * documentation and/or other materials provided with the distribution.
7530 + * * Neither the name of Freescale Semiconductor nor the
7531 + * names of its contributors may be used to endorse or promote products
7532 + * derived from this software without specific prior written permission.
7535 + * ALTERNATIVELY, this software may be distributed under the terms of the
7536 + * GNU General Public License ("GPL") as published by the Free Software
7537 + * Foundation, either version 2 of that License or (at your option) any
7540 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7541 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7542 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7543 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7544 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7545 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7546 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7547 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7548 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7549 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7552 +#ifndef __DPAA_ETH_COMMON_H
7553 +#define __DPAA_ETH_COMMON_H
7555 +#include <linux/etherdevice.h> /* struct net_device */
7556 +#include <linux/fsl_bman.h> /* struct bm_buffer */
7557 +#include <linux/of_platform.h> /* struct platform_device */
7558 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
7560 +#include "dpaa_eth.h"
7561 +#include "lnxwrp_fsl_fman.h"
+/* Fill an fm_port_params structure from the FQ ids and buffer layout and
+ * apply it with fm_set_<type>_port_params() (type is `rx` or `tx`).
+ * Implemented as a macro so the token-pasted setter can be selected at the
+ * call site. NOTE(review): the `frag_enabled)` parameter line and the
+ * do { } while (0) wrapper lines are elided in this extract.
+ */
7563 +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
7566 +	param.errq = errq_id; \
7567 +	param.defq = defq_id; \
7568 +	param.priv_data_size = buf_layout->priv_data_size; \
7569 +	param.parse_results = buf_layout->parse_results; \
7570 +	param.hash_results = buf_layout->hash_results; \
7571 +	param.frag_enable = frag_enabled; \
7572 +	param.time_stamp = buf_layout->time_stamp; \
7573 +	param.manip_extra_space = buf_layout->manip_extra_space; \
7574 +	param.data_align = buf_layout->data_align; \
7575 +	fm_set_##type##_port_params(port, &param); \
7578 +#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
7580 +#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
7582 +#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
7584 +#define DPA_RX_PCD_HI_PRIO_FQ_INIT_FAIL(dpa_fq, _errno) \
7585 + (((dpa_fq)->fq_type == FQ_TYPE_RX_PCD_HI_PRIO) && \
7587 +/* return codes for the dpaa-eth hooks */
7588 +enum dpaa_eth_hook_result {
7589 + /* fd/skb was retained by the hook.
7591 + * On the Rx path, this means the Ethernet driver will _not_
7592 + * deliver the skb to the stack. Instead, the hook implementation
7593 + * is expected to properly dispose of the skb.
7595 + * On the Tx path, the Ethernet driver's dpa_tx() function will
7596 + * immediately return NETDEV_TX_OK. The hook implementation is expected
7597 + * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
7598 + * unless you know exactly what you're doing!
7600 + * On the confirmation/error paths, the Ethernet driver will _not_
7601 + * perform any fd cleanup, nor update the interface statistics.
7604 + /* fd/skb was returned to the Ethernet driver for regular processing.
7605 + * The hook is not allowed to, for instance, reallocate the skb (as if
7606 + * by linearizing, copying, cloning or reallocating the headroom).
7611 +typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
7612 + struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
7613 +typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
7614 + struct sk_buff *skb, struct net_device *net_dev);
7615 +typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
7616 + struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
7618 +/* used in napi related functions */
7619 +extern u16 qman_portal_max;
7621 +/* from dpa_ethtool.c */
7622 +extern const struct ethtool_ops dpa_ethtool_ops;
7624 +#ifdef CONFIG_FSL_DPAA_HOOKS
7625 +/* Various hooks used for unit-testing and/or fastpath optimizations.
7626 + * Currently only one set of such hooks is supported.
7628 +struct dpaa_eth_hooks_s {
7629 + /* Invoked on the Tx private path, immediately after receiving the skb
7632 + dpaa_eth_egress_hook_t tx;
7634 + /* Invoked on the Rx private path, right before passing the skb
7635 + * up the stack. At that point, the packet's protocol id has already
7636 + * been set. The skb's data pointer is now at the L3 header, and
7637 + * skb->mac_header points to the L2 header. skb->len has been adjusted
7638 + * to be the length of L3+payload (i.e., the length of the
7639 + * original frame minus the L2 header len).
7640 + * For more details on what the skb looks like, see eth_type_trans().
7642 + dpaa_eth_ingress_hook_t rx_default;
7644 + /* Driver hook for the Rx error private path. */
7645 + dpaa_eth_confirm_hook_t rx_error;
7646 + /* Driver hook for the Tx confirmation private path. */
7647 + dpaa_eth_confirm_hook_t tx_confirm;
7648 + /* Driver hook for the Tx error private path. */
7649 + dpaa_eth_confirm_hook_t tx_error;
7652 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
7654 +extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
7657 +int dpa_netdev_init(struct net_device *net_dev,
7658 + const uint8_t *mac_addr,
7659 + uint16_t tx_timeout);
7660 +int __cold dpa_start(struct net_device *net_dev);
7661 +int __cold dpa_stop(struct net_device *net_dev);
7662 +void __cold dpa_timeout(struct net_device *net_dev);
7664 +dpa_get_stats64(struct net_device *net_dev,
7665 + struct rtnl_link_stats64 *stats);
7666 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
7667 +int dpa_ndo_init(struct net_device *net_dev);
7668 +int dpa_set_features(struct net_device *dev, netdev_features_t features);
7669 +netdev_features_t dpa_fix_features(struct net_device *dev,
7670 + netdev_features_t features);
7671 +#ifdef CONFIG_FSL_DPAA_TS
7672 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
7673 + enum port_type rx_tx, const void *data);
7674 +/* Updates the skb shared hw timestamp from the hardware timestamp */
7675 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
7676 + struct skb_shared_hwtstamps *shhwtstamps, const void *data);
7677 +#endif /* CONFIG_FSL_DPAA_TS */
7678 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7679 +int __cold dpa_remove(struct platform_device *of_dev);
7680 +struct mac_device * __cold __must_check
7681 +__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
7682 +int dpa_set_mac_address(struct net_device *net_dev, void *addr);
7683 +void dpa_set_rx_mode(struct net_device *net_dev);
7684 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
7685 + struct dpa_buffer_layout_s *layout);
7686 +int __attribute__((nonnull))
7687 +dpa_bp_alloc(struct dpa_bp *dpa_bp);
7688 +void __cold __attribute__((nonnull))
7689 +dpa_bp_free(struct dpa_priv_s *priv);
7690 +struct dpa_bp *dpa_bpid2pool(int bpid);
7691 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
7692 +bool dpa_bpid2pool_use(int bpid);
7693 +void dpa_bp_drain(struct dpa_bp *bp);
7694 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
7695 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
7696 + struct net_device *sb_dev,
7697 + select_queue_fallback_t fallback);
7699 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
7702 + struct list_head *list,
7703 + enum dpa_fq_type fq_type);
7704 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
7705 + struct fm_port_fqs *port_fqs,
7706 + bool tx_conf_fqs_per_core,
7707 + enum port_type ptype);
7708 +int dpa_get_channel(void);
7709 +void dpa_release_channel(void);
7710 +void dpaa_eth_add_channel(u16 channel);
7711 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
7712 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
7713 + struct fm_port *tx_port);
7714 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
7715 +int dpa_fqs_init(struct device *dev, struct list_head *list, bool td_enable);
7716 +int __cold __attribute__((nonnull))
7717 +dpa_fq_free(struct device *dev, struct list_head *list);
7718 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7719 + struct dpa_bp *bp, size_t count,
7720 + struct fm_port_fqs *port_fqs,
7721 + struct dpa_buffer_layout_s *buf_layout,
7722 + struct device *dev);
7723 +void dpa_release_sgt(struct qm_sg_entry *sgt);
7724 +void __attribute__((nonnull))
7725 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
7726 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7727 + const struct qm_mr_entry *msg);
7728 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7729 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
7730 +#ifdef CONFIG_FSL_DPAA_CEETM
7731 +void dpa_enable_ceetm(struct net_device *dev);
7732 +void dpa_disable_ceetm(struct net_device *dev);
7734 +struct proxy_device {
7735 + struct mac_device *mac_dev;
7738 +/* mac device control functions exposed by proxy interface*/
7739 +int dpa_proxy_start(struct net_device *net_dev);
7740 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
7741 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
7742 + struct net_device *net_dev);
7743 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
7744 + struct net_device *net_dev);
7746 +#endif /* __DPAA_ETH_COMMON_H */
7748 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
7750 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
7752 + * Redistribution and use in source and binary forms, with or without
7753 + * modification, are permitted provided that the following conditions are met:
7754 + * * Redistributions of source code must retain the above copyright
7755 + * notice, this list of conditions and the following disclaimer.
7756 + * * Redistributions in binary form must reproduce the above copyright
7757 + * notice, this list of conditions and the following disclaimer in the
7758 + * documentation and/or other materials provided with the distribution.
7759 + * * Neither the name of Freescale Semiconductor nor the
7760 + * names of its contributors may be used to endorse or promote products
7761 + * derived from this software without specific prior written permission.
7764 + * ALTERNATIVELY, this software may be distributed under the terms of the
7765 + * GNU General Public License ("GPL") as published by the Free Software
7766 + * Foundation, either version 2 of that License or (at your option) any
7769 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7770 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7771 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7772 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7773 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7774 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7775 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7776 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7777 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7778 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7781 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
7782 +#define pr_fmt(fmt) \
7783 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
7784 + KBUILD_BASENAME".c", __LINE__, __func__
7786 +#define pr_fmt(fmt) \
7787 + KBUILD_MODNAME ": " fmt
7790 +#include <linux/init.h>
7791 +#include <linux/module.h>
7792 +#include <linux/of_platform.h>
7793 +#include "dpaa_eth.h"
7794 +#include "dpaa_eth_common.h"
7795 +#include "dpaa_eth_base.h"
7796 +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
7799 +#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
7801 +MODULE_LICENSE("Dual BSD/GPL");
7803 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
7805 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
+/* PM suspend: quiesce the Rx port first, then the Tx port; if suspending
+ * Tx fails, the already-suspended Rx port is resumed to restore a
+ * consistent state. NOTE(review): error checks and the final return are
+ * elided in this extract.
+ */
7808 +static int proxy_suspend(struct device *dev)
7810 +	struct proxy_device *proxy_dev = dev_get_drvdata(dev);
7811 +	struct mac_device *mac_dev = proxy_dev->mac_dev;
7814 +	err = fm_port_suspend(mac_dev->port_dev[RX]);
7816 +		goto port_suspend_failed;
7818 +	err = fm_port_suspend(mac_dev->port_dev[TX]);
7820 +		err = fm_port_resume(mac_dev->port_dev[RX]);
7822 +port_suspend_failed:
+/* PM resume: mirror of proxy_suspend() — resume Tx first, then Rx; if
+ * resuming Rx fails, Tx is re-suspended for symmetry. NOTE(review): error
+ * checks and the final return are elided in this extract.
+ */
7826 +static int proxy_resume(struct device *dev)
7828 +	struct proxy_device *proxy_dev = dev_get_drvdata(dev);
7829 +	struct mac_device *mac_dev = proxy_dev->mac_dev;
7832 +	err = fm_port_resume(mac_dev->port_dev[TX]);
7834 +		goto port_resume_failed;
7836 +	err = fm_port_resume(mac_dev->port_dev[RX]);
7838 +		err = fm_port_suspend(mac_dev->port_dev[TX]);
7840 +port_resume_failed:
+/* Wire the suspend/resume callbacks into dev_pm_ops; PROXY_PM_OPS resolves
+ * to NULL when CONFIG_PM is disabled so the driver struct stays valid.
+ */
7844 +static const struct dev_pm_ops proxy_pm_ops = {
7845 +	.suspend = proxy_suspend,
7846 +	.resume = proxy_resume,
7849 +#define PROXY_PM_OPS (&proxy_pm_ops)
7851 +#else /* CONFIG_PM */
7853 +#define PROXY_PM_OPS NULL
7855 +#endif /* CONFIG_PM */
+/* Probe a "proxy" interface: configure MAC/ports on behalf of another
+ * partition without creating a netdev. Probes buffer pools and the MAC,
+ * allocates the proxy_device, establishes Rx/Tx buffer layouts, probes the
+ * port FQs, initializes the ports, frees the now-unneeded FQ/layout/bp
+ * allocations (the other partition owns the data path), enables the ports
+ * and logs the MAC address. On port-enable failure all ports are disabled
+ * and the proxy is removed.
+ * NOTE(review): local declarations (err, i, count), several error checks
+ * and returns, and some braces are elided in this extract.
+ */
7857 +static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
7860 +	struct device *dev;
7861 +	struct device_node *dpa_node;
7862 +	struct dpa_bp *dpa_bp;
7863 +	struct list_head proxy_fq_list;
7865 +	struct fm_port_fqs port_fqs;
7866 +	struct dpa_buffer_layout_s *buf_layout = NULL;
7867 +	struct mac_device *mac_dev;
7868 +	struct proxy_device *proxy_dev;
7870 +	dev = &_of_dev->dev;
7872 +	dpa_node = dev->of_node;
7874 +	if (!of_device_is_available(dpa_node))
7877 +	/* Get the buffer pools assigned to this interface */
7878 +	dpa_bp = dpa_bp_probe(_of_dev, &count);
7879 +	if (IS_ERR(dpa_bp))
7880 +		return PTR_ERR(dpa_bp);
7882 +	mac_dev = dpa_mac_probe(_of_dev);
7883 +	if (IS_ERR(mac_dev))
7884 +		return PTR_ERR(mac_dev);
7886 +	proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
7888 +		dev_err(dev, "devm_kzalloc() failed\n");
7892 +	proxy_dev->mac_dev = mac_dev;
7893 +	dev_set_drvdata(dev, proxy_dev);
7895 +	/* We have physical ports, so we need to establish
7896 +	 * the buffer layout.
7898 +	buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
7900 +	if (!buf_layout) {
7901 +		dev_err(dev, "devm_kzalloc() failed\n");
7904 +	dpa_set_buffers_layout(mac_dev, buf_layout);
7906 +	INIT_LIST_HEAD(&proxy_fq_list);
7908 +	memset(&port_fqs, 0, sizeof(port_fqs));
7910 +	err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
7912 +		err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
7915 +		devm_kfree(dev, buf_layout);
7919 +	/* Proxy initializer - Just configures the MAC on behalf of
7920 +	 * another partition.
7922 +	dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
7925 +	/* Proxy interfaces need to be started, and the allocated
7928 +	devm_kfree(dev, buf_layout);
7929 +	devm_kfree(dev, dpa_bp);
7931 +	/* Free FQ structures */
7932 +	devm_kfree(dev, port_fqs.rx_defq);
7933 +	devm_kfree(dev, port_fqs.rx_errq);
7934 +	devm_kfree(dev, port_fqs.tx_defq);
7935 +	devm_kfree(dev, port_fqs.tx_errq);
7937 +	for_each_port_device(i, mac_dev->port_dev) {
7938 +		err = fm_port_enable(mac_dev->port_dev[i]);
7940 +			goto port_enable_fail;
7943 +	dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
7944 +		 mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
7945 +		 mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
7947 +	return 0; /* Proxy interface initialization ended */
7950 +	for_each_port_device(i, mac_dev->port_dev)
7951 +		fm_port_disable(mac_dev->port_dev[i]);
7952 +	dpa_eth_proxy_remove(_of_dev);
/* Program the net_device's current dev_addr into the MAC hardware via
 * the mac_dev change_addr() op. Exported for use by the proxy-backed
 * net_device code.
 * NOTE(review): elided patch excerpt -- the _errno declaration, error
 * logging and return are missing from this view. */
7957 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
7958 + struct net_device *net_dev)
7960 + struct mac_device *mac_dev;
7963 + mac_dev = proxy_dev->mac_dev;
7965 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
7966 + net_dev->dev_addr);
7972 +EXPORT_SYMBOL(dpa_proxy_set_mac_address);
/* Sync the hardware Rx mode with net_dev->flags: toggle promiscuous
 * mode when it changed, then reprogram the multicast filter. */
7974 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
7975 + struct net_device *net_dev)
7977 + struct mac_device *mac_dev = proxy_dev->mac_dev;
 /* Only touch the hardware when the promiscuity state changed. */
7980 + if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
7981 + mac_dev->promisc = !mac_dev->promisc;
7982 + _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
7983 + mac_dev->promisc);
7984 + if (unlikely(_errno < 0))
7985 + netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
7989 + _errno = mac_dev->set_multi(net_dev, mac_dev);
7990 + if (unlikely(_errno < 0))
7995 +EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
/* Bring up a proxy-backed interface: attach the PHY, enable all FMan
 * ports, then start the MAC. On MAC-start failure the ports are
 * disabled again (port_enable_fail label).
 * NOTE(review): elided patch excerpt -- declarations, some error checks
 * and the returns are missing from this view. */
7997 +int dpa_proxy_start(struct net_device *net_dev)
7999 + struct mac_device *mac_dev;
8000 + const struct dpa_priv_s *priv;
8001 + struct proxy_device *proxy_dev;
 /* The proxy context hangs off the net_device's private 'peer'
  * pointer. */
8005 + priv = netdev_priv(net_dev);
8006 + proxy_dev = (struct proxy_device *)priv->peer;
8007 + mac_dev = proxy_dev->mac_dev;
8009 + _errno = mac_dev->init_phy(net_dev, mac_dev);
8011 + if (netif_msg_drv(priv))
8012 + netdev_err(net_dev, "init_phy() = %d\n",
8017 + for_each_port_device(i, mac_dev->port_dev) {
8018 + _errno = fm_port_enable(mac_dev->port_dev[i]);
8020 + goto port_enable_fail;
8023 + _errno = mac_dev->start(mac_dev);
8025 + if (netif_msg_drv(priv))
8026 + netdev_err(net_dev, "mac_dev->start() = %d\n",
8028 + goto port_enable_fail;
 /* Unwind: disable every port on failure. */
8034 + for_each_port_device(i, mac_dev->port_dev)
8035 + fm_port_disable(mac_dev->port_dev[i]);
8039 +EXPORT_SYMBOL(dpa_proxy_start);
/* Shut down a proxy-backed interface: stop the MAC, disable all FMan
 * ports (keeping the first error in _errno), and disconnect the PHY. */
8041 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
8043 + struct mac_device *mac_dev = proxy_dev->mac_dev;
8044 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
8045 + int _errno, i, err;
8047 + _errno = mac_dev->stop(mac_dev);
8049 + if (netif_msg_drv(priv))
8050 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
 /* Disable every port even if one fails; report the last failure. */
8055 + for_each_port_device(i, mac_dev->port_dev) {
8056 + err = fm_port_disable(mac_dev->port_dev[i]);
8057 + _errno = err ? err : _errno;
8060 + if (mac_dev->phy_dev)
8061 + phy_disconnect(mac_dev->phy_dev);
8062 + mac_dev->phy_dev = NULL;
8066 +EXPORT_SYMBOL(dpa_proxy_stop);
/* Platform-driver remove: clear drvdata; the proxy_dev itself was
 * devm-allocated in probe, so no explicit free is visible here.
 * NOTE(review): elided patch excerpt -- interior lines are missing. */
8068 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
8070 + struct device *dev = &of_dev->dev;
8071 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
8075 + dev_set_drvdata(dev, NULL);
/* Device-tree match table: binds this driver to "fsl,dpa-ethernet-init"
 * nodes (interfaces initialized here but owned by another partition). */
8080 +static const struct of_device_id dpa_proxy_match[] = {
8082 + .compatible = "fsl,dpa-ethernet-init"
8086 +MODULE_DEVICE_TABLE(of, dpa_proxy_match);
/* Platform driver glue; PROXY_PM_OPS is NULL when CONFIG_PM is off. */
8088 +static struct platform_driver dpa_proxy_driver = {
8090 + .name = KBUILD_MODNAME "-proxy",
8091 + .of_match_table = dpa_proxy_match,
8092 + .owner = THIS_MODULE,
8093 + .pm = PROXY_PM_OPS,
8095 + .probe = dpaa_eth_proxy_probe,
8096 + .remove = dpa_eth_proxy_remove
/* Module init: snapshot the FMan-wide extra-headroom and max-frame
 * values into the dpaa_eth mirrors, then register the platform driver.
 * NOTE(review): elided patch excerpt -- the _errno declaration and
 * return statements are missing from this view. */
8099 +static int __init __cold dpa_proxy_load(void)
8103 + pr_info(DPA_DESCRIPTION "\n");
8105 + /* Initialize dpaa_eth mirror values */
8106 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
8107 + dpa_max_frm = fm_get_max_frm();
8109 + _errno = platform_driver_register(&dpa_proxy_driver);
8110 + if (unlikely(_errno < 0)) {
8111 + pr_err(KBUILD_MODNAME
8112 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
8113 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
8116 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
8117 + KBUILD_BASENAME".c", __func__);
8121 +module_init(dpa_proxy_load);
/* Module exit: unregister the platform driver (probe's devm
 * allocations are released automatically on unbind). */
8123 +static void __exit __cold dpa_proxy_unload(void)
8125 + platform_driver_unregister(&dpa_proxy_driver);
8127 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
8128 + KBUILD_BASENAME".c", __func__);
8130 +module_exit(dpa_proxy_unload);
8132 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
8134 +/* Copyright 2012 Freescale Semiconductor Inc.
8136 + * Redistribution and use in source and binary forms, with or without
8137 + * modification, are permitted provided that the following conditions are met:
8138 + * * Redistributions of source code must retain the above copyright
8139 + * notice, this list of conditions and the following disclaimer.
8140 + * * Redistributions in binary form must reproduce the above copyright
8141 + * notice, this list of conditions and the following disclaimer in the
8142 + * documentation and/or other materials provided with the distribution.
8143 + * * Neither the name of Freescale Semiconductor nor the
8144 + * names of its contributors may be used to endorse or promote products
8145 + * derived from this software without specific prior written permission.
8148 + * ALTERNATIVELY, this software may be distributed under the terms of the
8149 + * GNU General Public License ("GPL") as published by the Free Software
8150 + * Foundation, either version 2 of that License or (at your option) any
8153 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
8154 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
8155 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
8156 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
8157 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
8158 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
8159 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
8160 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8161 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
8162 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* pr_fmt: verbose (file:line:function) prefix in debug builds, plain
 * module-name prefix otherwise; followed by the headers this file
 * needs (1588/CEETM headers only when those features are enabled). */
8165 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
8166 +#define pr_fmt(fmt) \
8167 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
8168 + KBUILD_BASENAME".c", __LINE__, __func__
8170 +#define pr_fmt(fmt) \
8171 + KBUILD_MODNAME ": " fmt
8174 +#include <linux/init.h>
8175 +#include <linux/skbuff.h>
8176 +#include <linux/highmem.h>
8177 +#include <linux/fsl_bman.h>
8179 +#include "dpaa_eth.h"
8180 +#include "dpaa_eth_common.h"
8181 +#ifdef CONFIG_FSL_DPAA_1588
8182 +#include "dpaa_1588.h"
8184 +#ifdef CONFIG_FSL_DPAA_CEETM
8185 +#include "dpaa_eth_ceetm.h"
8188 +/* DMA map and add a page frag back into the bpool.
8189 + * @vaddr fragment must have been allocated with netdev_alloc_frag(),
8190 + * specifically for fitting into @dpa_bp.
 /* NOTE(review): elided patch excerpt -- the count_ptr parameter, local
  * declarations and loop-retry details are partly missing from view. */
8192 +static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
8195 + struct bm_buffer bmb;
8200 + addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
8201 + DMA_BIDIRECTIONAL);
8202 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
8203 + dev_err(dpa_bp->dev, "DMA mapping failed");
8207 + bm_buffer_set64(&bmb, addr);
 /* Spin until BMan accepts the buffer back into the pool. */
8209 + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
/* Allocate, DMA-map and release a batch of 8 Rx buffers into the BMan
 * pool; returns the number of buffers actually added (visible from the
 * error path that releases only the first i buffers).
 * NOTE(review): elided patch excerpt -- loop braces, the dma_addr
 * declaration and some error-path lines are missing from this view. */
8215 +static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
8217 + struct bm_buffer bmb[8];
8221 + struct device *dev = dpa_bp->dev;
8222 + struct sk_buff *skb, **skbh;
8224 + memset(bmb, 0, sizeof(struct bm_buffer) * 8);
8226 + for (i = 0; i < 8; i++) {
8227 + /* We'll prepend the skb back-pointer; can't use the DPA
8228 + * priv space, because FMan will overwrite it (from offset 0)
8229 + * if it ends up being the second, third, etc. fragment
8232 + * We only need enough space to store a pointer, but allocate
8233 + * an entire cacheline for performance reasons.
 /* A010022 erratum workaround: use a full page so the buffer never
  * straddles a 4K boundary. */
8236 + if (unlikely(dpaa_errata_a010022))
8237 + new_buf = page_address(alloc_page(GFP_ATOMIC))#comment-anchor;
8240 + new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
8242 + if (unlikely(!new_buf))
8243 + goto netdev_alloc_failed;
8244 + new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
8246 + skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
8247 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
8248 + if (unlikely(!skb)) {
8249 + put_page(virt_to_head_page(new_buf));
8250 + goto build_skb_failed;
8252 + DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
8254 + addr = dma_map_single(dev, new_buf,
8255 + dpa_bp->size, DMA_BIDIRECTIONAL);
8256 + if (unlikely(dma_mapping_error(dev, addr)))
8257 + goto dma_map_failed;
8259 + bm_buffer_set64(&bmb[i], addr);
8263 + /* Release the buffers. In case bman is busy, keep trying
8264 + * until successful. bman_release() is guaranteed to succeed
8265 + * in a reasonable amount of time
8267 + while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
8275 +netdev_alloc_failed:
8276 + net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
8277 + WARN_ONCE(1, "Memory allocation failure on Rx\n");
8279 + bm_buffer_set64(&bmb[i], 0);
8280 + /* Avoid releasing a completely null buffer; bman_release() requires
8281 + * at least one buffer.
8284 + goto release_bufs;
8289 +/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
 /* Adds the buffers on behalf of @cpu and credits that CPU's per-cpu
  * buffer counter with however many were actually added. */
8290 +static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
8292 + int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
8293 + *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
/* Seed the buffer pool at boot: give every possible CPU its allotment
 * of config_count buffers, added in batches of 8. */
8296 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
8300 + /* Give each CPU an allotment of "config_count" buffers */
8301 + for_each_possible_cpu(i) {
8304 + /* Although we access another CPU's counters here
8305 + * we do it at boot time so it is safe
8307 + for (j = 0; j < dpa_bp->config_count; j += 8)
8308 + dpa_bp_add_8_bufs(dpa_bp, i);
8312 +EXPORT_SYMBOL(dpa_bp_priv_seed);
8314 +/* Add buffers/(pages) for Rx processing whenever bpool count falls below
8315 + * REFILL_THRESHOLD.
 /* Refills in 8-buffer batches up to MAX_BUF_COUNT; bails out of the
  * loop if an allocation round adds nothing, deferring to the next
  * NAPI cycle. Return value semantics are elided from this view --
  * presumably an error when still under MAX_BUF_COUNT; TODO confirm. */
8317 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
8319 + int count = *countptr;
8322 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
8324 + new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
8325 + if (unlikely(!new_bufs)) {
8326 + /* Avoid looping forever if we've temporarily
8327 + * run out of memory. We'll try again at the
8328 + * next NAPI cycle.
8332 + count += new_bufs;
8333 + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
8335 + *countptr = count;
8336 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
8342 +EXPORT_SYMBOL(dpaa_eth_refill_bpools);
8344 +/* Cleanup function for outgoing frame descriptors that were built on Tx path,
8345 + * either contiguous frames or scatter/gather ones.
8346 + * Skb freeing is not handled here.
8348 + * This function may be called on error paths in the Tx function, so guard
8349 + * against cases when not all fd relevant fields were filled in.
8351 + * Return the skb backpointer, since for S/G frames the buffer containing it
8352 + * gets freed here.
 /* NOTE(review): elided patch excerpt -- i/nr_frags/sg_len declarations,
  * the else keyword between the S/G and contiguous branches, and the
  * final return are missing from this view. */
8354 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
8355 + const struct qm_fd *fd)
8357 + const struct qm_sg_entry *sgt;
8359 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8360 + dma_addr_t addr = qm_fd_addr(fd);
8361 + dma_addr_t sg_addr;
8362 + struct sk_buff **skbh;
8363 + struct sk_buff *skb = NULL;
8364 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
8368 + /* retrieve skb back pointer */
8369 + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
8371 + if (unlikely(fd->format == qm_fd_sg)) {
8372 + nr_frags = skb_shinfo(skb)->nr_frags;
 /* Unmap the SGT buffer: headroom plus one entry per fragment plus
  * the head entry. */
8373 + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
8374 + sizeof(struct qm_sg_entry) * (1 + nr_frags),
8377 + /* The sgt buffer has been allocated with netdev_alloc_frag(),
8378 + * it's from lowmem.
8380 + sgt = phys_to_virt(addr + dpa_fd_offset(fd));
8381 +#ifdef CONFIG_FSL_DPAA_1588
8382 + if (priv->tsu && priv->tsu->valid &&
8383 + priv->tsu->hwts_tx_en_ioctl)
8384 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
8386 +#ifdef CONFIG_FSL_DPAA_TS
8387 + if (unlikely(priv->ts_tx_en &&
8388 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
8389 + struct skb_shared_hwtstamps shhwtstamps;
8391 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
8392 + skb_tstamp_tx(skb, &shhwtstamps);
8394 +#endif /* CONFIG_FSL_DPAA_TS */
8396 + /* sgt[0] is from lowmem, was dma_map_single()-ed */
8397 + sg_addr = qm_sg_addr(&sgt[0]);
8398 + sg_len = qm_sg_entry_get_len(&sgt[0]);
8399 + dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
8401 + /* remaining pages were mapped with dma_map_page() */
8402 + for (i = 1; i <= nr_frags; i++) {
8403 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
8404 + sg_addr = qm_sg_addr(&sgt[i]);
8405 + sg_len = qm_sg_entry_get_len(&sgt[i]);
8406 + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
8409 + /* Free the page frag that we allocated on Tx */
8410 + put_page(virt_to_head_page(sgt));
 /* Contiguous-frame branch: single unmap covering the backpointer
  * through the skb tail. */
8412 + dma_unmap_single(dpa_bp->dev, addr,
8413 + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
8414 +#ifdef CONFIG_FSL_DPAA_TS
8415 + /* get the timestamp for non-SG frames */
8416 +#ifdef CONFIG_FSL_DPAA_1588
8417 + if (priv->tsu && priv->tsu->valid &&
8418 + priv->tsu->hwts_tx_en_ioctl)
8419 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
8421 + if (unlikely(priv->ts_tx_en &&
8422 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
8423 + struct skb_shared_hwtstamps shhwtstamps;
8425 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
8426 + skb_tstamp_tx(skb, &shhwtstamps);
8433 +EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
 /* Recycling is only compiled in when Tx timestamping is off (the fd
  * must come back on Tx confirmation otherwise). */
8435 +#ifndef CONFIG_FSL_DPAA_TS
/* Check whether an outgoing skb's buffer may be recycled into the Rx
 * bpool: it must be a page-frag head, not zero-copy, and not
 * shared/cloned. Return statements are elided from this view. */
8436 +bool dpa_skb_is_recyclable(struct sk_buff *skb)
8438 + /* No recycling possible if skb buffer is kmalloc'ed */
8439 + if (skb->head_frag == 0)
8442 + /* or if it's an userspace buffer */
8443 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
8446 + /* or if it's cloned or shared */
8447 + if (skb_shared(skb) || skb_cloned(skb) ||
8448 + skb->fclone != SKB_FCLONE_UNAVAILABLE)
8453 +EXPORT_SYMBOL(dpa_skb_is_recyclable);
/* Check whether the skb's underlying buffer satisfies the pool's size,
 * offset and alignment constraints for recycling; on success stores
 * the cacheline-aligned new buffer start in *new_buf_start. */
8455 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
8456 + uint32_t min_size,
8457 + uint16_t min_offset,
8458 + unsigned char **new_buf_start)
8460 + unsigned char *new;
8462 + /* In order to recycle a buffer, the following conditions must be met:
8463 + * - buffer size no less than the buffer pool size
8464 + * - buffer size no higher than an upper limit (to avoid moving too much
8465 + * system memory to the buffer pools)
8466 + * - buffer address aligned to cacheline bytes
8467 + * - offset of data from start of buffer no lower than a minimum value
8468 + * - offset of data from start of buffer no higher than a maximum value
8470 + new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
8472 + /* left align to the nearest cacheline */
8473 + new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
8475 + if (likely(new >= skb->head &&
8476 + new >= (skb->data - DPA_MAX_FD_OFFSET) &&
8477 + skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
8478 + *new_buf_start = new;
8484 +EXPORT_SYMBOL(dpa_buf_is_recyclable);
8487 +/* Build a linear skb around the received buffer.
8488 + * We are guaranteed there is enough room at the end of the data buffer to
8489 + * accommodate the shared info area of the skb.
 /* NOTE(review): elided patch excerpt -- the void *vaddr declaration
  * and the final 'return skb;' are missing from this view. */
8491 +static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
8492 + const struct qm_fd *fd, int *use_gro)
8494 + dma_addr_t addr = qm_fd_addr(fd);
8495 + ssize_t fd_off = dpa_fd_offset(fd);
8497 + const fm_prs_result_t *parse_results;
8498 + struct sk_buff *skb = NULL, **skbh;
8500 + vaddr = phys_to_virt(addr);
8501 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
8503 + /* Retrieve the skb and adjust data and tail pointers, to make sure
8504 + * forwarded skbs will have enough space on Tx if extra headers
8507 + DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
8509 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
8510 + /* When using jumbo Rx buffers, we risk having frames dropped due to
8511 + * the socket backlog reaching its maximum allowed size.
8512 + * Use the frame length for the skb truesize instead of the buffer
8513 + * size, as this is the size of the data that actually gets copied to
8515 + * The stack may increase the payload. In this case, it will want to
8516 + * warn us that the frame length is larger than the truesize. We
8517 + * bypass the warning.
8520 + /* We do not support Jumbo frames on LS1043 and thus we edit
8521 + * the skb truesize only when the 4k errata is not present.
8523 + if (likely(!dpaa_errata_a010022))
8525 + skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
8528 + DPA_BUG_ON(fd_off != priv->rx_headroom);
8529 + skb_reserve(skb, fd_off);
8530 + skb_put(skb, dpa_fd_length(fd));
8532 + /* Peek at the parse results for csum validation */
 /* FMan parse results live right after the Rx private area in the
  * buffer headroom. */
8533 + parse_results = (const fm_prs_result_t *)(vaddr +
8534 + DPA_RX_PRIV_DATA_SIZE);
8535 + _dpa_process_parse_results(parse_results, fd, skb, use_gro);
8537 +#ifdef CONFIG_FSL_DPAA_1588
8538 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
8539 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
8541 +#ifdef CONFIG_FSL_DPAA_TS
8542 + if (priv->ts_rx_en)
8543 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
8544 +#endif /* CONFIG_FSL_DPAA_TS */
8550 +/* Build an skb with the data of the first S/G entry in the linear portion and
8551 + * the rest of the frame as skb fragments.
8553 + * The page fragment holding the S/G Table is recycled here.
 /* NOTE(review): elided patch excerpt -- the count_ptr parameter line,
  * i/page_offset declarations, the if/else around first vs subsequent
  * entries and the final return are missing from this view. */
8555 +static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
8556 + const struct qm_fd *fd, int *use_gro,
8559 + const struct qm_sg_entry *sgt;
8560 + dma_addr_t addr = qm_fd_addr(fd);
8561 + ssize_t fd_off = dpa_fd_offset(fd);
8562 + dma_addr_t sg_addr;
8563 + void *vaddr, *sg_vaddr;
8564 + struct dpa_bp *dpa_bp;
8565 + struct page *page, *head_page;
8566 + int frag_offset, frag_len;
8569 + const fm_prs_result_t *parse_results;
8570 + struct sk_buff *skb = NULL, *skb_tmp, **skbh;
8572 + vaddr = phys_to_virt(addr);
8573 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
8575 + dpa_bp = priv->dpa_bp;
8576 + /* Iterate through the SGT entries and add data buffers to the skb */
8577 + sgt = vaddr + fd_off;
8578 + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
8579 + /* Extension bit is not supported */
8580 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
8582 + /* We use a single global Rx pool */
8583 + DPA_BUG_ON(dpa_bp !=
8584 + dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
8586 + sg_addr = qm_sg_addr(&sgt[i]);
8587 + sg_vaddr = phys_to_virt(sg_addr);
8588 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
8589 + SMP_CACHE_BYTES));
8591 + dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
8592 + DMA_BIDIRECTIONAL);
 /* First S/G entry (presumably guarded by an elided i == 0 check):
  * its buffer becomes the linear part of the skb. */
8594 + DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
8595 + DPA_BUG_ON(skb->head != sg_vaddr);
8596 +#ifdef CONFIG_FSL_DPAA_1588
8597 + if (priv->tsu && priv->tsu->valid &&
8598 + priv->tsu->hwts_rx_en_ioctl)
8599 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
8601 +#ifdef CONFIG_FSL_DPAA_TS
8602 + if (priv->ts_rx_en)
8603 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
8604 +#endif /* CONFIG_FSL_DPAA_TS */
8606 + /* In the case of a SG frame, FMan stores the Internal
8607 + * Context in the buffer containing the sgt.
8608 + * Inspect the parse results before anything else.
8610 + parse_results = (const fm_prs_result_t *)(vaddr +
8611 + DPA_RX_PRIV_DATA_SIZE);
8612 + _dpa_process_parse_results(parse_results, fd, skb,
8615 + /* Make sure forwarded skbs will have enough space
8616 + * on Tx, if extra headers are added.
8618 + DPA_BUG_ON(fd_off != priv->rx_headroom);
8619 + skb_reserve(skb, fd_off);
8620 + skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
8622 + /* Not the first S/G entry; all data from buffer will
8623 + * be added in an skb fragment; fragment index is offset
8624 + * by one since first S/G entry was incorporated in the
8625 + * linear part of the skb.
8627 + * Caution: 'page' may be a tail page.
8629 + DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
8630 + page = virt_to_page(sg_vaddr);
8631 + head_page = virt_to_head_page(sg_vaddr);
8633 + /* Free (only) the skbuff shell because its data buffer
8634 + * is already a frag in the main skb.
8636 + get_page(head_page);
8637 + dev_kfree_skb(skb_tmp);
8639 + /* Compute offset in (possibly tail) page */
8640 + page_offset = ((unsigned long)sg_vaddr &
8641 + (PAGE_SIZE - 1)) +
8642 + (page_address(page) - page_address(head_page));
8643 + /* page_offset only refers to the beginning of sgt[i];
8644 + * but the buffer itself may have an internal offset.
8646 + frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
8648 + frag_len = qm_sg_entry_get_len(&sgt[i]);
8649 + /* skb_add_rx_frag() does no checking on the page; if
8650 + * we pass it a tail page, we'll end up with
8651 + * bad page accounting and eventually with segafults.
8653 + skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
8654 + frag_len, dpa_bp->size);
8656 + /* Update the pool count for the current {cpu x bpool} */
8659 + if (qm_sg_entry_get_final(&sgt[i]))
8662 + WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
8664 + /* recycle the SGT fragment */
8665 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
8666 + dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
8670 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
/* Debug loopback: when priv->loop_to selects a target interface,
 * re-transmit the received skb on it and report the frame as consumed
 * (returns 1); returns 0 when looping is disabled. */
8671 +static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
8672 + struct sk_buff *skb)
8674 + if (unlikely(priv->loop_to < 0))
8675 + return 0; /* loop disabled by default */
8677 + skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
8678 + dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
8680 + return 1; /* Frame Tx on the selected interface */
/* Main Rx processing for a frame descriptor: validate FD status, unmap
 * the buffer, convert the FD (contig or S/G) to an skb, and hand it to
 * the stack via GRO or netif_receive_skb(), updating per-cpu stats.
 * NOTE(review): elided patch excerpt -- the fqid/count_ptr parameters,
 * some braces/else keywords, the count_ptr decrement and the labels'
 * return paths are partly missing from this view. */
8684 +void __hot _dpa_rx(struct net_device *net_dev,
8685 + struct qman_portal *portal,
8686 + const struct dpa_priv_s *priv,
8687 + struct dpa_percpu_priv_s *percpu_priv,
8688 + const struct qm_fd *fd,
8692 + struct dpa_bp *dpa_bp;
8693 + struct sk_buff *skb;
8694 + dma_addr_t addr = qm_fd_addr(fd);
8695 + u32 fd_status = fd->status;
8696 + unsigned int skb_len;
8697 + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
8698 + int use_gro = net_dev->features & NETIF_F_GRO;
8700 + if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
8701 + if (netif_msg_hw(priv) && net_ratelimit())
8702 + netdev_warn(net_dev, "FD status = 0x%08x\n",
8703 + fd_status & FM_FD_STAT_RX_ERRORS);
8705 + percpu_stats->rx_errors++;
8706 + goto _release_frame;
8709 + dpa_bp = priv->dpa_bp;
8710 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
8712 + /* prefetch the first 64 bytes of the frame or the SGT start */
8713 + dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
8714 + prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
8716 + /* The only FD types that we may receive are contig and S/G */
8717 + DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
8719 + if (likely(fd->format == qm_fd_contig)) {
8720 +#ifdef CONFIG_FSL_DPAA_HOOKS
8721 + /* Execute the Rx processing hook, if it exists. */
8722 + if (dpaa_eth_hooks.rx_default &&
8723 + dpaa_eth_hooks.rx_default((void *)fd, net_dev,
8724 + fqid) == DPAA_ETH_STOLEN) {
8725 + /* won't count the rx bytes in */
8729 + skb = contig_fd_to_skb(priv, fd, &use_gro);
 /* S/G branch (presumably the elided 'else'): also bump the per-cpu
  * S/G frame counter. */
8731 + skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
8732 + percpu_priv->rx_sg++;
8735 + /* Account for either the contig buffer or the SGT buffer (depending on
8736 + * which case we were in) having been removed from the pool.
8739 + skb->protocol = eth_type_trans(skb, net_dev);
8741 + /* IP Reassembled frames are allowed to be larger than MTU */
8742 + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
8743 + !(fd_status & FM_FD_IPR))) {
8744 + percpu_stats->rx_dropped++;
8745 + goto drop_bad_frame;
 /* Cache skb->len now: the skb may be freed by the stack before we
  * update the byte counter. */
8748 + skb_len = skb->len;
8750 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
8751 + if (dpa_skb_loop(priv, skb)) {
8752 + percpu_stats->rx_packets++;
8753 + percpu_stats->rx_bytes += skb_len;
8759 + gro_result_t gro_result;
8760 + const struct qman_portal_config *pc =
8761 + qman_p_get_portal_config(portal);
8762 + struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
8765 + gro_result = napi_gro_receive(&np->napi, skb);
8766 + /* If frame is dropped by the stack, rx_dropped counter is
8767 + * incremented automatically, so no need for us to update it
8769 + if (unlikely(gro_result == GRO_DROP))
8770 + goto packet_dropped;
8771 + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
8772 + goto packet_dropped;
8774 + percpu_stats->rx_packets++;
8775 + percpu_stats->rx_bytes += skb_len;
8781 + dev_kfree_skb(skb);
8785 + dpa_fd_release(net_dev, fd);
/* Convert a linear skb into a contiguous frame descriptor for Tx.
 * When timestamping is off and the buffer qualifies, it is set up for
 * recycling back into the Rx pool (DMA_BIDIRECTIONAL, FD carries the
 * bpid); otherwise a plain DMA_TO_DEVICE mapping is used.
 * NOTE(review): elided patch excerpt -- addr/dma_map_size/err
 * declarations, some braces and the returns are missing from this
 * view. */
8788 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
8789 + struct sk_buff *skb, struct qm_fd *fd,
8790 + int *count_ptr, int *offset)
8792 + struct sk_buff **skbh;
8794 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8795 + struct net_device *net_dev = priv->net_dev;
8797 + enum dma_data_direction dma_dir;
8798 + unsigned char *buffer_start;
8801 +#ifndef CONFIG_FSL_DPAA_TS
8802 + /* Check recycling conditions; only if timestamp support is not
8803 + * enabled, otherwise we need the fd back on tx confirmation
8806 + /* We can recycle the buffer if:
8807 + * - the pool is not full
8808 + * - the buffer meets the skb recycling conditions
8809 + * - the buffer meets our own (size, offset, align) conditions
8811 + if (likely((*count_ptr < dpa_bp->target_count) &&
8812 + dpa_skb_is_recyclable(skb) &&
8813 + dpa_buf_is_recyclable(skb, dpa_bp->size,
8814 + priv->tx_headroom, &buffer_start))) {
8815 + /* Buffer is recyclable; use the new start address
8816 + * and set fd parameters and DMA mapping direction
8818 + fd->bpid = dpa_bp->bpid;
8819 + DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
8820 + fd->offset = (uint16_t)(skb->data - buffer_start);
8821 + dma_dir = DMA_BIDIRECTIONAL;
8822 + dma_map_size = dpa_bp->size;
8824 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
8825 + *offset = skb_headroom(skb) - fd->offset;
8829 + /* Not recyclable.
8830 + * We are guaranteed to have at least tx_headroom bytes
8831 + * available, so just use that for offset.
8834 + buffer_start = skb->data - priv->tx_headroom;
8835 + fd->offset = priv->tx_headroom;
8836 + dma_dir = DMA_TO_DEVICE;
8837 + dma_map_size = skb_tail_pointer(skb) - buffer_start;
8839 + /* The buffer will be Tx-confirmed, but the TxConf cb must
8840 + * necessarily look at our Tx private data to retrieve the
8841 + * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
8843 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
8846 + /* Enable L3/L4 hardware checksum computation.
8848 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
8849 + * need to write into the skb.
8851 + err = dpa_enable_tx_csum(priv, skb, fd,
8852 + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
8853 + if (unlikely(err < 0)) {
8854 + if (netif_msg_tx_err(priv) && net_ratelimit())
8855 + netdev_err(net_dev, "HW csum error: %d\n", err);
8859 + /* Fill in the rest of the FD fields */
8860 + fd->format = qm_fd_contig;
8861 + fd->length20 = skb->len;
 /* FM_FD_CMD_FCO: frame-color override / FD command bits required for
  * the recycle path -- TODO confirm exact semantics in FMan docs. */
8862 + fd->cmd |= FM_FD_CMD_FCO;
8864 + /* Map the entire buffer size that may be seen by FMan, but no more */
8865 + addr = dma_map_single(dpa_bp->dev, skbh, dma_map_size, dma_dir);
8866 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
8867 + if (netif_msg_tx_err(priv) && net_ratelimit())
8868 + netdev_err(net_dev, "dma_map_single() failed\n");
8871 + qm_fd_addr_set64(fd, addr);
8875 +EXPORT_SYMBOL(skb_to_contig_fd);
/* Workaround for erratum A010022: rearrange an skb so that no single
 * DMA segment (linear part or paged fragment) crosses a 4K boundary,
 * by splitting offending segments into extra page fragments.
 * NOTE(review): elided patch excerpt -- i/j/page_start/frag
 * declarations, several braces and the final return are missing from
 * this view; the 0x1000/0xFFF constants are the 4K page size/mask. */
8878 +struct sk_buff *split_skb_at_4k_boundaries(struct sk_buff *skb)
8880 + unsigned int length, nr_frags, moved_len = 0;
8882 + struct page *page;
8886 + /* make sure skb is not shared */
8887 + skb = skb_share_check(skb, GFP_ATOMIC);
8891 + nr_frags = skb_shinfo(skb)->nr_frags;
8892 + page_start = (u64)skb->data;
8894 + /* split the linear part at the first 4k boundary and create one (big)
8895 + * fragment with the rest
8897 + if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) {
8898 + /* we'll add one more frag, make sure there's room */
8899 + if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
8902 + /* next page boundary */
8903 + page_start = (page_start + 0x1000) & ~0xFFF;
8904 + page = virt_to_page(page_start);
8906 + /* move the rest of fragments to make room for a new one at j */
8907 + for (i = nr_frags - 1; i >= j; i--)
8908 + skb_shinfo(skb)->frags[i + 1] = skb_shinfo(skb)->frags[i];
8910 + /* move length bytes to a paged fragment at j */
8911 + length = min((u64)0x1000,
8912 + (u64)skb->data + skb_headlen(skb) - page_start);
8913 + skb->data_len += length;
8914 + moved_len += length;
8915 + skb_fill_page_desc(skb, j++, page, 0, length);
8917 + skb_shinfo(skb)->nr_frags = ++nr_frags;
8919 + /* adjust the tail pointer */
8920 + skb->tail -= moved_len;
8923 + /* split any paged fragment that crosses a 4K boundary */
8924 + while (j < nr_frags) {
8925 + frag = &skb_shinfo(skb)->frags[j];
8927 + /* if there is a 4K boundary between the fragment's offset and end */
8928 + if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) {
8929 + /* we'll add one more frag, make sure there's room */
8930 + if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
8933 + /* new page boundary */
8934 + page_start = (u64)page_address(skb_frag_page(frag)) +
8935 + frag->page_offset + 0x1000;
8936 + page_start = (u64)page_start & ~0xFFF;
8937 + page = virt_to_page(page_start);
8939 + /* move the rest of fragments to make room for a new one at j+1 */
8940 + for (i = nr_frags - 1; i > j; i--)
8941 + skb_shinfo(skb)->frags[i + 1] =
8942 + skb_shinfo(skb)->frags[i];
8944 + /* move length bytes to a new paged fragment at j+1 */
8945 + length = (u64)page_address(skb_frag_page(frag)) +
8946 + frag->page_offset + frag->size - page_start;
8947 + frag->size -= length;
8948 + skb_fill_page_desc(skb, j + 1, page, 0, length);
8950 + skb_shinfo(skb)->nr_frags = ++nr_frags;
8953 + /* move to next frag */
8961 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
8962 + struct sk_buff *skb, struct qm_fd *fd)
8964 + struct dpa_bp *dpa_bp = priv->dpa_bp;
8966 + dma_addr_t sg_addr;
8967 + struct sk_buff **skbh;
8968 + struct net_device *net_dev = priv->net_dev;
8969 + int sg_len, sgt_size;
8972 + struct qm_sg_entry *sgt;
8977 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
8979 + nr_frags = skb_shinfo(skb)->nr_frags;
8980 + fd->format = qm_fd_sg;
8982 + sgt_size = sizeof(struct qm_sg_entry) * (1 + nr_frags);
8984 + /* Get a page frag to store the SGTable, or a full page if the errata
8985 + * is in place and we need to avoid crossing a 4k boundary.
8988 + if (unlikely(dpaa_errata_a010022))
8989 + sgt_buf = page_address(alloc_page(GFP_ATOMIC));
8992 + sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
8993 + if (unlikely(!sgt_buf)) {
8994 + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
8998 + /* it seems that the memory allocator does not zero the allocated mem */
8999 + memset(sgt_buf, 0, priv->tx_headroom + sgt_size);
9001 + /* Enable L3/L4 hardware checksum computation.
9003 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
9004 + * need to write into the skb.
9006 + err = dpa_enable_tx_csum(priv, skb, fd,
9007 + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
9008 + if (unlikely(err < 0)) {
9009 + if (netif_msg_tx_err(priv) && net_ratelimit())
9010 + netdev_err(net_dev, "HW csum error: %d\n", err);
9014 + /* Assign the data from skb->data to the first SG list entry */
9015 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
9016 + sg_len = skb_headlen(skb);
9017 + qm_sg_entry_set_bpid(&sgt[0], 0xff);
9018 + qm_sg_entry_set_offset(&sgt[0], 0);
9019 + qm_sg_entry_set_len(&sgt[0], sg_len);
9020 + qm_sg_entry_set_ext(&sgt[0], 0);
9021 + qm_sg_entry_set_final(&sgt[0], 0);
9023 + addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
9024 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9025 + dev_err(dpa_bp->dev, "DMA mapping failed");
9027 + goto sg0_map_failed;
9030 + qm_sg_entry_set64(&sgt[0], addr);
9032 + /* populate the rest of SGT entries */
9033 + for (i = 1; i <= nr_frags; i++) {
9034 + frag = &skb_shinfo(skb)->frags[i - 1];
9035 + qm_sg_entry_set_bpid(&sgt[i], 0xff);
9036 + qm_sg_entry_set_offset(&sgt[i], 0);
9037 + qm_sg_entry_set_len(&sgt[i], frag->size);
9038 + qm_sg_entry_set_ext(&sgt[i], 0);
9040 + if (i == nr_frags)
9041 + qm_sg_entry_set_final(&sgt[i], 1);
9043 + qm_sg_entry_set_final(&sgt[i], 0);
9045 + DPA_BUG_ON(!skb_frag_page(frag));
9046 + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
9048 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9049 + dev_err(dpa_bp->dev, "DMA mapping failed");
9051 + goto sg_map_failed;
9054 + /* keep the offset in the address */
9055 + qm_sg_entry_set64(&sgt[i], addr);
9058 + fd->length20 = skb->len;
9059 + fd->offset = priv->tx_headroom;
9061 + /* DMA map the SGT page */
9062 + DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
9063 + addr = dma_map_single(dpa_bp->dev, sgt_buf,
9064 + priv->tx_headroom + sgt_size,
9067 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
9068 + dev_err(dpa_bp->dev, "DMA mapping failed");
9070 + goto sgt_map_failed;
9073 + qm_fd_addr_set64(fd, addr);
9075 + fd->cmd |= FM_FD_CMD_FCO;
9081 + for (j = 0; j < i; j++) {
9082 + sg_addr = qm_sg_addr(&sgt[j]);
9083 + dma_unmap_page(dpa_bp->dev, sg_addr,
9084 + qm_sg_entry_get_len(&sgt[j]), dma_dir);
9088 + put_page(virt_to_head_page(sgt_buf));
9092 +EXPORT_SYMBOL(skb_to_sg_fd);
9094 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
9096 + struct dpa_priv_s *priv;
9097 + const int queue_mapping = dpa_get_queue_mapping(skb);
9098 + struct qman_fq *egress_fq, *conf_fq;
9100 +#ifdef CONFIG_FSL_DPAA_HOOKS
9101 + /* If there is a Tx hook, run it. */
9102 + if (dpaa_eth_hooks.tx &&
9103 + dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
9104 + /* won't update any Tx stats */
9105 + return NETDEV_TX_OK;
9108 + priv = netdev_priv(net_dev);
9110 +#ifdef CONFIG_FSL_DPAA_CEETM
9111 + if (priv->ceetm_en)
9112 + return ceetm_tx(skb, net_dev);
9115 + egress_fq = priv->egress_fqs[queue_mapping];
9116 + conf_fq = priv->conf_fqs[queue_mapping];
9118 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
9121 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
9122 + struct qman_fq *egress_fq, struct qman_fq *conf_fq)
9124 + struct dpa_priv_s *priv;
9126 + struct dpa_percpu_priv_s *percpu_priv;
9127 + struct rtnl_link_stats64 *percpu_stats;
9129 + const bool nonlinear = skb_is_nonlinear(skb);
9130 + int *countptr, offset = 0;
9132 + priv = netdev_priv(net_dev);
9133 + /* Non-migratable context, safe to use raw_cpu_ptr */
9134 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
9135 + percpu_stats = &percpu_priv->stats;
9136 + countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
9140 +#ifdef CONFIG_FSL_DPAA_1588
9141 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
9142 + fd.cmd |= FM_FD_CMD_UPD;
9144 +#ifdef CONFIG_FSL_DPAA_TS
9145 + if (unlikely(priv->ts_tx_en &&
9146 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
9147 + fd.cmd |= FM_FD_CMD_UPD;
9148 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
9149 +#endif /* CONFIG_FSL_DPAA_TS */
9152 + if (unlikely(dpaa_errata_a010022)) {
9153 + skb = split_skb_at_4k_boundaries(skb);
9155 + goto skb_to_fd_failed;
9159 + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
9160 + * we don't feed FMan with more fragments than it supports.
9161 + * Btw, we're using the first sgt entry to store the linear part of
9162 + * the skb, so we're one extra frag short.
9165 + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
9166 + /* Just create a S/G fd based on the skb */
9167 + err = skb_to_sg_fd(priv, skb, &fd);
9168 + percpu_priv->tx_frag_skbuffs++;
9170 + /* Make sure we have enough headroom to accommodate private
9171 + * data, parse results, etc. Normally this shouldn't happen if
9172 + * we're here via the standard kernel stack.
9174 + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
9175 + struct sk_buff *skb_new;
9177 + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
9178 + if (unlikely(!skb_new)) {
9179 + dev_kfree_skb(skb);
9180 + percpu_stats->tx_errors++;
9181 + return NETDEV_TX_OK;
9183 + dev_kfree_skb(skb);
9187 + /* We're going to store the skb backpointer at the beginning
9188 + * of the data buffer, so we need a privately owned skb
9191 + /* Code borrowed from skb_unshare(). */
9192 + if (skb_cloned(skb)) {
9193 + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
9196 + /* skb_copy() has now linearized the skbuff. */
9197 + } else if (unlikely(nonlinear)) {
9198 + /* We are here because the egress skb contains
9199 + * more fragments than we support. In this case,
9200 + * we have no choice but to linearize it ourselves.
9202 + err = __skb_linearize(skb);
9204 + if (unlikely(!skb || err < 0))
9205 + /* Common out-of-memory error path */
9208 + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
9210 + if (unlikely(err < 0))
9211 + goto skb_to_fd_failed;
9213 + if (fd.bpid != 0xff) {
9215 + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
9216 + * but we need the skb to look as if returned by build_skb().
9217 + * We need to manually adjust the tailptr as well.
9219 + skb->data = skb->head + offset;
9220 + skb_reset_tail_pointer(skb);
9223 + percpu_priv->tx_returned++;
9226 + if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
9229 + return NETDEV_TX_OK;
9232 + if (fd.bpid != 0xff) {
9234 + percpu_priv->tx_returned--;
9235 + dpa_fd_release(net_dev, &fd);
9236 + percpu_stats->tx_errors++;
9237 + return NETDEV_TX_OK;
9239 + _dpa_cleanup_tx_fd(priv, &fd);
9242 + percpu_stats->tx_errors++;
9243 + dev_kfree_skb(skb);
9244 + return NETDEV_TX_OK;
9246 +EXPORT_SYMBOL(dpa_tx_extended);
9248 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
9250 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
9252 + * Redistribution and use in source and binary forms, with or without
9253 + * modification, are permitted provided that the following conditions are met:
9254 + * * Redistributions of source code must retain the above copyright
9255 + * notice, this list of conditions and the following disclaimer.
9256 + * * Redistributions in binary form must reproduce the above copyright
9257 + * notice, this list of conditions and the following disclaimer in the
9258 + * documentation and/or other materials provided with the distribution.
9259 + * * Neither the name of Freescale Semiconductor nor the
9260 + * names of its contributors may be used to endorse or promote products
9261 + * derived from this software without specific prior written permission.
9264 + * ALTERNATIVELY, this software may be distributed under the terms of the
9265 + * GNU General Public License ("GPL") as published by the Free Software
9266 + * Foundation, either version 2 of that License or (at your option) any
9269 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9270 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9271 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9272 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9273 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9274 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9275 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9276 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9277 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9278 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9281 +#include <linux/init.h>
9282 +#include <linux/module.h>
9283 +#include <linux/kthread.h>
9284 +#include <linux/io.h>
9285 +#include <linux/of_net.h>
9286 +#include "dpaa_eth.h"
9287 +#include "mac.h" /* struct mac_device */
9288 +#ifdef CONFIG_FSL_DPAA_1588
9289 +#include "dpaa_1588.h"
9292 +static ssize_t dpaa_eth_show_addr(struct device *dev,
9293 + struct device_attribute *attr, char *buf)
9295 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9296 + struct mac_device *mac_dev = priv->mac_dev;
9299 + return sprintf(buf, "%llx",
9300 + (unsigned long long)mac_dev->res->start);
9302 + return sprintf(buf, "none");
9305 +static ssize_t dpaa_eth_show_type(struct device *dev,
9306 + struct device_attribute *attr, char *buf)
9308 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9312 + res = sprintf(buf, "%s", priv->if_type);
9317 +static ssize_t dpaa_eth_show_fqids(struct device *dev,
9318 + struct device_attribute *attr, char *buf)
9320 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9321 + ssize_t bytes = 0;
9324 + struct dpa_fq *fq;
9325 + struct dpa_fq *tmp;
9326 + struct dpa_fq *prev = NULL;
9327 + u32 first_fqid = 0;
9328 + u32 last_fqid = 0;
9329 + char *prevstr = NULL;
9331 + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
9332 + switch (fq->fq_type) {
9333 + case FQ_TYPE_RX_DEFAULT:
9334 + str = "Rx default";
9336 + case FQ_TYPE_RX_ERROR:
9339 + case FQ_TYPE_RX_PCD:
9342 + case FQ_TYPE_TX_CONFIRM:
9343 + str = "Tx default confirmation";
9345 + case FQ_TYPE_TX_CONF_MQ:
9346 + str = "Tx confirmation (mq)";
9348 + case FQ_TYPE_TX_ERROR:
9354 + case FQ_TYPE_RX_PCD_HI_PRIO:
9355 + str ="Rx PCD High Priority";
9361 + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
9362 + str != prevstr)) {
9363 + if (last_fqid == first_fqid)
9364 + bytes += sprintf(buf + bytes,
9365 + "%s: %d\n", prevstr, prev->fqid);
9367 + bytes += sprintf(buf + bytes,
9368 + "%s: %d - %d\n", prevstr,
9369 + first_fqid, last_fqid);
9372 + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
9373 + last_fqid = fq->fqid;
9375 + first_fqid = last_fqid = fq->fqid;
9383 + if (last_fqid == first_fqid)
9384 + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
9387 + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
9388 + first_fqid, last_fqid);
9394 +static ssize_t dpaa_eth_show_bpids(struct device *dev,
9395 + struct device_attribute *attr, char *buf)
9397 + ssize_t bytes = 0;
9398 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9399 + struct dpa_bp *dpa_bp = priv->dpa_bp;
9402 + for (i = 0; i < priv->bp_count; i++)
9403 + bytes += snprintf(buf + bytes, PAGE_SIZE, "%u\n",
9409 +static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
9410 + struct device_attribute *attr, char *buf)
9412 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9413 + struct mac_device *mac_dev = priv->mac_dev;
9417 + n = fm_mac_dump_regs(mac_dev, buf, n);
9419 + return sprintf(buf, "no mac registers\n");
9424 +static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
9425 + struct device_attribute *attr, char *buf)
9427 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9428 + struct mac_device *mac_dev = priv->mac_dev;
9432 + n = fm_mac_dump_rx_stats(mac_dev, buf, n);
9434 + return sprintf(buf, "no mac rx stats\n");
9439 +static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
9440 + struct device_attribute *attr, char *buf)
9442 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9443 + struct mac_device *mac_dev = priv->mac_dev;
9447 + n = fm_mac_dump_tx_stats(mac_dev, buf, n);
9449 + return sprintf(buf, "no mac tx stats\n");
9454 +#ifdef CONFIG_FSL_DPAA_1588
9455 +static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
9456 + struct device_attribute *attr, char *buf)
9458 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9460 + if (priv->tsu && priv->tsu->valid)
9461 + return sprintf(buf, "1\n");
9463 + return sprintf(buf, "0\n");
9466 +static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
9467 + struct device_attribute *attr,
9468 + const char *buf, size_t count)
9470 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
9472 + unsigned long flags;
9474 + if (kstrtouint(buf, 0, &num) < 0)
9477 + local_irq_save(flags);
9481 + priv->tsu->valid = TRUE;
9484 + priv->tsu->valid = FALSE;
9487 + local_irq_restore(flags);
9493 +static struct device_attribute dpaa_eth_attrs[] = {
9494 + __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
9495 + __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
9496 + __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
9497 + __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
9498 + __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
9499 + __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
9500 + __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
9501 +#ifdef CONFIG_FSL_DPAA_1588
9502 + __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
9503 + dpaa_eth_set_ptp_1588),
9507 +void dpaa_eth_sysfs_init(struct device *dev)
9511 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
9512 + if (device_create_file(dev, &dpaa_eth_attrs[i])) {
9513 + dev_err(dev, "Error creating sysfs file\n");
9515 + device_remove_file(dev, &dpaa_eth_attrs[--i]);
9519 +EXPORT_SYMBOL(dpaa_eth_sysfs_init);
9521 +void dpaa_eth_sysfs_remove(struct device *dev)
9525 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
9526 + device_remove_file(dev, &dpaa_eth_attrs[i]);
9529 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
9531 +/* Copyright 2013 Freescale Semiconductor Inc.
9533 + * Redistribution and use in source and binary forms, with or without
9534 + * modification, are permitted provided that the following conditions are met:
9535 + * * Redistributions of source code must retain the above copyright
9536 + * notice, this list of conditions and the following disclaimer.
9537 + * * Redistributions in binary form must reproduce the above copyright
9538 + * notice, this list of conditions and the following disclaimer in the
9539 + * documentation and/or other materials provided with the distribution.
9540 + * * Neither the name of Freescale Semiconductor nor the
9541 + * names of its contributors may be used to endorse or promote products
9542 + * derived from this software without specific prior written permission.
9545 + * ALTERNATIVELY, this software may be distributed under the terms of the
9546 + * GNU General Public License ("GPL") as published by the Free Software
9547 + * Foundation, either version 2 of that License or (at your option) any
9550 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9551 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9552 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9553 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9554 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9555 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9556 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9557 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9558 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9559 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9562 +#undef TRACE_SYSTEM
9563 +#define TRACE_SYSTEM dpaa_eth
9565 +#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
9566 +#define _DPAA_ETH_TRACE_H
9568 +#include <linux/skbuff.h>
9569 +#include <linux/netdevice.h>
9570 +#include "dpaa_eth.h"
9571 +#include <linux/tracepoint.h>
9573 +#define fd_format_name(format) { qm_fd_##format, #format }
9574 +#define fd_format_list \
9575 + fd_format_name(contig), \
9576 + fd_format_name(sg)
9577 +#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
9580 +/* This is used to declare a class of events;
9581 + * individual events of this type will be defined below.
9584 +/* Store details about a frame descriptor and the FQ on which it was
9585 + * transmitted/received.
9587 +DECLARE_EVENT_CLASS(dpaa_eth_fd,
9588 + /* Trace function prototype */
9589 + TP_PROTO(struct net_device *netdev,
9590 + struct qman_fq *fq,
9591 + const struct qm_fd *fd),
9593 + /* Repeat argument list here */
9594 + TP_ARGS(netdev, fq, fd),
9596 + /* A structure containing the relevant information we want to record.
9597 + * Declare name and type for each normal element, name, type and size
9598 + * for arrays. Use __string for variable length strings.
9601 + __field(u32, fqid)
9602 + __field(u64, fd_addr)
9603 + __field(u8, fd_format)
9604 + __field(u16, fd_offset)
9605 + __field(u32, fd_length)
9606 + __field(u32, fd_status)
9607 + __string(name, netdev->name)
9610 + /* The function that assigns values to the above declared fields */
9612 + __entry->fqid = fq->fqid;
9613 + __entry->fd_addr = qm_fd_addr_get64(fd);
9614 + __entry->fd_format = fd->format;
9615 + __entry->fd_offset = dpa_fd_offset(fd);
9616 + __entry->fd_length = dpa_fd_length(fd);
9617 + __entry->fd_status = fd->status;
9618 + __assign_str(name, netdev->name);
9621 + /* This is what gets printed when the trace event is triggered */
9622 + /* TODO: print the status using __print_flags() */
9624 + __get_str(name), __entry->fqid, __entry->fd_addr,
9625 + __print_symbolic(__entry->fd_format, fd_format_list),
9626 + __entry->fd_offset, __entry->fd_length, __entry->fd_status)
9629 +/* Now declare events of the above type. Format is:
9630 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
9633 +/* Tx (egress) fd */
9634 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
9636 + TP_PROTO(struct net_device *netdev,
9637 + struct qman_fq *fq,
9638 + const struct qm_fd *fd),
9640 + TP_ARGS(netdev, fq, fd)
9644 +DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
9646 + TP_PROTO(struct net_device *netdev,
9647 + struct qman_fq *fq,
9648 + const struct qm_fd *fd),
9650 + TP_ARGS(netdev, fq, fd)
9653 +/* Tx confirmation fd */
9654 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
9656 + TP_PROTO(struct net_device *netdev,
9657 + struct qman_fq *fq,
9658 + const struct qm_fd *fd),
9660 + TP_ARGS(netdev, fq, fd)
9663 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
9664 + * The syntax is the same as for DECLARE_EVENT_CLASS().
9667 +#endif /* _DPAA_ETH_TRACE_H */
9669 +/* This must be outside ifdef _DPAA_ETH_TRACE_H */
9670 +#undef TRACE_INCLUDE_PATH
9671 +#define TRACE_INCLUDE_PATH .
9672 +#undef TRACE_INCLUDE_FILE
9673 +#define TRACE_INCLUDE_FILE dpaa_eth_trace
9674 +#include <trace/define_trace.h>
9676 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
9678 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
9680 + * Redistribution and use in source and binary forms, with or without
9681 + * modification, are permitted provided that the following conditions are met:
9682 + * * Redistributions of source code must retain the above copyright
9683 + * notice, this list of conditions and the following disclaimer.
9684 + * * Redistributions in binary form must reproduce the above copyright
9685 + * notice, this list of conditions and the following disclaimer in the
9686 + * documentation and/or other materials provided with the distribution.
9687 + * * Neither the name of Freescale Semiconductor nor the
9688 + * names of its contributors may be used to endorse or promote products
9689 + * derived from this software without specific prior written permission.
9692 + * ALTERNATIVELY, this software may be distributed under the terms of the
9693 + * GNU General Public License ("GPL") as published by the Free Software
9694 + * Foundation, either version 2 of that License or (at your option) any
9697 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9698 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9699 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9700 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9701 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9702 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9703 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9704 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9705 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9706 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9709 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
9710 +#define pr_fmt(fmt) \
9711 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
9712 + KBUILD_BASENAME".c", __LINE__, __func__
9714 +#define pr_fmt(fmt) \
9715 + KBUILD_MODNAME ": " fmt
9718 +#include <linux/string.h>
9720 +#include "dpaa_eth.h"
9721 +#include "mac.h" /* struct mac_device */
9722 +#include "dpaa_eth_common.h"
9724 +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
9737 +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
9738 + /* dpa rx errors */
9740 + "rx frame physical error",
9741 + "rx frame size error",
9742 + "rx header error",
9745 + /* demultiplexing errors */
9748 + "qman error cond",
9749 + "qman early window",
9750 + "qman late window",
9752 + "qman fq retired",
9753 + "qman orp disabled",
9755 + /* congestion related stats */
9756 + "congestion time (ms)",
9757 + "entered congestion",
9761 +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
9762 +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
9764 +static int __cold dpa_get_settings(struct net_device *net_dev,
9765 + struct ethtool_cmd *et_cmd)
9768 + struct dpa_priv_s *priv;
9770 + priv = netdev_priv(net_dev);
9772 + if (priv->mac_dev == NULL) {
9773 + netdev_info(net_dev, "This is a MAC-less interface\n");
9776 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9777 + netdev_dbg(net_dev, "phy device not initialized\n");
9781 + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
9782 + if (unlikely(_errno < 0))
9783 + netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
9788 +static int __cold dpa_set_settings(struct net_device *net_dev,
9789 + struct ethtool_cmd *et_cmd)
9792 + struct dpa_priv_s *priv;
9794 + priv = netdev_priv(net_dev);
9796 + if (priv->mac_dev == NULL) {
9797 + netdev_info(net_dev, "This is a MAC-less interface\n");
9800 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9801 + netdev_err(net_dev, "phy device not initialized\n");
9805 + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
9806 + if (unlikely(_errno < 0))
9807 + netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
9812 +static void __cold dpa_get_drvinfo(struct net_device *net_dev,
9813 + struct ethtool_drvinfo *drvinfo)
9817 + strncpy(drvinfo->driver, KBUILD_MODNAME,
9818 + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
9819 + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
9822 + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
9823 + /* Truncated output */
9824 + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
9825 + } else if (unlikely(_errno < 0)) {
9826 + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
9827 + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
9829 + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
9830 + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
9833 +static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
9835 + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
9838 +static void __cold dpa_set_msglevel(struct net_device *net_dev,
9839 + uint32_t msg_enable)
9841 + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
9844 +static int __cold dpa_nway_reset(struct net_device *net_dev)
9847 + struct dpa_priv_s *priv;
9849 + priv = netdev_priv(net_dev);
9851 + if (priv->mac_dev == NULL) {
9852 + netdev_info(net_dev, "This is a MAC-less interface\n");
9855 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9856 + netdev_err(net_dev, "phy device not initialized\n");
9861 + if (priv->mac_dev->phy_dev->autoneg) {
9862 + _errno = phy_start_aneg(priv->mac_dev->phy_dev);
9863 + if (unlikely(_errno < 0))
9864 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
9871 +static void __cold dpa_get_pauseparam(struct net_device *net_dev,
9872 + struct ethtool_pauseparam *epause)
9874 + struct dpa_priv_s *priv;
9875 + struct mac_device *mac_dev;
9876 + struct phy_device *phy_dev;
9878 + priv = netdev_priv(net_dev);
9879 + mac_dev = priv->mac_dev;
9881 + if (mac_dev == NULL) {
9882 + netdev_info(net_dev, "This is a MAC-less interface\n");
9886 + phy_dev = mac_dev->phy_dev;
9887 + if (unlikely(phy_dev == NULL)) {
9888 + netdev_err(net_dev, "phy device not initialized\n");
9892 + epause->autoneg = mac_dev->autoneg_pause;
9893 + epause->rx_pause = mac_dev->rx_pause_active;
9894 + epause->tx_pause = mac_dev->tx_pause_active;
9897 +static int __cold dpa_set_pauseparam(struct net_device *net_dev,
9898 + struct ethtool_pauseparam *epause)
9900 + struct dpa_priv_s *priv;
9901 + struct mac_device *mac_dev;
9902 + struct phy_device *phy_dev;
9904 + u32 newadv, oldadv;
9905 + bool rx_pause, tx_pause;
9907 + priv = netdev_priv(net_dev);
9908 + mac_dev = priv->mac_dev;
9910 + if (mac_dev == NULL) {
9911 + netdev_info(net_dev, "This is a MAC-less interface\n");
9915 + phy_dev = mac_dev->phy_dev;
9916 + if (unlikely(phy_dev == NULL)) {
9917 + netdev_err(net_dev, "phy device not initialized\n");
9921 + if (!(phy_dev->supported & SUPPORTED_Pause) ||
9922 + (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
9923 + (epause->rx_pause != epause->tx_pause)))
9926 + /* The MAC should know how to handle PAUSE frame autonegotiation before
9927 + * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
9930 + mac_dev->autoneg_pause = !!epause->autoneg;
9931 + mac_dev->rx_pause_req = !!epause->rx_pause;
9932 + mac_dev->tx_pause_req = !!epause->tx_pause;
9934 + /* Determine the sym/asym advertised PAUSE capabilities from the desired
9935 + * rx/tx pause settings.
9938 + if (epause->rx_pause)
9939 + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
9940 + if (epause->tx_pause)
9941 + newadv |= ADVERTISED_Asym_Pause;
9943 + oldadv = phy_dev->advertising &
9944 + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9946 + /* If there are differences between the old and the new advertised
9947 + * values, restart PHY autonegotiation and advertise the new values.
9949 + if (oldadv != newadv) {
9950 + phy_dev->advertising &= ~(ADVERTISED_Pause
9951 + | ADVERTISED_Asym_Pause);
9952 + phy_dev->advertising |= newadv;
9953 + if (phy_dev->autoneg) {
9954 + _errno = phy_start_aneg(phy_dev);
9955 + if (unlikely(_errno < 0))
9956 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
9961 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
9962 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
9963 + if (unlikely(_errno < 0))
9964 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
9970 +static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
9972 + struct dpa_priv_s *priv = netdev_priv(net_dev);
9974 + wol->supported = 0;
9977 + if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
9980 + if (priv->wol & DPAA_WOL_MAGIC) {
9981 + wol->supported = WAKE_MAGIC;
9982 + wol->wolopts = WAKE_MAGIC;
9986 +static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
9988 + struct dpa_priv_s *priv = netdev_priv(net_dev);
9990 + if (priv->mac_dev == NULL) {
9991 + netdev_info(net_dev, "This is a MAC-less interface\n");
9995 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
9996 + netdev_dbg(net_dev, "phy device not initialized\n");
10000 + if (!device_can_wakeup(net_dev->dev.parent) ||
10001 + (wol->wolopts & ~WAKE_MAGIC))
10002 + return -EOPNOTSUPP;
10006 + if (wol->wolopts & WAKE_MAGIC) {
10007 + priv->wol = DPAA_WOL_MAGIC;
10008 + device_set_wakeup_enable(net_dev->dev.parent, 1);
10010 + device_set_wakeup_enable(net_dev->dev.parent, 0);
10017 +static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
10019 + struct dpa_priv_s *priv;
10021 + priv = netdev_priv(net_dev);
10022 + if (priv->mac_dev == NULL) {
10023 + netdev_info(net_dev, "This is a MAC-less interface\n");
10027 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
10028 + netdev_err(net_dev, "phy device not initialized\n");
10032 + return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
10035 +static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
10037 + struct dpa_priv_s *priv;
10039 + priv = netdev_priv(net_dev);
10040 + if (priv->mac_dev == NULL) {
10041 + netdev_info(net_dev, "This is a MAC-less interface\n");
10045 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
10046 + netdev_err(net_dev, "phy device not initialized\n");
10050 + return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
10053 +static int dpa_get_sset_count(struct net_device *net_dev, int type)
10055 + unsigned int total_stats, num_stats;
10057 + num_stats = num_online_cpus() + 1;
10058 + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
10061 + case ETH_SS_STATS:
10062 + return total_stats;
10064 + return -EOPNOTSUPP;
10068 +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
10069 + int crr_cpu, u64 bp_count, u64 *data)
10071 + int num_stat_values = num_cpus + 1;
10072 + int crr_stat = 0;
10074 + /* update current CPU's stats and also add them to the total values */
10075 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
10076 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
10078 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
10079 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
10081 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
10082 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
10084 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
10085 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
10087 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
10088 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
10090 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
10091 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
10093 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
10094 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
10096 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
10097 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
10099 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
10100 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
10102 + data[crr_stat * num_stat_values + crr_cpu] = bp_count;
10103 + data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
10106 +static void dpa_get_ethtool_stats(struct net_device *net_dev,
10107 + struct ethtool_stats *stats, u64 *data)
10109 + u64 bp_count, cg_time, cg_num, cg_status;
10110 + struct dpa_percpu_priv_s *percpu_priv;
10111 + struct qm_mcr_querycgr query_cgr;
10112 + struct dpa_rx_errors rx_errors;
10113 + struct dpa_ern_cnt ern_cnt;
10114 + struct dpa_priv_s *priv;
10115 + unsigned int num_cpus, offset;
10116 + struct dpa_bp *dpa_bp;
10117 + int total_stats, i;
10119 + total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
10120 + priv = netdev_priv(net_dev);
10121 + dpa_bp = priv->dpa_bp;
10122 + num_cpus = num_online_cpus();
10125 + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
10126 + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
10127 + memset(data, 0, total_stats * sizeof(u64));
10129 + for_each_online_cpu(i) {
10130 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
10132 + if (dpa_bp->percpu_count)
10133 + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
10135 + rx_errors.dme += percpu_priv->rx_errors.dme;
10136 + rx_errors.fpe += percpu_priv->rx_errors.fpe;
10137 + rx_errors.fse += percpu_priv->rx_errors.fse;
10138 + rx_errors.phe += percpu_priv->rx_errors.phe;
10139 + rx_errors.cse += percpu_priv->rx_errors.cse;
10141 + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
10142 + ern_cnt.wred += percpu_priv->ern_cnt.wred;
10143 + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
10144 + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
10145 + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
10146 + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
10147 + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
10148 + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
10150 + copy_stats(percpu_priv, num_cpus, i, bp_count, data);
10153 + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
10154 + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
10156 + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
10157 + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
10159 + /* gather congestion related counters */
10162 + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
10163 + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
10164 + cg_num = priv->cgr_data.cgr_congested_count;
10165 + cg_status = query_cgr.cgr.cs;
10167 +	/* reset congestion stats (like QMan API does) */
10168 + priv->cgr_data.congested_jiffies = 0;
10169 + priv->cgr_data.cgr_congested_count = 0;
10172 + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
10173 + data[offset++] = cg_time;
10174 + data[offset++] = cg_num;
10175 + data[offset++] = cg_status;
10178 +static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
10180 + unsigned int i, j, num_cpus, size;
10181 + char stat_string_cpu[ETH_GSTRING_LEN];
10185 + num_cpus = num_online_cpus();
10186 + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
10188 + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
10189 + for (j = 0; j < num_cpus; j++) {
10190 + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
10191 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
10192 + strings += ETH_GSTRING_LEN;
10194 + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
10195 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
10196 + strings += ETH_GSTRING_LEN;
10198 + memcpy(strings, dpa_stats_global, size);
10201 +const struct ethtool_ops dpa_ethtool_ops = {
10202 + .get_settings = dpa_get_settings,
10203 + .set_settings = dpa_set_settings,
10204 + .get_drvinfo = dpa_get_drvinfo,
10205 + .get_msglevel = dpa_get_msglevel,
10206 + .set_msglevel = dpa_set_msglevel,
10207 + .nway_reset = dpa_nway_reset,
10208 + .get_pauseparam = dpa_get_pauseparam,
10209 + .set_pauseparam = dpa_set_pauseparam,
10210 + .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
10211 + .get_link = ethtool_op_get_link,
10212 + .get_eee = dpa_get_eee,
10213 + .set_eee = dpa_set_eee,
10214 + .get_sset_count = dpa_get_sset_count,
10215 + .get_ethtool_stats = dpa_get_ethtool_stats,
10216 + .get_strings = dpa_get_strings,
10218 + .get_wol = dpa_get_wol,
10219 + .set_wol = dpa_set_wol,
10223 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
10226 + * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
10228 + * Author: Yangbo Lu <yangbo.lu@freescale.com>
10230 + * Copyright 2014 Freescale Semiconductor, Inc.
10232 + * This program is free software; you can redistribute it and/or modify it
10233 + * under the terms of the GNU General Public License as published by the
10234 + * Free Software Foundation; either version 2 of the License, or (at your
10235 + * option) any later version.
10238 +#include <linux/device.h>
10239 +#include <linux/hrtimer.h>
10240 +#include <linux/init.h>
10241 +#include <linux/interrupt.h>
10242 +#include <linux/kernel.h>
10243 +#include <linux/module.h>
10244 +#include <linux/of.h>
10245 +#include <linux/of_platform.h>
10246 +#include <linux/timex.h>
10247 +#include <linux/io.h>
10249 +#include <linux/ptp_clock_kernel.h>
10251 +#include "dpaa_eth.h"
10254 +struct ptp_clock *clock;
10256 +static struct mac_device *mac_dev;
10257 +static u32 freqCompensation;
10259 +/* Bit definitions for the TMR_CTRL register */
10260 +#define ALM1P (1<<31) /* Alarm1 output polarity */
10261 +#define ALM2P (1<<30) /* Alarm2 output polarity */
10262 +#define FS (1<<28) /* FIPER start indication */
10263 +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
10264 +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
10265 +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
10266 +#define TCLK_PERIOD_MASK (0x3ff)
10267 +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
10268 +#define FRD (1<<14) /* FIPER Realignment Disable */
10269 +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
10270 +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
10271 +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
10272 +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
10273 +#define COPH (1<<7) /* Generated clock output phase. */
10274 +#define CIPH (1<<6) /* External oscillator input clock phase */
10275 +#define TMSR (1<<5) /* Timer soft reset. */
10276 +#define BYP (1<<3) /* Bypass drift compensated clock */
10277 +#define TE (1<<2) /* 1588 timer enable. */
10278 +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
10279 +#define CKSEL_MASK (0x3)
10281 +/* Bit definitions for the TMR_TEVENT register */
10282 +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
10283 +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
10284 +#define ALM2 (1<<17) /* Current time = alarm time register 2 */
10285 +#define ALM1 (1<<16) /* Current time = alarm time register 1 */
10286 +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
10287 +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
10288 +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
10290 +/* Bit definitions for the TMR_TEMASK register */
10291 +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
10292 +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
10293 +#define ALM2EN (1<<17) /* Timer ALM2 event enable */
10294 +#define ALM1EN (1<<16) /* Timer ALM1 event enable */
10295 +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
10296 +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
10298 +/* Bit definitions for the TMR_PEVENT register */
10299 +#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
10300 +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
10301 +#define RXP (1<<0) /* PTP frame has been received */
10303 +/* Bit definitions for the TMR_PEMASK register */
10304 +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
10305 +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
10306 +#define RXPEN (1<<0) /* Receive PTP packet event enable */
10308 +/* Bit definitions for the TMR_STAT register */
10309 +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
10310 +#define STAT_VEC_MASK (0x3f)
10312 +/* Bit definitions for the TMR_PRSC register */
10313 +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
10314 +#define PRSC_OCK_MASK (0xffff)
10317 +#define N_EXT_TS 2
10319 +static void set_alarm(void)
10323 + if (mac_dev->fm_rtc_get_cnt)
10324 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
10325 + ns += 1500000000ULL;
10326 + ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
10327 + ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
10328 + if (mac_dev->fm_rtc_set_alarm)
10329 + mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
10332 +static void set_fipers(void)
10336 + if (mac_dev->fm_rtc_disable)
10337 + mac_dev->fm_rtc_disable(mac_dev->fm_dev);
10340 + fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
10341 + if (mac_dev->fm_rtc_set_fiper)
10342 + mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
10344 + if (mac_dev->fm_rtc_enable)
10345 + mac_dev->fm_rtc_enable(mac_dev->fm_dev);
10348 +/* PTP clock operations */
10350 +static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
10353 + u32 diff, tmr_add;
10361 + tmr_add = freqCompensation;
10364 + diff = div_u64(adj, 1000000000ULL);
10366 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
10368 + if (mac_dev->fm_rtc_set_drift)
10369 + mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
10374 +static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
10378 + if (mac_dev->fm_rtc_get_cnt)
10379 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
10383 + if (mac_dev->fm_rtc_set_cnt)
10384 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
10390 +static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
10395 + if (mac_dev->fm_rtc_get_cnt)
10396 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
10398 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
10399 + ts->tv_nsec = remainder;
10403 +static int ptp_dpa_settime(struct ptp_clock_info *ptp,
10404 + const struct timespec64 *ts)
10408 + ns = ts->tv_sec * 1000000000ULL;
10409 + ns += ts->tv_nsec;
10411 + if (mac_dev->fm_rtc_set_cnt)
10412 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
10417 +static int ptp_dpa_enable(struct ptp_clock_info *ptp,
10418 + struct ptp_clock_request *rq, int on)
10422 + switch (rq->type) {
10423 + case PTP_CLK_REQ_EXTTS:
10424 + switch (rq->extts.index) {
10435 + if (mac_dev->fm_rtc_enable_interrupt)
10436 + mac_dev->fm_rtc_enable_interrupt(
10437 + mac_dev->fm_dev, bit);
10439 + if (mac_dev->fm_rtc_disable_interrupt)
10440 + mac_dev->fm_rtc_disable_interrupt(
10441 + mac_dev->fm_dev, bit);
10445 + case PTP_CLK_REQ_PPS:
10447 + if (mac_dev->fm_rtc_enable_interrupt)
10448 + mac_dev->fm_rtc_enable_interrupt(
10449 + mac_dev->fm_dev, PP1EN);
10451 + if (mac_dev->fm_rtc_disable_interrupt)
10452 + mac_dev->fm_rtc_disable_interrupt(
10453 + mac_dev->fm_dev, PP1EN);
10461 + return -EOPNOTSUPP;
10464 +static struct ptp_clock_info ptp_dpa_caps = {
10465 + .owner = THIS_MODULE,
10466 + .name = "dpaa clock",
10467 + .max_adj = 512000,
10469 + .n_ext_ts = N_EXT_TS,
10472 + .adjfreq = ptp_dpa_adjfreq,
10473 + .adjtime = ptp_dpa_adjtime,
10474 + .gettime64 = ptp_dpa_gettime,
10475 + .settime64 = ptp_dpa_settime,
10476 + .enable = ptp_dpa_enable,
10479 +static int __init __cold dpa_ptp_load(void)
10481 + struct device *ptp_dev;
10482 + struct timespec64 now;
10483 + int dpa_phc_index;
10486 + if (!(ptp_priv.of_dev && ptp_priv.mac_dev))
10489 + ptp_dev = &ptp_priv.of_dev->dev;
10490 + mac_dev = ptp_priv.mac_dev;
10492 + if (mac_dev->fm_rtc_get_drift)
10493 + mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
10495 + getnstimeofday64(&now);
10496 + ptp_dpa_settime(&ptp_dpa_caps, &now);
10498 + clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
10499 + if (IS_ERR(clock)) {
10500 + err = PTR_ERR(clock);
10503 + dpa_phc_index = ptp_clock_index(clock);
10506 +module_init(dpa_ptp_load);
10508 +static void __exit __cold dpa_ptp_unload(void)
10510 + if (mac_dev->fm_rtc_disable_interrupt)
10511 + mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
10512 + ptp_clock_unregister(clock);
10514 +module_exit(dpa_ptp_unload);
10516 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
10518 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
10520 + * Redistribution and use in source and binary forms, with or without
10521 + * modification, are permitted provided that the following conditions are met:
10522 + * * Redistributions of source code must retain the above copyright
10523 + * notice, this list of conditions and the following disclaimer.
10524 + * * Redistributions in binary form must reproduce the above copyright
10525 + * notice, this list of conditions and the following disclaimer in the
10526 + * documentation and/or other materials provided with the distribution.
10527 + * * Neither the name of Freescale Semiconductor nor the
10528 + * names of its contributors may be used to endorse or promote products
10529 + * derived from this software without specific prior written permission.
10532 + * ALTERNATIVELY, this software may be distributed under the terms of the
10533 + * GNU General Public License ("GPL") as published by the Free Software
10534 + * Foundation, either version 2 of that License or (at your option) any
10537 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
10538 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
10539 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
10540 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
10541 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
10542 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
10543 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
10544 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10545 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10546 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10549 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
10550 +#define pr_fmt(fmt) \
10551 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
10552 + KBUILD_BASENAME".c", __LINE__, __func__
10554 +#define pr_fmt(fmt) \
10555 + KBUILD_MODNAME ": " fmt
10558 +#include <linux/init.h>
10559 +#include <linux/module.h>
10560 +#include <linux/io.h>
10561 +#include <linux/of_platform.h>
10562 +#include <linux/of_mdio.h>
10563 +#include <linux/phy.h>
10564 +#include <linux/netdevice.h>
10566 +#include "dpaa_eth.h"
10568 +#include "lnxwrp_fsl_fman.h"
10570 +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
10572 +#include "fsl_fman_dtsec.h"
10573 +#include "fsl_fman_tgec.h"
10574 +#include "fsl_fman_memac.h"
10575 +#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
10577 +#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
10579 +MODULE_LICENSE("Dual BSD/GPL");
10581 +MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
10583 +MODULE_DESCRIPTION(MAC_DESCRIPTION);
10585 +struct mac_priv_s {
10586 + struct fm_mac_dev *fm_mac;
10589 +const char *mac_driver_description __initconst = MAC_DESCRIPTION;
10590 +const size_t mac_sizeof_priv[] = {
10591 + [DTSEC] = sizeof(struct mac_priv_s),
10592 + [XGMAC] = sizeof(struct mac_priv_s),
10593 + [MEMAC] = sizeof(struct mac_priv_s)
10596 +static const enet_mode_t _100[] = {
10597 + [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
10598 + [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
10601 +static const enet_mode_t _1000[] = {
10602 + [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
10603 + [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
10604 + [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
10605 + [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
10606 + [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
10607 + [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
10608 + [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
10609 + [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
10610 + [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
10613 +static enet_mode_t __cold __attribute__((nonnull))
10614 +macdev2enetinterface(const struct mac_device *mac_dev)
10616 + switch (mac_dev->max_speed) {
10618 + return _100[mac_dev->phy_if];
10620 + return _1000[mac_dev->phy_if];
10622 + return e_ENET_MODE_SGMII_2500;
10623 + case SPEED_10000:
10624 + return e_ENET_MODE_XGMII_10000;
10626 + return e_ENET_MODE_MII_100;
10630 +static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
10632 + struct mac_device *mac_dev;
10634 + mac_dev = (struct mac_device *)_mac_dev;
10636 + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
10637 +		/* don't flag RX FIFO errors after the first one */
10638 + fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10639 + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
10640 + dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
10644 + dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
10648 +static int __cold init(struct mac_device *mac_dev)
10651 + struct mac_priv_s *priv;
10652 + t_FmMacParams param;
10653 + uint32_t version;
10655 + priv = macdev_priv(mac_dev);
10657 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
10658 + mac_dev->dev, mac_dev->res->start, 0x2000);
10659 + param.enetMode = macdev2enetinterface(mac_dev);
10660 + memcpy(¶m.addr, mac_dev->addr, min(sizeof(param.addr),
10661 + sizeof(mac_dev->addr)));
10662 + param.macId = mac_dev->cell_index;
10663 + param.h_Fm = (handle_t)mac_dev->fm;
10664 + param.mdioIrq = NO_IRQ;
10665 + param.f_Exception = mac_exception;
10666 + param.f_Event = mac_exception;
10667 + param.h_App = mac_dev;
10669 + priv->fm_mac = fm_mac_config(¶m);
10670 + if (unlikely(priv->fm_mac == NULL)) {
10671 + _errno = -EINVAL;
10675 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
10676 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10677 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
10679 + _errno = fm_mac_config_max_frame_length(priv->fm_mac,
10680 + fm_get_max_frm());
10681 + if (unlikely(_errno < 0))
10682 + goto _return_fm_mac_free;
10684 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
10685 + /* 10G always works with pad and CRC */
10686 + _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
10687 + if (unlikely(_errno < 0))
10688 + goto _return_fm_mac_free;
10690 + _errno = fm_mac_config_half_duplex(priv->fm_mac,
10691 + mac_dev->half_duplex);
10692 + if (unlikely(_errno < 0))
10693 + goto _return_fm_mac_free;
10695 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
10696 + if (unlikely(_errno < 0))
10697 + goto _return_fm_mac_free;
10700 + _errno = fm_mac_init(priv->fm_mac);
10701 + if (unlikely(_errno < 0))
10702 + goto _return_fm_mac_free;
10704 +#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
10705 + /* For 1G MAC, disable by default the MIB counters overflow interrupt */
10706 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
10707 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10708 + e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
10709 + if (unlikely(_errno < 0))
10710 + goto _return_fm_mac_free;
10712 +#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
10714 + /* For 10G MAC, disable Tx ECC exception */
10715 + if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
10716 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
10717 + e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
10718 + if (unlikely(_errno < 0))
10719 + goto _return_fm_mac_free;
10722 + _errno = fm_mac_get_version(priv->fm_mac, &version);
10723 + if (unlikely(_errno < 0))
10724 + goto _return_fm_mac_free;
10726 + dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
10727 + ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10728 + "dTSEC" : "XGEC"), version);
10733 +_return_fm_mac_free:
10734 + fm_mac_free(mac_dev->get_mac_handle(mac_dev));
10740 +static int __cold memac_init(struct mac_device *mac_dev)
10743 + struct mac_priv_s *priv;
10744 + t_FmMacParams param;
10746 + priv = macdev_priv(mac_dev);
10748 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
10749 + mac_dev->dev, mac_dev->res->start, 0x2000);
10750 + param.enetMode = macdev2enetinterface(mac_dev);
10751 + memcpy(¶m.addr, mac_dev->addr, sizeof(mac_dev->addr));
10752 + param.macId = mac_dev->cell_index;
10753 + param.h_Fm = (handle_t)mac_dev->fm;
10754 + param.mdioIrq = NO_IRQ;
10755 + param.f_Exception = mac_exception;
10756 + param.f_Event = mac_exception;
10757 + param.h_App = mac_dev;
10759 + priv->fm_mac = fm_mac_config(¶m);
10760 + if (unlikely(priv->fm_mac == NULL)) {
10761 + _errno = -EINVAL;
10765 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
10766 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
10767 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
10769 + _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
10770 + if (unlikely(_errno < 0))
10771 + goto _return_fm_mac_free;
10773 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
10774 + if (unlikely(_errno < 0))
10775 + goto _return_fm_mac_free;
10777 + _errno = fm_mac_init(priv->fm_mac);
10778 + if (unlikely(_errno < 0))
10779 + goto _return_fm_mac_free;
10781 + dev_info(mac_dev->dev, "FMan MEMAC\n");
10785 +_return_fm_mac_free:
10786 + fm_mac_free(priv->fm_mac);
10792 +static int __cold start(struct mac_device *mac_dev)
10795 + struct phy_device *phy_dev = mac_dev->phy_dev;
10797 + _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
10799 + if (!_errno && phy_dev)
10800 + phy_start(phy_dev);
10805 +static int __cold stop(struct mac_device *mac_dev)
10807 + if (mac_dev->phy_dev)
10808 + phy_stop(mac_dev->phy_dev);
10810 + return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
10813 +static int __cold set_multi(struct net_device *net_dev,
10814 + struct mac_device *mac_dev)
10816 + struct mac_priv_s *mac_priv;
10817 + struct mac_address *old_addr, *tmp;
10818 + struct netdev_hw_addr *ha;
10821 + mac_priv = macdev_priv(mac_dev);
10823 + /* Clear previous address list */
10824 + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
10825 + _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
10826 + (t_EnetAddr *)old_addr->addr);
10830 + list_del(&old_addr->list);
10834 + /* Add all the addresses from the new list */
10835 + netdev_for_each_mc_addr(ha, net_dev) {
10836 + _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
10837 + (t_EnetAddr *)ha->addr);
10841 + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
10843 + dev_err(mac_dev->dev, "Out of memory\n");
10846 + memcpy(tmp->addr, ha->addr, ETH_ALEN);
10847 + list_add(&tmp->list, &mac_dev->mc_addr_list);
10852 +/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
10853 + * active PAUSE settings. Otherwise, the new active settings should be reflected
10856 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
10858 + struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
10861 + if (unlikely(rx != mac_dev->rx_pause_active)) {
10862 + _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
10863 + if (likely(_errno == 0))
10864 + mac_dev->rx_pause_active = rx;
10867 + if (unlikely(tx != mac_dev->tx_pause_active)) {
10868 + _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
10869 + if (likely(_errno == 0))
10870 + mac_dev->tx_pause_active = tx;
10875 +EXPORT_SYMBOL(set_mac_active_pause);
10877 +/* Determine the MAC RX/TX PAUSE frames settings based on PHY
10878 + * autonegotiation or values set by ethtool.
10880 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
10882 + struct phy_device *phy_dev = mac_dev->phy_dev;
10883 + u16 lcl_adv, rmt_adv;
10886 + *rx_pause = *tx_pause = false;
10888 + if (!phy_dev->duplex)
10891 + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
10892 + * are those set by ethtool.
10894 + if (!mac_dev->autoneg_pause) {
10895 + *rx_pause = mac_dev->rx_pause_req;
10896 + *tx_pause = mac_dev->tx_pause_req;
10900 + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
10901 + * settings depend on the result of the link negotiation.
10904 + /* get local capabilities */
10906 + if (phy_dev->advertising & ADVERTISED_Pause)
10907 + lcl_adv |= ADVERTISE_PAUSE_CAP;
10908 + if (phy_dev->advertising & ADVERTISED_Asym_Pause)
10909 + lcl_adv |= ADVERTISE_PAUSE_ASYM;
10911 + /* get link partner capabilities */
10913 + if (phy_dev->pause)
10914 + rmt_adv |= LPA_PAUSE_CAP;
10915 + if (phy_dev->asym_pause)
10916 + rmt_adv |= LPA_PAUSE_ASYM;
10918 + /* Calculate TX/RX settings based on local and peer advertised
10919 + * symmetric/asymmetric PAUSE capabilities.
10921 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
10922 + if (flowctrl & FLOW_CTRL_RX)
10923 + *rx_pause = true;
10924 + if (flowctrl & FLOW_CTRL_TX)
10925 + *tx_pause = true;
10927 +EXPORT_SYMBOL(get_pause_cfg);
10929 +static void adjust_link_void(struct net_device *net_dev)
10933 +static void adjust_link(struct net_device *net_dev)
10935 + struct dpa_priv_s *priv = netdev_priv(net_dev);
10936 + struct mac_device *mac_dev = priv->mac_dev;
10937 + struct phy_device *phy_dev = mac_dev->phy_dev;
10938 + struct fm_mac_dev *fm_mac_dev;
10939 + bool rx_pause, tx_pause;
10942 + fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
10943 + fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
10944 + phy_dev->duplex);
10946 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
10947 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
10948 + if (unlikely(_errno < 0))
10949 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
10952 +/* Initializes driver's PHY state, and attaches to the PHY.
10953 + * Returns 0 on success.
10955 +static int dtsec_init_phy(struct net_device *net_dev,
10956 + struct mac_device *mac_dev)
10958 + struct phy_device *phy_dev;
10960 + if (of_phy_is_fixed_link(mac_dev->phy_node))
10961 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
10962 + 0, mac_dev->phy_if);
10964 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
10965 + &adjust_link, 0, mac_dev->phy_if);
10966 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
10967 + netdev_err(net_dev, "Could not connect to PHY %s\n",
10968 + mac_dev->phy_node ?
10969 + mac_dev->phy_node->full_name :
10970 + mac_dev->fixed_bus_id);
10971 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
10974 + /* Remove any features not supported by the controller */
10975 + phy_dev->supported &= mac_dev->if_support;
10976 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
10977 + * as most of the PHY drivers do not enable them by default.
10979 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
10980 + phy_dev->advertising = phy_dev->supported;
10982 + mac_dev->phy_dev = phy_dev;
10987 +static int xgmac_init_phy(struct net_device *net_dev,
10988 + struct mac_device *mac_dev)
10990 + struct phy_device *phy_dev;
10992 + if (of_phy_is_fixed_link(mac_dev->phy_node))
10993 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
10994 + 0, mac_dev->phy_if);
10996 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
10997 + &adjust_link_void, 0, mac_dev->phy_if);
10998 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
10999 + netdev_err(net_dev, "Could not attach to PHY %s\n",
11000 + mac_dev->phy_node ?
11001 + mac_dev->phy_node->full_name :
11002 + mac_dev->fixed_bus_id);
11003 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
11006 + phy_dev->supported &= mac_dev->if_support;
11007 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
11008 + * as most of the PHY drivers do not enable them by default.
11010 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
11011 + phy_dev->advertising = phy_dev->supported;
11013 + mac_dev->phy_dev = phy_dev;
11018 +static int memac_init_phy(struct net_device *net_dev,
11019 + struct mac_device *mac_dev)
11021 + struct phy_device *phy_dev;
11023 + if (of_phy_is_fixed_link(mac_dev->phy_node)) {
11024 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node,
11025 + 0, mac_dev->phy_if);
11026 + } else if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
11027 + (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
11028 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
11029 + &adjust_link_void, 0,
11030 + mac_dev->phy_if);
11032 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
11033 + &adjust_link, 0, mac_dev->phy_if);
11036 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
11037 + netdev_err(net_dev, "Could not connect to PHY %s\n",
11038 + mac_dev->phy_node ?
11039 + mac_dev->phy_node->full_name :
11040 + mac_dev->fixed_bus_id);
11041 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
11044 + /* Remove any features not supported by the controller */
11045 + phy_dev->supported &= mac_dev->if_support;
11046 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
11047 + * as most of the PHY drivers do not enable them by default.
11049 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
11050 + phy_dev->advertising = phy_dev->supported;
11052 + mac_dev->phy_dev = phy_dev;
11057 +static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
11059 + int _errno, __errno;
11061 + _errno = fm_mac_disable(fm_mac_dev);
11062 + __errno = fm_mac_free(fm_mac_dev);
11064 + if (unlikely(__errno < 0))
11065 + _errno = __errno;
11070 +static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
11072 + const struct mac_priv_s *priv;
11073 + priv = macdev_priv(mac_dev);
11074 + return priv->fm_mac;
11077 +static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11079 + struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
11080 + int i = 0, n = nn;
11082 + FM_DMP_SUBTITLE(buf, n, "\n");
11084 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
11086 + FM_DMP_V32(buf, n, p_mm, tsec_id);
11087 + FM_DMP_V32(buf, n, p_mm, tsec_id2);
11088 + FM_DMP_V32(buf, n, p_mm, ievent);
11089 + FM_DMP_V32(buf, n, p_mm, imask);
11090 + FM_DMP_V32(buf, n, p_mm, ecntrl);
11091 + FM_DMP_V32(buf, n, p_mm, ptv);
11092 + FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
11093 + FM_DMP_V32(buf, n, p_mm, tmr_pevent);
11094 + FM_DMP_V32(buf, n, p_mm, tmr_pemask);
11095 + FM_DMP_V32(buf, n, p_mm, tctrl);
11096 + FM_DMP_V32(buf, n, p_mm, rctrl);
11097 + FM_DMP_V32(buf, n, p_mm, maccfg1);
11098 + FM_DMP_V32(buf, n, p_mm, maccfg2);
11099 + FM_DMP_V32(buf, n, p_mm, ipgifg);
11100 + FM_DMP_V32(buf, n, p_mm, hafdup);
11101 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11103 + FM_DMP_V32(buf, n, p_mm, macstnaddr1);
11104 + FM_DMP_V32(buf, n, p_mm, macstnaddr2);
11106 + for (i = 0; i < 7; ++i) {
11107 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
11108 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
11111 + FM_DMP_V32(buf, n, p_mm, car1);
11112 + FM_DMP_V32(buf, n, p_mm, car2);
11117 +static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11119 + struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
11122 + FM_DMP_SUBTITLE(buf, n, "\n");
11123 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
11125 + FM_DMP_V32(buf, n, p_mm, tgec_id);
11126 + FM_DMP_V32(buf, n, p_mm, command_config);
11127 + FM_DMP_V32(buf, n, p_mm, mac_addr_0);
11128 + FM_DMP_V32(buf, n, p_mm, mac_addr_1);
11129 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11130 + FM_DMP_V32(buf, n, p_mm, pause_quant);
11131 + FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
11132 + FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
11133 + FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
11134 + FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
11135 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
11136 + FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
11137 + FM_DMP_V32(buf, n, p_mm, mdio_command);
11138 + FM_DMP_V32(buf, n, p_mm, mdio_data);
11139 + FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
11140 + FM_DMP_V32(buf, n, p_mm, status);
11141 + FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
11142 + FM_DMP_V32(buf, n, p_mm, mac_addr_2);
11143 + FM_DMP_V32(buf, n, p_mm, mac_addr_3);
11144 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
11145 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
11146 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
11147 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
11148 + FM_DMP_V32(buf, n, p_mm, imask);
11149 + FM_DMP_V32(buf, n, p_mm, ievent);
11154 +static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11156 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11157 + int i = 0, n = nn;
11159 + FM_DMP_SUBTITLE(buf, n, "\n");
11160 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
11162 + FM_DMP_V32(buf, n, p_mm, command_config);
11163 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
11164 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
11165 + FM_DMP_V32(buf, n, p_mm, maxfrm);
11166 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
11167 + FM_DMP_V32(buf, n, p_mm, ievent);
11168 + FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
11169 + FM_DMP_V32(buf, n, p_mm, imask);
11171 + for (i = 0; i < 4; ++i)
11172 + FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
11174 + for (i = 0; i < 4; ++i)
11175 + FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
11177 + FM_DMP_V32(buf, n, p_mm, rx_pause_status);
11179 + for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
11180 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
11181 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
11184 + FM_DMP_V32(buf, n, p_mm, lpwake_timer);
11185 + FM_DMP_V32(buf, n, p_mm, sleep_timer);
11186 + FM_DMP_V32(buf, n, p_mm, statn_config);
11187 + FM_DMP_V32(buf, n, p_mm, if_mode);
11188 + FM_DMP_V32(buf, n, p_mm, if_status);
11189 + FM_DMP_V32(buf, n, p_mm, hg_config);
11190 + FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
11191 + FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
11192 + FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
11193 + FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
11194 + FM_DMP_V32(buf, n, p_mm, rhm);
11195 + FM_DMP_V32(buf, n, p_mm, thm);
11200 +static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
11202 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11205 + FM_DMP_SUBTITLE(buf, n, "\n");
11206 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
11208 + /* Rx Statistics Counter */
11209 + FM_DMP_V32(buf, n, p_mm, reoct_l);
11210 + FM_DMP_V32(buf, n, p_mm, reoct_u);
11211 + FM_DMP_V32(buf, n, p_mm, roct_l);
11212 + FM_DMP_V32(buf, n, p_mm, roct_u);
11213 + FM_DMP_V32(buf, n, p_mm, raln_l);
11214 + FM_DMP_V32(buf, n, p_mm, raln_u);
11215 + FM_DMP_V32(buf, n, p_mm, rxpf_l);
11216 + FM_DMP_V32(buf, n, p_mm, rxpf_u);
11217 + FM_DMP_V32(buf, n, p_mm, rfrm_l);
11218 + FM_DMP_V32(buf, n, p_mm, rfrm_u);
11219 + FM_DMP_V32(buf, n, p_mm, rfcs_l);
11220 + FM_DMP_V32(buf, n, p_mm, rfcs_u);
11221 + FM_DMP_V32(buf, n, p_mm, rvlan_l);
11222 + FM_DMP_V32(buf, n, p_mm, rvlan_u);
11223 + FM_DMP_V32(buf, n, p_mm, rerr_l);
11224 + FM_DMP_V32(buf, n, p_mm, rerr_u);
11225 + FM_DMP_V32(buf, n, p_mm, ruca_l);
11226 + FM_DMP_V32(buf, n, p_mm, ruca_u);
11227 + FM_DMP_V32(buf, n, p_mm, rmca_l);
11228 + FM_DMP_V32(buf, n, p_mm, rmca_u);
11229 + FM_DMP_V32(buf, n, p_mm, rbca_l);
11230 + FM_DMP_V32(buf, n, p_mm, rbca_u);
11231 + FM_DMP_V32(buf, n, p_mm, rdrp_l);
11232 + FM_DMP_V32(buf, n, p_mm, rdrp_u);
11233 + FM_DMP_V32(buf, n, p_mm, rpkt_l);
11234 + FM_DMP_V32(buf, n, p_mm, rpkt_u);
11235 + FM_DMP_V32(buf, n, p_mm, rund_l);
11236 + FM_DMP_V32(buf, n, p_mm, rund_u);
11237 + FM_DMP_V32(buf, n, p_mm, r64_l);
11238 + FM_DMP_V32(buf, n, p_mm, r64_u);
11239 + FM_DMP_V32(buf, n, p_mm, r127_l);
11240 + FM_DMP_V32(buf, n, p_mm, r127_u);
11241 + FM_DMP_V32(buf, n, p_mm, r255_l);
11242 + FM_DMP_V32(buf, n, p_mm, r255_u);
11243 + FM_DMP_V32(buf, n, p_mm, r511_l);
11244 + FM_DMP_V32(buf, n, p_mm, r511_u);
11245 + FM_DMP_V32(buf, n, p_mm, r1023_l);
11246 + FM_DMP_V32(buf, n, p_mm, r1023_u);
11247 + FM_DMP_V32(buf, n, p_mm, r1518_l);
11248 + FM_DMP_V32(buf, n, p_mm, r1518_u);
11249 + FM_DMP_V32(buf, n, p_mm, r1519x_l);
11250 + FM_DMP_V32(buf, n, p_mm, r1519x_u);
11251 + FM_DMP_V32(buf, n, p_mm, rovr_l);
11252 + FM_DMP_V32(buf, n, p_mm, rovr_u);
11253 + FM_DMP_V32(buf, n, p_mm, rjbr_l);
11254 + FM_DMP_V32(buf, n, p_mm, rjbr_u);
11255 + FM_DMP_V32(buf, n, p_mm, rfrg_l);
11256 + FM_DMP_V32(buf, n, p_mm, rfrg_u);
11257 + FM_DMP_V32(buf, n, p_mm, rcnp_l);
11258 + FM_DMP_V32(buf, n, p_mm, rcnp_u);
11259 + FM_DMP_V32(buf, n, p_mm, rdrntp_l);
11260 + FM_DMP_V32(buf, n, p_mm, rdrntp_u);
11265 +static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
11267 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
11270 + FM_DMP_SUBTITLE(buf, n, "\n");
11271 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
11274 + /* Tx Statistics Counter */
11275 + FM_DMP_V32(buf, n, p_mm, teoct_l);
11276 + FM_DMP_V32(buf, n, p_mm, teoct_u);
11277 + FM_DMP_V32(buf, n, p_mm, toct_l);
11278 + FM_DMP_V32(buf, n, p_mm, toct_u);
11279 + FM_DMP_V32(buf, n, p_mm, txpf_l);
11280 + FM_DMP_V32(buf, n, p_mm, txpf_u);
11281 + FM_DMP_V32(buf, n, p_mm, tfrm_l);
11282 + FM_DMP_V32(buf, n, p_mm, tfrm_u);
11283 + FM_DMP_V32(buf, n, p_mm, tfcs_l);
11284 + FM_DMP_V32(buf, n, p_mm, tfcs_u);
11285 + FM_DMP_V32(buf, n, p_mm, tvlan_l);
11286 + FM_DMP_V32(buf, n, p_mm, tvlan_u);
11287 + FM_DMP_V32(buf, n, p_mm, terr_l);
11288 + FM_DMP_V32(buf, n, p_mm, terr_u);
11289 + FM_DMP_V32(buf, n, p_mm, tuca_l);
11290 + FM_DMP_V32(buf, n, p_mm, tuca_u);
11291 + FM_DMP_V32(buf, n, p_mm, tmca_l);
11292 + FM_DMP_V32(buf, n, p_mm, tmca_u);
11293 + FM_DMP_V32(buf, n, p_mm, tbca_l);
11294 + FM_DMP_V32(buf, n, p_mm, tbca_u);
11295 + FM_DMP_V32(buf, n, p_mm, tpkt_l);
11296 + FM_DMP_V32(buf, n, p_mm, tpkt_u);
11297 + FM_DMP_V32(buf, n, p_mm, tund_l);
11298 + FM_DMP_V32(buf, n, p_mm, tund_u);
11299 + FM_DMP_V32(buf, n, p_mm, t64_l);
11300 + FM_DMP_V32(buf, n, p_mm, t64_u);
11301 + FM_DMP_V32(buf, n, p_mm, t127_l);
11302 + FM_DMP_V32(buf, n, p_mm, t127_u);
11303 + FM_DMP_V32(buf, n, p_mm, t255_l);
11304 + FM_DMP_V32(buf, n, p_mm, t255_u);
11305 + FM_DMP_V32(buf, n, p_mm, t511_l);
11306 + FM_DMP_V32(buf, n, p_mm, t511_u);
11307 + FM_DMP_V32(buf, n, p_mm, t1023_l);
11308 + FM_DMP_V32(buf, n, p_mm, t1023_u);
11309 + FM_DMP_V32(buf, n, p_mm, t1518_l);
11310 + FM_DMP_V32(buf, n, p_mm, t1518_u);
11311 + FM_DMP_V32(buf, n, p_mm, t1519x_l);
11312 + FM_DMP_V32(buf, n, p_mm, t1519x_u);
11313 + FM_DMP_V32(buf, n, p_mm, tcnp_l);
11314 + FM_DMP_V32(buf, n, p_mm, tcnp_u);
11319 +int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
11323 + n = h_mac->dump_mac_regs(h_mac, buf, n);
11327 +EXPORT_SYMBOL(fm_mac_dump_regs);
11329 +int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
11333 + if(h_mac->dump_mac_rx_stats)
11334 + n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
11338 +EXPORT_SYMBOL(fm_mac_dump_rx_stats);
11340 +int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
11344 + if(h_mac->dump_mac_tx_stats)
11345 + n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
11349 +EXPORT_SYMBOL(fm_mac_dump_tx_stats);
11351 +static void __cold setup_dtsec(struct mac_device *mac_dev)
11353 + mac_dev->init_phy = dtsec_init_phy;
11354 + mac_dev->init = init;
11355 + mac_dev->start = start;
11356 + mac_dev->stop = stop;
11357 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11358 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11359 + mac_dev->set_multi = set_multi;
11360 + mac_dev->uninit = uninit;
11361 + mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
11362 + mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
11363 + mac_dev->get_mac_handle = get_mac_handle;
11364 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11365 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11366 + mac_dev->fm_rtc_enable = fm_rtc_enable;
11367 + mac_dev->fm_rtc_disable = fm_rtc_disable;
11368 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
11369 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
11370 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
11371 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
11372 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
11373 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
11374 + mac_dev->set_wol = fm_mac_set_wol;
11375 + mac_dev->dump_mac_regs = dtsec_dump_regs;
11378 +static void __cold setup_xgmac(struct mac_device *mac_dev)
11380 + mac_dev->init_phy = xgmac_init_phy;
11381 + mac_dev->init = init;
11382 + mac_dev->start = start;
11383 + mac_dev->stop = stop;
11384 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11385 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11386 + mac_dev->set_multi = set_multi;
11387 + mac_dev->uninit = uninit;
11388 + mac_dev->get_mac_handle = get_mac_handle;
11389 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11390 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11391 + mac_dev->set_wol = fm_mac_set_wol;
11392 + mac_dev->dump_mac_regs = xgmac_dump_regs;
11395 +static void __cold setup_memac(struct mac_device *mac_dev)
11397 + mac_dev->init_phy = memac_init_phy;
11398 + mac_dev->init = memac_init;
11399 + mac_dev->start = start;
11400 + mac_dev->stop = stop;
11401 + mac_dev->set_promisc = fm_mac_set_promiscuous;
11402 + mac_dev->change_addr = fm_mac_modify_mac_addr;
11403 + mac_dev->set_multi = set_multi;
11404 + mac_dev->uninit = uninit;
11405 + mac_dev->get_mac_handle = get_mac_handle;
11406 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
11407 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
11408 + mac_dev->fm_rtc_enable = fm_rtc_enable;
11409 + mac_dev->fm_rtc_disable = fm_rtc_disable;
11410 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
11411 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
11412 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
11413 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
11414 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
11415 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
11416 + mac_dev->set_wol = fm_mac_set_wol;
11417 + mac_dev->dump_mac_regs = memac_dump_regs;
11418 + mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
11419 + mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
11422 +void (*const mac_setup[])(struct mac_device *mac_dev) = {
11423 + [DTSEC] = setup_dtsec,
11424 + [XGMAC] = setup_xgmac,
11425 + [MEMAC] = setup_memac
11428 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
11430 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
11432 + * Redistribution and use in source and binary forms, with or without
11433 + * modification, are permitted provided that the following conditions are met:
11434 + * * Redistributions of source code must retain the above copyright
11435 + * notice, this list of conditions and the following disclaimer.
11436 + * * Redistributions in binary form must reproduce the above copyright
11437 + * notice, this list of conditions and the following disclaimer in the
11438 + * documentation and/or other materials provided with the distribution.
11439 + * * Neither the name of Freescale Semiconductor nor the
11440 + * names of its contributors may be used to endorse or promote products
11441 + * derived from this software without specific prior written permission.
11444 + * ALTERNATIVELY, this software may be distributed under the terms of the
11445 + * GNU General Public License ("GPL") as published by the Free Software
11446 + * Foundation, either version 2 of that License or (at your option) any
11449 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11450 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11451 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11452 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11453 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11454 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11455 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11456 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11457 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11458 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11461 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
11462 +#define pr_fmt(fmt) \
11463 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
11464 + KBUILD_BASENAME".c", __LINE__, __func__
11466 +#define pr_fmt(fmt) \
11467 + KBUILD_MODNAME ": " fmt
11470 +#include <linux/init.h>
11471 +#include <linux/module.h>
11472 +#include <linux/of_address.h>
11473 +#include <linux/of_platform.h>
11474 +#include <linux/of_net.h>
11475 +#include <linux/of_mdio.h>
11476 +#include <linux/phy_fixed.h>
11477 +#include <linux/device.h>
11478 +#include <linux/phy.h>
11479 +#include <linux/io.h>
11481 +#include "lnxwrp_fm_ext.h"
11485 +#define DTSEC_SUPPORTED \
11486 + (SUPPORTED_10baseT_Half \
11487 + | SUPPORTED_10baseT_Full \
11488 + | SUPPORTED_100baseT_Half \
11489 + | SUPPORTED_100baseT_Full \
11490 + | SUPPORTED_Autoneg \
11491 + | SUPPORTED_Pause \
11492 + | SUPPORTED_Asym_Pause \
11495 +static const char phy_str[][11] = {
11496 + [PHY_INTERFACE_MODE_MII] = "mii",
11497 + [PHY_INTERFACE_MODE_GMII] = "gmii",
11498 + [PHY_INTERFACE_MODE_SGMII] = "sgmii",
11499 + [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
11500 + [PHY_INTERFACE_MODE_TBI] = "tbi",
11501 + [PHY_INTERFACE_MODE_RMII] = "rmii",
11502 + [PHY_INTERFACE_MODE_RGMII] = "rgmii",
11503 + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
11504 + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
11505 + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
11506 + [PHY_INTERFACE_MODE_RTBI] = "rtbi",
11507 + [PHY_INTERFACE_MODE_XGMII] = "xgmii",
11508 + [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500",
11511 +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
11515 + for (i = 0; i < ARRAY_SIZE(phy_str); i++)
11516 + if (strcmp(str, phy_str[i]) == 0)
11517 + return (phy_interface_t)i;
11519 + return PHY_INTERFACE_MODE_MII;
11522 +static const uint16_t phy2speed[] = {
11523 + [PHY_INTERFACE_MODE_MII] = SPEED_100,
11524 + [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
11525 + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
11526 + [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
11527 + [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
11528 + [PHY_INTERFACE_MODE_RMII] = SPEED_100,
11529 + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
11530 + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
11531 + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
11532 + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
11533 + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
11534 + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
11535 + [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500,
11538 +static struct mac_device * __cold
11539 +alloc_macdev(struct device *dev, size_t sizeof_priv,
11540 + void (*setup)(struct mac_device *mac_dev))
11542 + struct mac_device *mac_dev;
11544 + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
11545 + if (unlikely(mac_dev == NULL))
11546 + mac_dev = ERR_PTR(-ENOMEM);
11548 + mac_dev->dev = dev;
11549 + dev_set_drvdata(dev, mac_dev);
11556 +static int __cold free_macdev(struct mac_device *mac_dev)
11558 + dev_set_drvdata(mac_dev->dev, NULL);
11560 + return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
11563 +static const struct of_device_id mac_match[] = {
11565 + .compatible = "fsl,fman-1g-mac"
11568 + .compatible = "fsl,fman-10g-mac"
11571 + .compatible = "fsl,fman-memac"
11575 +MODULE_DEVICE_TABLE(of, mac_match);
11577 +static int __cold mac_probe(struct platform_device *_of_dev)
11580 + struct device *dev;
11581 + struct device_node *mac_node, *dev_node;
11582 + struct mac_device *mac_dev;
11583 + struct platform_device *of_dev;
11584 + struct resource res;
11585 + const uint8_t *mac_addr;
11586 + const char *char_prop;
11589 + const struct of_device_id *match;
11591 + dev = &_of_dev->dev;
11592 + mac_node = dev->of_node;
11594 + match = of_match_device(mac_match, dev);
11598 + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
11601 + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
11603 + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
11604 + if (IS_ERR(mac_dev)) {
11605 + _errno = PTR_ERR(mac_dev);
11606 + dev_err(dev, "alloc_macdev() = %d\n", _errno);
11610 + INIT_LIST_HEAD(&mac_dev->mc_addr_list);
11612 + /* Get the FM node */
11613 + dev_node = of_get_parent(mac_node);
11614 + if (unlikely(dev_node == NULL)) {
11615 + dev_err(dev, "of_get_parent(%s) failed\n",
11616 + mac_node->full_name);
11617 + _errno = -EINVAL;
11618 + goto _return_dev_set_drvdata;
11621 + of_dev = of_find_device_by_node(dev_node);
11622 + if (unlikely(of_dev == NULL)) {
11623 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
11624 + dev_node->full_name);
11625 + _errno = -EINVAL;
11626 + goto _return_of_node_put;
11629 + mac_dev->fm_dev = fm_bind(&of_dev->dev);
11630 + if (unlikely(mac_dev->fm_dev == NULL)) {
11631 + dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
11632 + _errno = -ENODEV;
11633 + goto _return_of_node_put;
11636 + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
11637 + of_node_put(dev_node);
11639 + /* Get the address of the memory mapped registers */
11640 + _errno = of_address_to_resource(mac_node, 0, &res);
11641 + if (unlikely(_errno < 0)) {
11642 + dev_err(dev, "of_address_to_resource(%s) = %d\n",
11643 + mac_node->full_name, _errno);
11644 + goto _return_dev_set_drvdata;
11647 + mac_dev->res = __devm_request_region(
11649 + fm_get_mem_region(mac_dev->fm_dev),
11650 + res.start, res.end + 1 - res.start, "mac");
11651 + if (unlikely(mac_dev->res == NULL)) {
11652 + dev_err(dev, "__devm_request_mem_region(mac) failed\n");
11654 + goto _return_dev_set_drvdata;
11657 + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
11658 + mac_dev->res->end + 1
11659 + - mac_dev->res->start);
11660 + if (unlikely(mac_dev->vaddr == NULL)) {
11661 + dev_err(dev, "devm_ioremap() failed\n");
11663 + goto _return_dev_set_drvdata;
11666 +#define TBIPA_OFFSET 0x1c
11667 +#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
11668 + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
11669 + if (mac_dev->tbi_node) {
11670 + u32 tbiaddr = TBIPA_DEFAULT_ADDR;
11671 + const __be32 *tbi_reg;
11672 + void __iomem *addr;
11674 + tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
11676 + tbiaddr = be32_to_cpup(tbi_reg);
11677 + addr = mac_dev->vaddr + TBIPA_OFFSET;
11678 + /* TODO: out_be32 does not exist on ARM */
11679 + out_be32(addr, tbiaddr);
11682 + if (!of_device_is_available(mac_node)) {
11683 + devm_iounmap(dev, mac_dev->vaddr);
11684 + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
11685 + res.start, res.end + 1 - res.start);
11686 + fm_unbind(mac_dev->fm_dev);
11687 + devm_kfree(dev, mac_dev);
11688 + dev_set_drvdata(dev, NULL);
11692 + /* Get the cell-index */
11693 + _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
11694 + if (unlikely(_errno)) {
11695 + dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
11696 + mac_node->full_name);
11697 + goto _return_dev_set_drvdata;
11699 + mac_dev->cell_index = (uint8_t)cell_index;
11700 + if (mac_dev->cell_index >= 8)
11701 + mac_dev->cell_index -= 8;
11703 + /* Get the MAC address */
11704 + mac_addr = of_get_mac_address(mac_node);
11705 + if (unlikely(mac_addr == NULL)) {
11706 + dev_err(dev, "of_get_mac_address(%s) failed\n",
11707 + mac_node->full_name);
11708 + _errno = -EINVAL;
11709 + goto _return_dev_set_drvdata;
11711 + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
11713 + /* Verify the number of port handles */
11714 + nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
11715 + if (unlikely(nph < 0)) {
11716 + dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
11717 + mac_node->full_name);
11719 + goto _return_dev_set_drvdata;
11722 + if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
11723 + dev_err(dev, "Not supported number of port handles of mac node %s from device tree\n",
11724 + mac_node->full_name);
11725 + _errno = -EINVAL;
11726 + goto _return_dev_set_drvdata;
11729 + for_each_port_device(i, mac_dev->port_dev) {
11730 + dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
11731 + if (unlikely(dev_node == NULL)) {
11732 + dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
11733 + mac_node->full_name);
11734 + _errno = -EINVAL;
11735 + goto _return_of_node_put;
11738 + of_dev = of_find_device_by_node(dev_node);
11739 + if (unlikely(of_dev == NULL)) {
11740 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
11741 + dev_node->full_name);
11742 + _errno = -EINVAL;
11743 + goto _return_of_node_put;
11746 + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
11747 + if (unlikely(mac_dev->port_dev[i] == NULL)) {
11748 + dev_err(dev, "dev_get_drvdata(%s) failed\n",
11749 + dev_node->full_name);
11750 + _errno = -EINVAL;
11751 + goto _return_of_node_put;
11753 + of_node_put(dev_node);
11756 + /* Get the PHY connection type */
11757 + _errno = of_property_read_string(mac_node, "phy-connection-type",
11759 + if (unlikely(_errno)) {
11761 + "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
11762 + mac_node->full_name);
11763 + mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
11765 + mac_dev->phy_if = str2phy(char_prop);
11767 + mac_dev->link = false;
11768 + mac_dev->half_duplex = false;
11769 + mac_dev->speed = phy2speed[mac_dev->phy_if];
11770 + mac_dev->max_speed = mac_dev->speed;
11771 + mac_dev->if_support = DTSEC_SUPPORTED;
11772 + /* We don't support half-duplex in SGMII mode */
11773 + if (strstr(char_prop, "sgmii") || strstr(char_prop, "qsgmii") ||
11774 + strstr(char_prop, "sgmii-2500"))
11775 + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
11776 + SUPPORTED_100baseT_Half);
11778 + /* Gigabit support (no half-duplex) */
11779 + if (mac_dev->max_speed == SPEED_1000 ||
11780 + mac_dev->max_speed == SPEED_2500)
11781 + mac_dev->if_support |= SUPPORTED_1000baseT_Full;
11783 + /* The 10G interface only supports one mode */
11784 + if (strstr(char_prop, "xgmii"))
11785 + mac_dev->if_support = SUPPORTED_10000baseT_Full;
11787 + /* Get the rest of the PHY information */
11788 + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
11789 + if (!mac_dev->phy_node) {
11790 + struct phy_device *phy;
11792 + if (!of_phy_is_fixed_link(mac_node)) {
11793 + dev_err(dev, "Wrong PHY information of mac node %s\n",
11794 + mac_node->full_name);
11795 + goto _return_dev_set_drvdata;
11798 + _errno = of_phy_register_fixed_link(mac_node);
11800 + goto _return_dev_set_drvdata;
11802 + mac_dev->fixed_link = devm_kzalloc(mac_dev->dev,
11803 + sizeof(*mac_dev->fixed_link),
11805 + if (!mac_dev->fixed_link)
11806 + goto _return_dev_set_drvdata;
11808 + mac_dev->phy_node = of_node_get(mac_node);
11809 + phy = of_phy_find_device(mac_dev->phy_node);
11811 + goto _return_dev_set_drvdata;
11813 + mac_dev->fixed_link->link = phy->link;
11814 + mac_dev->fixed_link->speed = phy->speed;
11815 + mac_dev->fixed_link->duplex = phy->duplex;
11816 + mac_dev->fixed_link->pause = phy->pause;
11817 + mac_dev->fixed_link->asym_pause = phy->asym_pause;
11820 + _errno = mac_dev->init(mac_dev);
11821 + if (unlikely(_errno < 0)) {
11822 + dev_err(dev, "mac_dev->init() = %d\n", _errno);
11823 + goto _return_dev_set_drvdata;
11826 + /* pause frame autonegotiation enabled*/
11827 + mac_dev->autoneg_pause = true;
11829 + /* by intializing the values to false, force FMD to enable PAUSE frames
11832 + mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
11833 + mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
11834 + _errno = set_mac_active_pause(mac_dev, true, true);
11835 + if (unlikely(_errno < 0))
11836 + dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
11839 + "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
11840 + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
11841 + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
11845 +_return_of_node_put:
11846 + of_node_put(dev_node);
11847 +_return_dev_set_drvdata:
11848 + dev_set_drvdata(dev, NULL);
11853 +static int __cold mac_remove(struct platform_device *of_dev)
11856 + struct device *dev;
11857 + struct mac_device *mac_dev;
11859 + dev = &of_dev->dev;
11860 + mac_dev = (struct mac_device *)dev_get_drvdata(dev);
11862 + for_each_port_device(i, mac_dev->port_dev)
11863 + fm_port_unbind(mac_dev->port_dev[i]);
11865 + fm_unbind(mac_dev->fm_dev);
11867 + _errno = free_macdev(mac_dev);
11872 +static struct platform_driver mac_driver = {
11874 + .name = KBUILD_MODNAME,
11875 + .of_match_table = mac_match,
11876 + .owner = THIS_MODULE,
11878 + .probe = mac_probe,
11879 + .remove = mac_remove
11882 +static int __init __cold mac_load(void)
11886 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
11887 + KBUILD_BASENAME".c", __func__);
11889 + pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
11891 + _errno = platform_driver_register(&mac_driver);
11892 + if (unlikely(_errno < 0)) {
11893 + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
11894 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
11901 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
11902 + KBUILD_BASENAME".c", __func__);
11906 +module_init(mac_load);
11908 +static void __exit __cold mac_unload(void)
11910 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
11911 + KBUILD_BASENAME".c", __func__);
11913 + platform_driver_unregister(&mac_driver);
11915 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
11916 + KBUILD_BASENAME".c", __func__);
11918 +module_exit(mac_unload);
11920 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
11922 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
11924 + * Redistribution and use in source and binary forms, with or without
11925 + * modification, are permitted provided that the following conditions are met:
11926 + * * Redistributions of source code must retain the above copyright
11927 + * notice, this list of conditions and the following disclaimer.
11928 + * * Redistributions in binary form must reproduce the above copyright
11929 + * notice, this list of conditions and the following disclaimer in the
11930 + * documentation and/or other materials provided with the distribution.
11931 + * * Neither the name of Freescale Semiconductor nor the
11932 + * names of its contributors may be used to endorse or promote products
11933 + * derived from this software without specific prior written permission.
11936 + * ALTERNATIVELY, this software may be distributed under the terms of the
11937 + * GNU General Public License ("GPL") as published by the Free Software
11938 + * Foundation, either version 2 of that License or (at your option) any
11941 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11942 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11943 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11944 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11945 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11946 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11947 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11948 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11949 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11950 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11956 +#include <linux/device.h> /* struct device, BUS_ID_SIZE */
11957 +#include <linux/if_ether.h> /* ETH_ALEN */
11958 +#include <linux/phy.h> /* phy_interface_t, struct phy_device */
11959 +#include <linux/list.h>
11961 +#include "lnxwrp_fsl_fman.h" /* struct port_device */
11963 +enum {DTSEC, XGMAC, MEMAC};
11965 +struct mac_device {
11966 + struct device *dev;
11968 + uint8_t cell_index;
11969 + struct resource *res;
11970 + void __iomem *vaddr;
11971 + uint8_t addr[ETH_ALEN];
11974 + struct fm *fm_dev;
11975 + struct fm_port *port_dev[2];
11977 + phy_interface_t phy_if;
11980 + bool half_duplex;
11982 + uint16_t max_speed;
11983 + struct device_node *phy_node;
11984 + char fixed_bus_id[MII_BUS_ID_SIZE + 3];
11985 + struct device_node *tbi_node;
11986 + struct phy_device *phy_dev;
11988 + /* List of multicast addresses */
11989 + struct list_head mc_addr_list;
11990 + struct fixed_phy_status *fixed_link;
11992 + bool autoneg_pause;
11993 + bool rx_pause_req;
11994 + bool tx_pause_req;
11995 + bool rx_pause_active;
11996 + bool tx_pause_active;
11998 + struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
11999 + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
12000 + int (*init)(struct mac_device *mac_dev);
12001 + int (*start)(struct mac_device *mac_dev);
12002 + int (*stop)(struct mac_device *mac_dev);
12003 + int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
12004 + int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
12005 + int (*set_multi)(struct net_device *net_dev,
12006 + struct mac_device *mac_dev);
12007 + int (*uninit)(struct fm_mac_dev *fm_mac_dev);
12008 + int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
12009 + int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
12010 + int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
12011 + int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
12012 + int (*fm_rtc_enable)(struct fm *fm_dev);
12013 + int (*fm_rtc_disable)(struct fm *fm_dev);
12014 + int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
12015 + int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
12016 + int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
12017 + int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
12018 + int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
12019 + int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
12021 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
12022 + int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
12023 + int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
12025 + int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
12027 + int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
12028 + int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
12029 + int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
12032 +struct mac_address {
12033 + uint8_t addr[ETH_ALEN];
12034 + struct list_head list;
12037 +#define get_fm_handle(net_dev) \
12038 + (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
12040 +#define for_each_port_device(i, port_dev) \
12041 + for (i = 0; i < ARRAY_SIZE(port_dev); i++)
12043 +static inline __attribute((nonnull)) void *macdev_priv(
12044 + const struct mac_device *mac_dev)
12046 + return (void *)mac_dev + sizeof(*mac_dev);
12049 +extern const char *mac_driver_description;
12050 +extern const size_t mac_sizeof_priv[];
12051 +extern void (*const mac_setup[])(struct mac_device *mac_dev);
12053 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
12054 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
12056 +#endif /* __MAC_H */
12058 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
12060 +/* Copyright 2011-2012 Freescale Semiconductor Inc.
12062 + * Redistribution and use in source and binary forms, with or without
12063 + * modification, are permitted provided that the following conditions are met:
12064 + * * Redistributions of source code must retain the above copyright
12065 + * notice, this list of conditions and the following disclaimer.
12066 + * * Redistributions in binary form must reproduce the above copyright
12067 + * notice, this list of conditions and the following disclaimer in the
12068 + * documentation and/or other materials provided with the distribution.
12069 + * * Neither the name of Freescale Semiconductor nor the
12070 + * names of its contributors may be used to endorse or promote products
12071 + * derived from this software without specific prior written permission.
12074 + * ALTERNATIVELY, this software may be distributed under the terms of the
12075 + * GNU General Public License ("GPL") as published by the Free Software
12076 + * Foundation, either version 2 of that License or (at your option) any
12079 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12080 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12081 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12082 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12083 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12084 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12085 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12086 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12087 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12088 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12091 +/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
12092 + * Validates device-tree configuration and sets up the offline ports.
12095 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
12096 +#define pr_fmt(fmt) \
12097 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
12098 + KBUILD_BASENAME".c", __LINE__, __func__
12100 +#define pr_fmt(fmt) \
12101 + KBUILD_MODNAME ": " fmt
12105 +#include <linux/init.h>
12106 +#include <linux/module.h>
12107 +#include <linux/of_platform.h>
12108 +#include <linux/fsl_qman.h>
12110 +#include "offline_port.h"
12111 +#include "dpaa_eth.h"
12112 +#include "dpaa_eth_common.h"
12114 +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
12115 +/* Manip extra space and data alignment for fragmentation */
12116 +#define FRAG_MANIP_SPACE 128
12117 +#define FRAG_DATA_ALIGN 64
12120 +MODULE_LICENSE("Dual BSD/GPL");
12121 +MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
12122 +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
/* Device-tree compatibles handled by this driver: a Linux-private OH port
 * and one shared with another partition (see ownership check in probe). */
12125 +static const struct of_device_id oh_port_match_table[] = {
12127 +	.compatible = "fsl,dpa-oh"
12130 +	.compatible = "fsl,dpa-oh-shared"
12134 +MODULE_DEVICE_TABLE(of, oh_port_match_table);
12138 +static int oh_suspend(struct device *dev)
12140 + struct dpa_oh_config_s *oh_config;
12142 + oh_config = dev_get_drvdata(dev);
12143 + return fm_port_suspend(oh_config->oh_port);
12146 +static int oh_resume(struct device *dev)
12148 + struct dpa_oh_config_s *oh_config;
12150 + oh_config = dev_get_drvdata(dev);
12151 + return fm_port_resume(oh_config->oh_port);
/* PM ops table; OH_PM_OPS degrades to NULL when CONFIG_PM is off so the
 * platform_driver definition can use it unconditionally. */
12154 +static const struct dev_pm_ops oh_pm_ops = {
12155 +	.suspend = oh_suspend,
12156 +	.resume = oh_resume,
12159 +#define OH_PM_OPS (&oh_pm_ops)
12161 +#else /* CONFIG_PM */
12163 +#define OH_PM_OPS NULL
12165 +#endif /* CONFIG_PM */
12167 +/* Creates Frame Queues */
/* Create and schedule one frame queue @fq_id on @channel/@wq_id.
 * On qman_init_fq() failure the freshly created FQ is destroyed again.
 * Returns 0 on success (error propagation lines are elided in this view).
 * NOTE(review): no memset of fq_opts is visible before we_mask is set —
 * confirm the elided lines zero it, otherwise fqd carries stack garbage. */
12168 +static uint32_t oh_fq_create(struct qman_fq *fq,
12169 +	uint32_t fq_id, uint16_t channel,
12172 +	struct qm_mcc_initfq fq_opts;
12173 +	uint32_t create_flags, init_flags;
12174 +	uint32_t ret = 0;
12179 +	/* Set flags for FQ create */
12180 +	create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
12182 +	/* Create frame queue */
12183 +	ret = qman_create_fq(fq_id, create_flags, fq);
12187 +	/* Set flags for FQ init */
12188 +	init_flags = QMAN_INITFQ_FLAG_SCHED;
12190 +	/* Set FQ init options. Specify destination WQ ID and channel */
12191 +	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
12192 +	fq_opts.fqd.dest.wq = wq_id;
12193 +	fq_opts.fqd.dest.channel = channel;
12195 +	/* Initialize frame queue */
12196 +	ret = qman_init_fq(fq, init_flags, &fq_opts);
/* On init failure: undo the create so the FQ is not leaked. */
12198 +	qman_destroy_fq(fq, 0);
/* Log a single FQ id / channel pair.
 * NOTE(review): the comment implies channel==0 entries are skipped, but the
 * guarding `if` is on an elided line — confirm before relying on it. */
12205 +static void dump_fq(struct device *dev, int fqid, uint16_t channel)
12208 +	/* display fqs with a valid (!= 0) destination channel */
12209 +	dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
12213 +static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
12214 + int fqs_count, uint16_t channel_id)
12217 + for (i = 0; i < fqs_count; i++)
12218 + dump_fq(dev, (fqs + i)->fqid, channel_id);
/* Dump every queue referenced/created for one OH port: default/error FQs,
 * legacy TX queues, then the ingress and egress duple lists. */
12221 +static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
12223 +	struct list_head *fq_list;
12224 +	struct fq_duple *fqd;
12227 +	dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
12228 +	dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
12230 +	/* TX queues (old initialization) */
12231 +	dev_info(dev, "Initialized queues:");
/* NOTE(review): the loop below hands the WHOLE egress array plus its full
 * count to dump_fq_duple on every iteration, so the set is printed
 * egress_cnt times (O(n^2) log lines). A single unlooped call — or passing
 * one element per iteration — looks intended; confirm against the elided
 * argument on the continuation line. */
12232 +	for (i = 0; i < conf->egress_cnt; i++)
12233 +	dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
12236 +	/* initialized ingress queues */
12237 +	list_for_each(fq_list, &conf->fqs_ingress_list) {
12238 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12239 +	dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
12242 +	/* initialized egress queues */
12243 +	list_for_each(fq_list, &conf->fqs_egress_list) {
12244 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12245 +	dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
12249 +/* Destroys Frame Queues */
/* Tear down one FQ: retire -> out-of-service -> destroy. Failures of the
 * first two steps are logged but teardown still proceeds (best effort). */
12250 +static void oh_fq_destroy(struct qman_fq *fq)
12254 +	_errno = qman_retire_fq(fq, NULL);
12255 +	if (unlikely(_errno < 0))
12256 +	pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
12257 +	KBUILD_BASENAME".c", __LINE__, __func__,
12258 +	qman_fq_fqid(fq), _errno);
12260 +	_errno = qman_oos_fq(fq);
12261 +	if (unlikely(_errno < 0)) {
12262 +	pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
12263 +	KBUILD_BASENAME".c", __LINE__, __func__,
12264 +	qman_fq_fqid(fq), _errno);
12267 +	qman_destroy_fq(fq, 0);
12270 +/* Allocation code for the OH port's PCD frame queues */
/* PCD FQID-allocation callback registered with the FMan port; OH ports do
 * not support dynamic PCD FQ allocation, so this only logs.
 * NOTE(review): the return value is on an elided line — presumably an
 * error code; verify. */
12271 +static int __cold oh_alloc_pcd_fqids(struct device *dev,
12273 +	uint8_t alignment,
12274 +	uint32_t *base_fqid)
12276 +	dev_crit(dev, "callback not implemented!\n");
/* PCD FQID-free callback; intentionally unimplemented for OH ports
 * (mirror of oh_alloc_pcd_fqids; return line elided in this view). */
12282 +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
12284 +	dev_crit(dev, "callback not implemented!\n");
12290 +static void oh_set_buffer_layout(struct fm_port *port,
12291 + struct dpa_buffer_layout_s *layout)
12293 + struct fm_port_params params;
12295 + layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
12296 + layout->parse_results = true;
12297 + layout->hash_results = true;
12298 + layout->time_stamp = false;
12300 + fm_port_get_buff_layout_ext_params(port, ¶ms);
12301 + layout->manip_extra_space = params.manip_extra_space;
12302 + layout->data_align = params.data_align;
/* Probe one "fsl,dpa-oh"/"fsl,dpa-oh-shared" device-tree node:
 *  1) resolve the referenced FMan OH port node and its cell-index/channel;
 *  2) decide ownership (shared ports may belong to another partition);
 *  3) parse + create ingress, egress and legacy TX frame queues;
 *  4) bind the fm_port, read its buffer pools, optionally enable IP
 *     fragmentation, set TX/PCD params and enable the port.
 * Returns 0 on success or a negative errno; the return_kfree path unwinds
 * all allocations and FQ lists.
 * NOTE(review): this view of the patch is subsampled — several braces,
 * blank lines and error checks are elided; comments describe visible lines.
 */
12306 +oh_port_probe(struct platform_device *_of_dev)
12308 +	struct device *dpa_oh_dev;
12309 +	struct device_node *dpa_oh_node;
12310 +	int lenp, _errno = 0, fq_idx, duple_idx;
12311 +	int n_size, i, j, ret, duples_count;
12312 +	struct platform_device *oh_of_dev;
12313 +	struct device_node *oh_node, *bpool_node = NULL, *root_node;
12314 +	struct device *oh_dev;
12315 +	struct dpa_oh_config_s *oh_config = NULL;
12316 +	const __be32 *oh_all_queues;
12317 +	const __be32 *channel_ids;
12318 +	const __be32 *oh_tx_queues;
12319 +	uint32_t queues_count;
12320 +	uint32_t crt_fqid_base;
12321 +	uint32_t crt_fq_count;
12322 +	bool frag_enabled = false;
12323 +	struct fm_port_params oh_port_tx_params;
12324 +	struct fm_port_pcd_param oh_port_pcd_params;
12325 +	struct dpa_buffer_layout_s buf_layout;
12327 +	/* True if the current partition owns the OH port. */
12328 +	bool init_oh_port;
12330 +	const struct of_device_id *match;
12331 +	int crt_ext_pools_count;
12332 +	u32 ext_pool_size;
12336 +	int channel_ids_count;
12338 +	struct fq_duple *fqd;
12339 +	struct list_head *fq_list, *fq_list_tmp;
12341 +	const __be32 *bpool_cfg;
12344 +	memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
12345 +	dpa_oh_dev = &_of_dev->dev;
12346 +	dpa_oh_node = dpa_oh_dev->of_node;
12347 +	BUG_ON(dpa_oh_node == NULL);
12349 +	match = of_match_device(oh_port_match_table, dpa_oh_dev);
12353 +	dev_dbg(dpa_oh_dev, "Probing OH port...\n");
12355 +	/* Find the referenced OH node */
12356 +	oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
12357 +	if (oh_node == NULL) {
12358 +	dev_err(dpa_oh_dev,
12359 +	"Can't find OH node referenced from node %s\n",
12360 +	dpa_oh_node->full_name);
12363 +	dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
12364 +	match->compatible);
12366 +	_errno = of_property_read_u32(oh_node, "cell-index", &port_id);
12368 +	dev_err(dpa_oh_dev, "No port id found in node %s\n",
12369 +	dpa_oh_node->full_name);
12370 +	goto return_kfree;
12373 +	_errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
12376 +	dev_err(dpa_oh_dev, "No channel id found in node %s\n",
12377 +	dpa_oh_node->full_name);
12378 +	goto return_kfree;
12381 +	oh_of_dev = of_find_device_by_node(oh_node);
12382 +	BUG_ON(oh_of_dev == NULL);
12383 +	oh_dev = &oh_of_dev->dev;
12385 +	/* The OH port must be initialized exactly once.
12386 +	 * The following scenarios are of interest:
12387 +	 * - the node is Linux-private (will always initialize it);
12388 +	 * - the node is shared between two Linux partitions
12389 +	 * (only one of them will initialize it);
12390 +	 * - the node is shared between a Linux and a LWE partition
12391 +	 * (Linux will initialize it) - "fsl,dpa-oh-shared"
12394 +	/* Check if the current partition owns the OH port
12395 +	 * and ought to initialize it. It may be the case that we leave this
12396 +	 * to another (also Linux) partition.
/* strcmp != 0 means "not the shared compatible" => we own the port. */
12398 +	init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
12400 +	/* If we aren't the "owner" of the OH node, we're done here. */
12401 +	if (!init_oh_port) {
12402 +	dev_dbg(dpa_oh_dev,
12403 +	"Not owning the shared OH port %s, will not initialize it.\n",
12404 +	oh_node->full_name);
12405 +	of_node_put(oh_node);
12409 +	/* Allocate OH dev private data */
12410 +	oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
12411 +	if (oh_config == NULL) {
12412 +	dev_err(dpa_oh_dev,
12413 +	"Can't allocate private data for OH node %s referenced from node %s!\n",
12414 +	oh_node->full_name, dpa_oh_node->full_name);
12415 +	_errno = -ENOMEM;
12416 +	goto return_kfree;
12419 +	INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
12420 +	INIT_LIST_HEAD(&oh_config->fqs_egress_list);
12422 +	/* FQs that enter OH port */
12424 +	oh_all_queues = of_get_property(dpa_oh_node,
12425 +	"fsl,qman-frame-queues-ingress", &lenp);
/* Property is a list of <base count> pairs; odd trailing cell is dropped. */
12426 +	if (lenp % (2 * sizeof(*oh_all_queues))) {
12427 +	dev_warn(dpa_oh_dev,
12428 +	"Wrong ingress queues format for OH node %s referenced from node %s!\n",
12429 +	oh_node->full_name, dpa_oh_node->full_name);
12430 +	/* just ignore the last unpaired value */
12433 +	duples_count = lenp / (2 * sizeof(*oh_all_queues));
/* NOTE(review): dev_err for an informational message — the egress twin
 * below uses dev_dbg; this should likely be dev_dbg too. */
12434 +	dev_err(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
12436 +	for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
12437 +	crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
12438 +	crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
12440 +	fqd = devm_kzalloc(dpa_oh_dev,
12441 +	sizeof(struct fq_duple), GFP_KERNEL);
12443 +	dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
12444 +	oh_node->full_name,
12445 +	dpa_oh_node->full_name);
12446 +	_errno = -ENOMEM;
12447 +	goto return_kfree;
12450 +	fqd->fqs = devm_kzalloc(dpa_oh_dev,
12451 +	crt_fq_count * sizeof(struct qman_fq),
12454 +	dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
12455 +	oh_node->full_name,
12456 +	dpa_oh_node->full_name);
12457 +	_errno = -ENOMEM;
12458 +	goto return_kfree;
12461 +	for (j = 0; j < crt_fq_count; j++)
12462 +	(fqd->fqs + j)->fqid = crt_fqid_base + j;
12463 +	fqd->fqs_count = crt_fq_count;
/* Ingress FQs all target this port's own QMan channel. */
12464 +	fqd->channel_id = (uint16_t)channel_id;
12465 +	list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
12468 +	/* create the ingress queues */
12469 +	list_for_each(fq_list, &oh_config->fqs_ingress_list) {
12470 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12472 +	for (j = 0; j < fqd->fqs_count; j++) {
12473 +	ret = oh_fq_create(fqd->fqs + j,
12474 +	(fqd->fqs + j)->fqid,
12475 +	fqd->channel_id, 3);
12477 +	dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
12478 +	(fqd->fqs + j)->fqid,
12479 +	oh_node->full_name,
12480 +	dpa_oh_node->full_name);
12481 +	_errno = -EINVAL;
12482 +	goto return_kfree;
12487 +	/* FQs that exit OH port */
12489 +	oh_all_queues = of_get_property(dpa_oh_node,
12490 +	"fsl,qman-frame-queues-egress", &lenp);
12491 +	if (lenp % (2 * sizeof(*oh_all_queues))) {
12492 +	dev_warn(dpa_oh_dev,
12493 +	"Wrong egress queues format for OH node %s referenced from node %s!\n",
12494 +	oh_node->full_name, dpa_oh_node->full_name);
12495 +	/* just ignore the last unpaired value */
12498 +	duples_count = lenp / (2 * sizeof(*oh_all_queues));
12499 +	dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
12501 +	for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
12502 +	crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
12503 +	crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
12505 +	fqd = devm_kzalloc(dpa_oh_dev,
12506 +	sizeof(struct fq_duple), GFP_KERNEL);
12508 +	dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
12509 +	oh_node->full_name,
12510 +	dpa_oh_node->full_name);
12511 +	_errno = -ENOMEM;
12512 +	goto return_kfree;
12515 +	fqd->fqs = devm_kzalloc(dpa_oh_dev,
12516 +	crt_fq_count * sizeof(struct qman_fq),
12519 +	dev_err(dpa_oh_dev,
12520 +	"Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
12521 +	oh_node->full_name,
12522 +	dpa_oh_node->full_name);
12523 +	_errno = -ENOMEM;
12524 +	goto return_kfree;
12527 +	for (j = 0; j < crt_fq_count; j++)
12528 +	(fqd->fqs + j)->fqid = crt_fqid_base + j;
12529 +	fqd->fqs_count = crt_fq_count;
12530 +	/* channel ID is specified in another attribute */
12531 +	fqd->channel_id = 0;
12532 +	list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
12534 +	/* allocate the queue */
12538 +	/* channel_ids for FQs that exit OH port */
12540 +	channel_ids = of_get_property(dpa_oh_node,
12541 +	"fsl,qman-channel-ids-egress", &lenp);
12543 +	channel_ids_count = lenp / (sizeof(*channel_ids));
12544 +	if (channel_ids_count != duples_count) {
12545 +	dev_warn(dpa_oh_dev,
12546 +	"Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
12547 +	oh_node->full_name, dpa_oh_node->full_name);
12548 +	/* just ignore the queues that do not have a Channel ID */
/* Pair each egress duple (in list order) with its channel id. */
12552 +	list_for_each(fq_list, &oh_config->fqs_egress_list) {
12553 +	if (channel_idx + 1 > channel_ids_count)
12555 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12556 +	fqd->channel_id =
12557 +	(uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
12560 +	/* create egress queues */
12561 +	list_for_each(fq_list, &oh_config->fqs_egress_list) {
12562 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12564 +	if (fqd->channel_id == 0) {
12565 +	/* missing channel id in dts */
12569 +	for (j = 0; j < fqd->fqs_count; j++) {
12570 +	ret = oh_fq_create(fqd->fqs + j,
12571 +	(fqd->fqs + j)->fqid,
12572 +	fqd->channel_id, 3);
12574 +	dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
12575 +	(fqd->fqs + j)->fqid,
12576 +	oh_node->full_name,
12577 +	dpa_oh_node->full_name);
12578 +	_errno = -EINVAL;
12579 +	goto return_kfree;
12584 +	/* Read FQ ids/nums for the DPA OH node */
12585 +	oh_all_queues = of_get_property(dpa_oh_node,
12586 +	"fsl,qman-frame-queues-oh", &lenp);
12587 +	if (oh_all_queues == NULL) {
12588 +	dev_err(dpa_oh_dev,
12589 +	"No frame queues have been defined for OH node %s referenced from node %s\n",
12590 +	oh_node->full_name, dpa_oh_node->full_name);
12591 +	_errno = -EINVAL;
12592 +	goto return_kfree;
12595 +	/* Check that the OH error and default FQs are there */
12596 +	BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
12597 +	queues_count = lenp / (2 * sizeof(*oh_all_queues));
12598 +	if (queues_count != 2) {
12599 +	dev_err(dpa_oh_dev,
12600 +	"Error and Default queues must be defined for OH node %s referenced from node %s\n",
12601 +	oh_node->full_name, dpa_oh_node->full_name);
12602 +	_errno = -EINVAL;
12603 +	goto return_kfree;
12606 +	/* Read the FQIDs defined for this OH port */
12607 +	dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
12610 +	/* Error FQID - must be present */
12611 +	crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
12612 +	crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
12613 +	if (crt_fq_count != 1) {
12614 +	dev_err(dpa_oh_dev,
12615 +	"Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
12616 +	oh_node->full_name, dpa_oh_node->full_name,
12618 +	_errno = -EINVAL;
12619 +	goto return_kfree;
12621 +	oh_config->error_fqid = crt_fqid_base;
12622 +	dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
12623 +	oh_config->error_fqid, oh_node->full_name);
12625 +	/* Default FQID - must be present */
12626 +	crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
12627 +	crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
12628 +	if (crt_fq_count != 1) {
12629 +	dev_err(dpa_oh_dev,
12630 +	"Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
12631 +	oh_node->full_name, dpa_oh_node->full_name,
12633 +	_errno = -EINVAL;
12634 +	goto return_kfree;
12636 +	oh_config->default_fqid = crt_fqid_base;
12637 +	dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
12638 +	oh_config->default_fqid, oh_node->full_name);
12640 +	/* TX FQID - presence is optional */
12641 +	oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
12643 +	if (oh_tx_queues == NULL) {
12644 +	dev_dbg(dpa_oh_dev,
12645 +	"No tx queues have been defined for OH node %s referenced from node %s\n",
12646 +	oh_node->full_name, dpa_oh_node->full_name);
12647 +	goto config_port;
12650 +	/* Check that queues-tx has only a base and a count defined */
12651 +	BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
12652 +	queues_count = lenp / (2 * sizeof(*oh_tx_queues));
12653 +	if (queues_count != 1) {
12654 +	dev_err(dpa_oh_dev,
12655 +	"TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
12656 +	oh_node->full_name, dpa_oh_node->full_name);
12657 +	_errno = -EINVAL;
12658 +	goto return_kfree;
12662 +	crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
12663 +	crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
12664 +	oh_config->egress_cnt = crt_fq_count;
12666 +	/* Allocate TX queues */
12667 +	dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
12668 +	oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
12669 +	crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
12670 +	if (oh_config->egress_fqs == NULL) {
12671 +	dev_err(dpa_oh_dev,
12672 +	"Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
12673 +	oh_node->full_name, dpa_oh_node->full_name);
12674 +	_errno = -ENOMEM;
12675 +	goto return_kfree;
12678 +	/* Create TX queues */
12679 +	for (i = 0; i < crt_fq_count; i++) {
12680 +	ret = oh_fq_create(oh_config->egress_fqs + i,
12681 +	crt_fqid_base + i, (uint16_t)channel_id, 3);
12683 +	dev_err(dpa_oh_dev,
12684 +	"Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
12685 +	crt_fqid_base + i, oh_node->full_name,
12686 +	dpa_oh_node->full_name);
12687 +	_errno = -EINVAL;
12688 +	goto return_kfree;
12693 +	/* Get a handle to the fm_port so we can set
12694 +	 * its configuration params
12696 +	oh_config->oh_port = fm_port_bind(oh_dev);
12697 +	if (oh_config->oh_port == NULL) {
12698 +	dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
12699 +	oh_node->full_name);
12700 +	_errno = -EINVAL;
12701 +	goto return_kfree;
12704 +	oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
12706 +	/* read the pool handlers */
12707 +	crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
12708 +	"fsl,bman-buffer-pools", NULL);
12709 +	if (crt_ext_pools_count <= 0) {
12710 +	dev_info(dpa_oh_dev,
12711 +	"OH port %s has no buffer pool. Fragmentation will not be enabled\n",
12712 +	oh_node->full_name);
12716 +	/* used for reading ext_pool_size*/
12717 +	root_node = of_find_node_by_path("/");
12718 +	if (root_node == NULL) {
12719 +	dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
12720 +	_errno = -EINVAL;
12721 +	goto return_kfree;
12724 +	n_size = of_n_size_cells(root_node);
12725 +	of_node_put(root_node);
12727 +	dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
12728 +	crt_ext_pools_count);
12730 +	oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
12732 +	for (i = 0; i < crt_ext_pools_count; i++) {
12733 +	bpool_node = of_parse_phandle(dpa_oh_node,
12734 +	"fsl,bman-buffer-pools", i);
12735 +	if (bpool_node == NULL) {
12736 +	dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
12737 +	_errno = -EINVAL;
12738 +	goto return_kfree;
12741 +	_errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
12743 +	dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
12744 +	_errno = -EINVAL;
12745 +	goto return_kfree;
12748 +	oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
12749 +	dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
12751 +	bpool_cfg = of_get_property(bpool_node,
12752 +	"fsl,bpool-ethernet-cfg", &lenp);
12753 +	if (bpool_cfg == NULL) {
12754 +	dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
12755 +	_errno = -EINVAL;
12756 +	goto return_kfree;
/* cfg is <count size base>; skip the count cell to read the size. */
12759 +	ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
12760 +	oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
12761 +	dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
12763 +	of_node_put(bpool_node);
/* Fragmentation needs the exact FRAG_* layout values set by the port. */
12767 +	if (buf_layout.data_align != FRAG_DATA_ALIGN ||
12768 +	buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
12771 +	frag_enabled = true;
12772 +	dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
12776 +	of_node_put(oh_node);
12777 +	/* Set Tx params */
12778 +	dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
12779 +	oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
12781 +	/* Set PCD params */
12782 +	oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
12783 +	oh_port_pcd_params.cbf = oh_free_pcd_fqids;
12784 +	oh_port_pcd_params.dev = dpa_oh_dev;
12785 +	fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
12787 +	dev_set_drvdata(dpa_oh_dev, oh_config);
12789 +	/* Enable the OH port */
12790 +	_errno = fm_port_enable(oh_config->oh_port);
12792 +	goto return_kfree;
12794 +	dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
12796 +	/* print of all referenced & created queues */
12797 +	dump_oh_config(dpa_oh_dev, oh_config);
/* Error path: release nodes, free TX FQ array and both FQ duple lists. */
12803 +	of_node_put(bpool_node);
12805 +	of_node_put(oh_node);
12806 +	if (oh_config && oh_config->egress_fqs)
12807 +	devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
12809 +	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
12810 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12811 +	list_del(fq_list);
12812 +	devm_kfree(dpa_oh_dev, fqd->fqs);
12813 +	devm_kfree(dpa_oh_dev, fqd);
12816 +	list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
12817 +	fqd = list_entry(fq_list, struct fq_duple, fq_list);
12818 +	list_del(fq_list);
12819 +	devm_kfree(dpa_oh_dev, fqd->fqs);
12820 +	devm_kfree(dpa_oh_dev, fqd);
12823 +	devm_kfree(dpa_oh_dev, oh_config);
/* Remove path: destroy all TX FQs, disable the FMan port, then release the
 * devm allocations and clear drvdata. Returns 0 or a negative errno. */
12827 +static int __cold oh_port_remove(struct platform_device *_of_dev)
12829 +	int _errno = 0, i;
12830 +	struct dpa_oh_config_s *oh_config;
12832 +	pr_info("Removing OH port...\n");
12834 +	oh_config = dev_get_drvdata(&_of_dev->dev);
12835 +	if (oh_config == NULL) {
12836 +	pr_err(KBUILD_MODNAME
12837 +	": %s:%hu:%s(): No OH config in device private data!\n",
12838 +	KBUILD_BASENAME".c", __LINE__, __func__);
12839 +	_errno = -ENODEV;
12840 +	goto return_error;
12843 +	if (oh_config->egress_fqs)
12844 +	for (i = 0; i < oh_config->egress_cnt; i++)
12845 +	oh_fq_destroy(oh_config->egress_fqs + i);
12847 +	if (oh_config->oh_port == NULL) {
12848 +	pr_err(KBUILD_MODNAME
12849 +	": %s:%hu:%s(): No fm port in device private data!\n",
12850 +	KBUILD_BASENAME".c", __LINE__, __func__);
12851 +	_errno = -EINVAL;
12852 +	goto free_egress_fqs;
12855 +	_errno = fm_port_disable(oh_config->oh_port);
/* free_egress_fqs label (elided): fall-through cleanup below. */
12858 +	if (oh_config->egress_fqs)
12859 +	devm_kfree(&_of_dev->dev, oh_config->egress_fqs)
12860 +	devm_kfree(&_of_dev->dev, oh_config);
12861 +	dev_set_drvdata(&_of_dev->dev, NULL);
/* Platform driver glue; matches the compatibles in oh_port_match_table
 * (PM ops field is on an elided line — presumably OH_PM_OPS). */
12867 +static struct platform_driver oh_port_driver = {
12869 +	.name = KBUILD_MODNAME,
12870 +	.of_match_table = oh_port_match_table,
12871 +	.owner = THIS_MODULE,
12874 +	.probe = oh_port_probe,
12875 +	.remove = oh_port_remove
/* Module init: announce the driver and register the platform driver;
 * registration failure is logged (return of _errno is on elided lines). */
12878 +static int __init __cold oh_port_load(void)
12882 +	pr_info(OH_MOD_DESCRIPTION "\n");
12884 +	_errno = platform_driver_register(&oh_port_driver);
12885 +	if (_errno < 0) {
12886 +	pr_err(KBUILD_MODNAME
12887 +	": %s:%hu:%s(): platform_driver_register() = %d\n",
12888 +	KBUILD_BASENAME".c", __LINE__, __func__, _errno);
12891 +	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12892 +	KBUILD_BASENAME".c", __func__);
12895 +module_init(oh_port_load);
/* Module exit: unregister the platform driver (entry/exit trace logs). */
12897 +static void __exit __cold oh_port_unload(void)
12899 +	pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
12900 +	KBUILD_BASENAME".c", __func__);
12902 +	platform_driver_unregister(&oh_port_driver);
12904 +	pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12905 +	KBUILD_BASENAME".c", __func__);
12907 +module_exit(oh_port_unload);
12909 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
12911 +/* Copyright 2011 Freescale Semiconductor Inc.
12913 + * Redistribution and use in source and binary forms, with or without
12914 + * modification, are permitted provided that the following conditions are met:
12915 + * * Redistributions of source code must retain the above copyright
12916 + * notice, this list of conditions and the following disclaimer.
12917 + * * Redistributions in binary form must reproduce the above copyright
12918 + * notice, this list of conditions and the following disclaimer in the
12919 + * documentation and/or other materials provided with the distribution.
12920 + * * Neither the name of Freescale Semiconductor nor the
12921 + * names of its contributors may be used to endorse or promote products
12922 + * derived from this software without specific prior written permission.
12925 + * ALTERNATIVELY, this software may be distributed under the terms of the
12926 + * GNU General Public License ("GPL") as published by the Free Software
12927 + * Foundation, either version 2 of that License or (at your option) any
12930 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12931 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12932 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12933 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12934 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12935 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12936 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12937 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12938 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12939 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12942 +#ifndef __OFFLINE_PORT_H
12943 +#define __OFFLINE_PORT_H
12948 +/* fqs are defined in duples (base_fq, fq_count) */
/* One contiguous run of frame queues sharing a destination channel;
 * linked into dpa_oh_config_s::fqs_{ingress,egress}_list.
 * (The `struct fq_duple {` opening and the count member are elided.) */
12950 +	struct qman_fq *fqs;
12952 +	uint16_t channel_id;
12953 +	struct list_head fq_list;
12956 +/* OH port configuration */
/* Per-OH-port state stored in the device's drvdata by oh_port_probe():
 * the error/default FQIDs, the bound fm_port handle, the legacy TX FQ
 * array (egress_cnt entries on `channel`), and the two fq_duple lists. */
12957 +struct dpa_oh_config_s {
12958 +	uint32_t error_fqid;
12959 +	uint32_t default_fqid;
12960 +	struct fm_port *oh_port;
12961 +	uint32_t egress_cnt;
12962 +	struct qman_fq *egress_fqs;
12963 +	uint16_t channel;
12965 +	struct list_head fqs_ingress_list;
12966 +	struct list_head fqs_egress_list;
12969 +#endif /* __OFFLINE_PORT_H */