[openwrt/openwrt.git] target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch
1 From 8089957ac5ac5f4f8436b1052dda7840f3bff3ea Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:14:12 +0800
4 Subject: [PATCH 12/30] fsl_ppfe: support layerscape
5
6 This is an integrated patch for layerscape pfe support.
7
8 Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/staging/fsl_ppfe/Kconfig | 20 +
12 drivers/staging/fsl_ppfe/Makefile | 19 +
13 drivers/staging/fsl_ppfe/TODO | 2 +
14 drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
15 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
16 .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 289 +++
17 .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
18 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
19 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
20 .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
21 .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
22 .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
23 drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
24 drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
25 drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
26 drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
27 drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
28 drivers/staging/fsl_ppfe/pfe_eth.c | 2474 ++++++++++++++++++++
29 drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
30 drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
31 drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
32 drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++++
33 drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++++
34 drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
35 drivers/staging/fsl_ppfe/pfe_hif_lib.c | 637 +++++
36 drivers/staging/fsl_ppfe/pfe_hif_lib.h | 240 ++
37 drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
38 drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
39 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 394 ++++
40 drivers/staging/fsl_ppfe/pfe_mod.c | 141 ++
41 drivers/staging/fsl_ppfe/pfe_mod.h | 112 +
42 drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
43 drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 +++++++
44 drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
45 34 files changed, 10443 insertions(+)
46 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
47 create mode 100644 drivers/staging/fsl_ppfe/Makefile
48 create mode 100644 drivers/staging/fsl_ppfe/TODO
49 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
50 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
51 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
52 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
53 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
54 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
55 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
56 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
57 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
58 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
59 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
60 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
61 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
62 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
63 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
64 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
65 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
66 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
67 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
68 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
69 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
70 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
71 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
72 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
73 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
74 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
75 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
76 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
77 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
78 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
79 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
80
81 --- /dev/null
82 +++ b/drivers/staging/fsl_ppfe/Kconfig
83 @@ -0,0 +1,20 @@
84 +#
85 +# Freescale Programmable Packet Forwarding Engine driver
86 +#
87 +config FSL_PPFE
88 + bool "Freescale PPFE Driver"
89 + default n
90 + ---help---
91 + Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
92 + It provides two high performance ethernet interfaces.
93 + This driver initializes, programs and controls the PPFE.
94 + Use this driver to enable network connectivity on LS1012A platforms.
95 +
96 +if FSL_PPFE
97 +
98 +config FSL_PPFE_UTIL_DISABLED
99 + bool "Disable PPFE UTIL Processor Engine"
100 + ---help---
101 +	  Enable the UTIL PE only if required.
102 +
103 +endif # FSL_PPFE
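
When FSL_PPFE_UTIL_DISABLED is set, the driver is expected to skip bringing up the UTIL PE. A minimal sketch of how the resulting CONFIG_ symbol would typically be consumed in driver code (the function name here is illustrative, not taken from this patch):

    #ifdef CONFIG_FSL_PPFE_UTIL_DISABLED
            /* UTIL PE stays in reset: skip firmware load and init */
    #else
            util_enable();  /* hypothetical UTIL PE bring-up call */
    #endif
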
104 --- /dev/null
105 +++ b/drivers/staging/fsl_ppfe/Makefile
106 @@ -0,0 +1,19 @@
107 +#
108 +# Makefile for Freescale PPFE driver
109 +#
110 +
111 +ccflags-y += -I$(src)/include -I$(src)
112 +
113 +obj-m += pfe.o
114 +
115 +pfe-y += pfe_mod.o \
116 + pfe_hw.o \
117 + pfe_firmware.o \
118 + pfe_ctrl.o \
119 + pfe_hif.o \
120 + pfe_hif_lib.o\
121 + pfe_eth.o \
122 + pfe_sysfs.o \
123 + pfe_debugfs.o \
124 + pfe_ls1012a_platform.o \
125 + pfe_hal.o
126 --- /dev/null
127 +++ b/drivers/staging/fsl_ppfe/TODO
128 @@ -0,0 +1,2 @@
129 +TODO:
130 + - provide pfe pe monitoring support
131 --- /dev/null
132 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
133 @@ -0,0 +1,78 @@
134 +/*
135 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
136 + * Copyright 2017 NXP
137 + *
138 + * This program is free software; you can redistribute it and/or modify
139 + * it under the terms of the GNU General Public License as published by
140 + * the Free Software Foundation; either version 2 of the License, or
141 + * (at your option) any later version.
142 + *
143 + * This program is distributed in the hope that it will be useful,
144 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
145 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
146 + * GNU General Public License for more details.
147 + *
148 + * You should have received a copy of the GNU General Public License
149 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
150 + */
151 +
152 +#ifndef _CBUS_H_
153 +#define _CBUS_H_
154 +
155 +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
156 +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
157 +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
158 +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
159 +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
160 +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
161 +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
162 +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
163 +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
164 +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
165 +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
166 +#define LMEM_SIZE 0x10000
167 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
168 +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
169 +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
170 +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
171 +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
172 +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
173 +
174 +/*
175 + * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
176 + * XXX_MEM_ACCESS_ADDR register bit definitions.
177 + */
178 +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
179 +#define PE_MEM_ACCESS_IMEM BIT(15)
180 +#define PE_MEM_ACCESS_DMEM BIT(16)
181 +
182 +/* Byte Enables of the Internal memory access. These are interpreted big-endian */
183 +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
184 + ({ typeof(size) size_ = (size); \
185 + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
186 +
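
A few concrete values, computed from the macro above, make the big-endian interpretation explicit (the enables land in bits [27:24] of the access word):

    /*
     * PE_MEM_ACCESS_BYTE_ENABLE(0, 4) == 0x0f000000   all four bytes
     * PE_MEM_ACCESS_BYTE_ENABLE(0, 2) == 0x0c000000   two upper bytes
     * PE_MEM_ACCESS_BYTE_ENABLE(2, 2) == 0x03000000   two lower bytes
     */
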
187 +#include "cbus/emac_mtip.h"
188 +#include "cbus/gpi.h"
189 +#include "cbus/bmu.h"
190 +#include "cbus/hif.h"
191 +#include "cbus/tmu_csr.h"
192 +#include "cbus/class_csr.h"
193 +#include "cbus/hif_nocpy.h"
194 +#include "cbus/util_csr.h"
195 +
196 +/* PFE cores states */
197 +#define CORE_DISABLE 0x00000000
198 +#define CORE_ENABLE 0x00000001
199 +#define CORE_SW_RESET 0x00000002
200 +
201 +/* LMEM defines */
202 +#define LMEM_HDR_SIZE 0x0010
203 +#define LMEM_BUF_SIZE_LN2 0x7
204 +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
205 +
206 +/* DDR defines */
207 +#define DDR_HDR_SIZE 0x0100
208 +#define DDR_BUF_SIZE_LN2 0xb
209 +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
210 +
211 +#endif /* _CBUS_H_ */
212 --- /dev/null
213 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
214 @@ -0,0 +1,55 @@
215 +/*
216 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
217 + * Copyright 2017 NXP
218 + *
219 + * This program is free software; you can redistribute it and/or modify
220 + * it under the terms of the GNU General Public License as published by
221 + * the Free Software Foundation; either version 2 of the License, or
222 + * (at your option) any later version.
223 + *
224 + * This program is distributed in the hope that it will be useful,
225 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
226 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
227 + * GNU General Public License for more details.
228 + *
229 + * You should have received a copy of the GNU General Public License
230 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
231 + */
232 +
233 +#ifndef _BMU_H_
234 +#define _BMU_H_
235 +
236 +#define BMU_VERSION 0x000
237 +#define BMU_CTRL 0x004
238 +#define BMU_UCAST_CONFIG 0x008
239 +#define BMU_UCAST_BASE_ADDR 0x00c
240 +#define BMU_BUF_SIZE 0x010
241 +#define BMU_BUF_CNT 0x014
242 +#define BMU_THRES 0x018
243 +#define BMU_INT_SRC 0x020
244 +#define BMU_INT_ENABLE 0x024
245 +#define BMU_ALLOC_CTRL 0x030
246 +#define BMU_FREE_CTRL 0x034
247 +#define BMU_FREE_ERR_ADDR 0x038
248 +#define BMU_CURR_BUF_CNT 0x03c
249 +#define BMU_MCAST_CNT 0x040
250 +#define BMU_MCAST_ALLOC_CTRL 0x044
251 +#define BMU_REM_BUF_CNT 0x048
252 +#define BMU_LOW_WATERMARK 0x050
253 +#define BMU_HIGH_WATERMARK 0x054
254 +#define BMU_INT_MEM_ACCESS 0x100
255 +
256 +struct BMU_CFG {
257 + unsigned long baseaddr;
258 + u32 count;
259 + u32 size;
260 + u32 low_watermark;
261 + u32 high_watermark;
262 +};
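
A sketch of filling this structure for BMU1, which hands out LMEM buffers. The count assumes the whole LMEM is carved into LMEM_BUF_SIZE buffers, and the watermarks are platform tuning values, so treat the numbers as illustrative only:

    struct BMU_CFG cfg = {
            .baseaddr       = (unsigned long)LMEM_BASE_ADDR,
            .count          = LMEM_SIZE / LMEM_BUF_SIZE, /* 0x10000 / 128 = 512 */
            .size           = LMEM_BUF_SIZE, /* hardware takes the ln2 value, BMU1_BUF_SIZE */
            .low_watermark  = 10,   /* illustrative */
            .high_watermark = 15,   /* illustrative */
    };
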
263 +
264 +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
265 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
266 +
267 +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
268 +
269 +#endif /* _BMU_H_ */
270 --- /dev/null
271 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
272 @@ -0,0 +1,289 @@
273 +/*
274 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
275 + * Copyright 2017 NXP
276 + *
277 + * This program is free software; you can redistribute it and/or modify
278 + * it under the terms of the GNU General Public License as published by
279 + * the Free Software Foundation; either version 2 of the License, or
280 + * (at your option) any later version.
281 + *
282 + * This program is distributed in the hope that it will be useful,
283 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
284 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
285 + * GNU General Public License for more details.
286 + *
287 + * You should have received a copy of the GNU General Public License
288 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
289 + */
290 +
291 +#ifndef _CLASS_CSR_H_
292 +#define _CLASS_CSR_H_
293 +
294 +/* @file class_csr.h
295 + * class_csr - block containing all the classifier control and status registers.
296 + * Mapped on CBUS and accessible from all PEs and the ARM.
297 + */
298 +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
299 +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
300 +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
301 +
302 +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
303 +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
304 +
305 +/* LMEM header size for the Classifier block. Data in the LMEM
306 + * is written from this offset.
307 + */
308 +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
309 +
310 +/* DDR header size for the Classifier block. Data in the DDR
311 + * is written from this offset.
312 + */
313 +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
314 +
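
With the LMEM_HDR_SIZE (0x10) and DDR_HDR_SIZE (0x100) defaults from cbus.h, the register value would be composed as follows:

    u32 val = CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) |
              CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE);
    /* val == (0x100 << 16) | 0x10 == 0x01000010 */
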
315 +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
316 +
317 +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
318 +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
319 +
320 +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
321 +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
322 +
323 +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
324 +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
325 +
326 +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
327 +
328 +/* @name Class PE memory access. Allows external PEs and the host to
329 + * read/write PMEM/DMEM memory ranges for each classifier PE.
330 + */
331 +/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
332 + * See \ref XXX_MEM_ACCESS_ADDR for details.
333 + */
334 +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
335 +
336 +/* Internal Memory Access Write Data [31:0] */
337 +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
338 +
339 +/* Internal Memory Access Read Data [31:0] */
340 +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
341 +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
342 +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
343 +
344 +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
345 +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
346 +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
347 +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
348 +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
349 +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
350 +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
351 +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
352 +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
353 +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
354 +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
355 +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
356 +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
357 +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
358 +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
359 +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
360 +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
361 +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
362 +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
363 +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
364 +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
365 +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
366 +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
367 +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
368 +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
369 +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
370 +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
371 +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
372 +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
373 +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
374 +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
375 +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
376 +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
377 +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
378 +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
379 +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
380 +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
381 +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
382 +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
383 +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
384 +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
385 +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
386 +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
387 +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
388 +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
389 +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
390 +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
391 +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
392 +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
393 +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
394 +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
395 +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
396 +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
397 +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
398 +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
399 +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
400 +
401 +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
402 +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
403 +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
404 +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
405 +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
406 +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
407 +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
408 +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
409 +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
410 +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
411 +
412 +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
413 +
414 +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
415 +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
416 +
417 +/* (route_entry_size[9:0], route_hash_size[23:16]
418 + * (this is actually ln2(size)))
419 + */
420 +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
421 +
422 +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
423 +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
424 +
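
Using CLASS_ROUTE_SIZE (128) and CLASS_ROUTE_HASH_BITS (20), both defined further down in this header, a plausible programming of this register looks like:

    u32 val = CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
              CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS);
    /* val == (20 << 16) | 128 == 0x00140080 */
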
425 +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
426 +
427 +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
428 +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
429 +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
430 +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
431 +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
432 +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
433 +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
434 +
435 +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
436 +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
437 +/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
438 +
439 +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
440 +
441 +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
442 +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
443 +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
444 +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
445 +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
446 +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
447 +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
448 +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
449 +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
450 +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
451 +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
452 +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
453 +
454 +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
455 +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
456 +
457 +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
458 +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
459 +
460 +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
461 +
462 +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
463 +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
464 +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
465 +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
466 +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
467 +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
468 +
469 +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
470 +
471 +/* CLASS defines */
472 +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
473 +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
474 +
475 +/* Can be configured */
476 +#define CLASS_PBUF0_BASE_ADDR 0x000
477 +/* Can be configured */
478 +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
479 +/* Can be configured */
480 +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
481 +/* Can be configured */
482 +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
483 +
484 +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
485 + CLASS_PBUF_HEADER_OFFSET)
486 +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
487 + CLASS_PBUF_HEADER_OFFSET)
488 +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
489 + CLASS_PBUF_HEADER_OFFSET)
490 +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
491 + CLASS_PBUF_HEADER_OFFSET)
492 +
493 +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
494 + CLASS_PBUF0_BASE_ADDR)
495 +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
496 + CLASS_PBUF2_BASE_ADDR)
497 +
498 +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
499 + CLASS_PBUF0_HEADER_BASE_ADDR)
500 +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
501 + CLASS_PBUF2_HEADER_BASE_ADDR)
502 +
503 +#define CLASS_ROUTE_SIZE 128
504 +#define CLASS_MAX_ROUTE_SIZE 256
505 +#define CLASS_ROUTE_HASH_BITS 20
506 +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
507 +
508 +/* Can be configured */
509 +#define CLASS_ROUTE0_BASE_ADDR 0x400
510 +/* Can be configured */
511 +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
512 +/* Can be configured */
513 +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
514 +/* Can be configured */
515 +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
516 +
517 +#define CLASS_SA_SIZE 128
518 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
519 +/* not used */
520 +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
521 +/* not used */
522 +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
523 +/* not used */
524 +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
525 +
526 +/* general-purpose free dmem buffer, last portion of 2K dmem pbuf */
527 +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
528 + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
529 +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
530 + CLASS_SA_SIZE))
531 +
532 +#define TWO_LEVEL_ROUTE BIT(0)
533 +#define PHYNO_IN_HASH BIT(1)
534 +#define HW_ROUTE_FETCH BIT(3)
535 +#define HW_BRIDGE_FETCH BIT(5)
536 +#define IP_ALIGNED BIT(6)
537 +#define ARC_HIT_CHECK_EN BIT(7)
538 +#define CLASS_TOE BIT(11)
539 +#define HASH_NORMAL (0 << 12)
540 +#define HASH_CRC_PORT BIT(12)
541 +#define HASH_CRC_IP (2 << 12)
542 +#define HASH_CRC_PORT_IP (3 << 12)
543 +#define QB2BUS_LE BIT(15)
544 +
545 +#define TCP_CHKSUM_DROP BIT(0)
546 +#define UDP_CHKSUM_DROP BIT(1)
547 +#define IPV4_CHKSUM_DROP BIT(9)
548 +
549 +/* CLASS_HIF_PARSE bits */
550 +#define HIF_PKT_CLASS_EN BIT(0)
551 +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
552 +
553 +struct class_cfg {
554 + u32 toe_mode;
555 + unsigned long route_table_baseaddr;
556 + u32 route_table_hash_bits;
557 + u32 pe_sys_clk_ratio;
558 + u32 resume;
559 +};
560 +
561 +#endif /* _CLASS_CSR_H_ */
562 --- /dev/null
563 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
564 @@ -0,0 +1,242 @@
565 +/*
566 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
567 + * Copyright 2017 NXP
568 + *
569 + * This program is free software; you can redistribute it and/or modify
570 + * it under the terms of the GNU General Public License as published by
571 + * the Free Software Foundation; either version 2 of the License, or
572 + * (at your option) any later version.
573 + *
574 + * This program is distributed in the hope that it will be useful,
575 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
576 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
577 + * GNU General Public License for more details.
578 + *
579 + * You should have received a copy of the GNU General Public License
580 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
581 + */
582 +
583 +#ifndef _EMAC_H_
584 +#define _EMAC_H_
585 +
586 +#include <linux/ethtool.h>
587 +
588 +#define EMAC_IEVENT_REG 0x004
589 +#define EMAC_IMASK_REG 0x008
590 +#define EMAC_R_DES_ACTIVE_REG 0x010
591 +#define EMAC_X_DES_ACTIVE_REG 0x014
592 +#define EMAC_ECNTRL_REG 0x024
593 +#define EMAC_MII_DATA_REG 0x040
594 +#define EMAC_MII_CTRL_REG 0x044
595 +#define EMAC_MIB_CTRL_STS_REG 0x064
596 +#define EMAC_RCNTRL_REG 0x084
597 +#define EMAC_TCNTRL_REG 0x0C4
598 +#define EMAC_PHY_ADDR_LOW 0x0E4
599 +#define EMAC_PHY_ADDR_HIGH 0x0E8
600 +#define EMAC_GAUR 0x120
601 +#define EMAC_GALR 0x124
602 +#define EMAC_TFWR_STR_FWD 0x144
603 +#define EMAC_RX_SECTION_FULL 0x190
604 +#define EMAC_RX_SECTION_EMPTY 0x194
605 +#define EMAC_TX_SECTION_EMPTY 0x1A0
606 +#define EMAC_TRUNC_FL 0x1B0
607 +
608 +#define RMON_T_DROP 0x200 /* Count of frames not counted correctly */
609 +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
610 +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
611 +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
612 +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
613 +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
614 +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
615 +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
616 +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
617 +#define RMON_T_COL 0x224 /* RMON TX collision count */
618 +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
619 +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
620 +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
621 +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
622 +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
623 +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
624 +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
625 +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
626 +#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
627 +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
628 +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
629 +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
630 +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
631 +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
632 +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
633 +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
634 +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
635 +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
636 +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
637 +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
638 +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
639 +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
640 +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
641 +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
642 +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
643 +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
644 +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
645 +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
646 +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
647 +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
648 +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
649 +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
650 +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
651 +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
652 +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
653 +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
654 +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
655 +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
656 +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
657 +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
658 +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
659 +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
660 +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
661 +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
662 +
663 +#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
664 +#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
665 +
666 +/* GEMAC definitions and settings */
667 +
668 +#define EMAC_PORT_0 0
669 +#define EMAC_PORT_1 1
670 +
671 +/* GEMAC Bit definitions */
672 +#define EMAC_IEVENT_HBERR 0x80000000
673 +#define EMAC_IEVENT_BABR 0x40000000
674 +#define EMAC_IEVENT_BABT 0x20000000
675 +#define EMAC_IEVENT_GRA 0x10000000
676 +#define EMAC_IEVENT_TXF 0x08000000
677 +#define EMAC_IEVENT_TXB 0x04000000
678 +#define EMAC_IEVENT_RXF 0x02000000
679 +#define EMAC_IEVENT_RXB 0x01000000
680 +#define EMAC_IEVENT_MII 0x00800000
681 +#define EMAC_IEVENT_EBERR 0x00400000
682 +#define EMAC_IEVENT_LC 0x00200000
683 +#define EMAC_IEVENT_RL 0x00100000
684 +#define EMAC_IEVENT_UN 0x00080000
685 +
686 +#define EMAC_IMASK_HBERR 0x80000000
687 +#define EMAC_IMASK_BABR 0x40000000
688 +#define EMAC_IMASKT_BABT 0x20000000
689 +#define EMAC_IMASK_GRA 0x10000000
690 +#define EMAC_IMASKT_TXF 0x08000000
691 +#define EMAC_IMASK_TXB 0x04000000
692 +#define EMAC_IMASKT_RXF 0x02000000
693 +#define EMAC_IMASK_RXB 0x01000000
694 +#define EMAC_IMASK_MII 0x00800000
695 +#define EMAC_IMASK_EBERR 0x00400000
696 +#define EMAC_IMASK_LC 0x00200000
697 +#define EMAC_IMASKT_RL 0x00100000
698 +#define EMAC_IMASK_UN 0x00080000
699 +
700 +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
701 +#define EMAC_RCNTRL_LOOP 0x00000001
702 +#define EMAC_RCNTRL_DRT 0x00000002
703 +#define EMAC_RCNTRL_MII_MODE 0x00000004
704 +#define EMAC_RCNTRL_PROM 0x00000008
705 +#define EMAC_RCNTRL_BC_REJ 0x00000010
706 +#define EMAC_RCNTRL_FCE 0x00000020
707 +#define EMAC_RCNTRL_RGMII 0x00000040
708 +#define EMAC_RCNTRL_SGMII 0x00000080
709 +#define EMAC_RCNTRL_RMII 0x00000100
710 +#define EMAC_RCNTRL_RMII_10T 0x00000200
711 +#define EMAC_RCNTRL_CRC_FWD 0x00004000
712 +
713 +#define EMAC_TCNTRL_GTS 0x00000001
714 +#define EMAC_TCNTRL_HBC 0x00000002
715 +#define EMAC_TCNTRL_FDEN 0x00000004
716 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
717 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
718 +
719 +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
720 +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
721 +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
722 +#define EMAC_ECNTRL_SLEEP 0x00000008
723 +#define EMAC_ECNTRL_SPEED 0x00000020
724 +#define EMAC_ECNTRL_DBSWAP 0x00000100
725 +
726 +#define EMAC_X_WMRK_STRFWD 0x00000100
727 +
728 +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
729 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
730 +
731 +#define EMAC_RX_SECTION_EMPTY_V 0x00010006
732 +/*
733 + * The possible operating speeds of the MAC, currently supporting 10, 100 and
734 + * 1000Mb modes.
735 + */
736 +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
737 +
738 +/* MII-related definitions */
739 +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
740 +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
741 +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
742 +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
743 +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
744 +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
745 +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
746 +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
747 +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
748 +
749 +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
750 +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
751 +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
752 +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
753 +
754 +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
755 + EMAC_MII_DATA_RA_SHIFT)
756 +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
757 + EMAC_MII_DATA_PA_SHIFT)
758 +#define EMAC_MII_DATA(v) ((v) & 0xffff)
759 +
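
Putting the fields together, a clause-22 MII read of register reg on PHY phy (variable names illustrative) would be composed as below and written to EMAC_MII_DATA_REG; completion is signalled by the MII bit in EMAC_IEVENT_REG:

    u32 mii_cmd = EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
                  EMAC_MII_DATA_PA(phy) | EMAC_MII_DATA_RA(reg) |
                  EMAC_MII_DATA_TA;
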
760 +#define EMAC_MII_SPEED_SHIFT 1
761 +#define EMAC_HOLDTIME_SHIFT 8
762 +#define EMAC_HOLDTIME_MASK 0x7
763 +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
764 + EMAC_HOLDTIME_SHIFT)
765 +
766 +/*
767 + * The Address organisation for the MAC device. All addresses are split into
768 + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
769 + * the address and the other field holds the high-order bits - this may be 16 bits
770 + * in the case of MAC addresses, or 32-bits for the hash address.
771 + * In terms of memory storage, the first item (bottom) is assumed to be at a
772 + * lower address location than 'top'. i.e. top should be at address location of
773 + * 'bottom' + 4 bytes.
774 + */
775 +struct pfe_mac_addr {
776 + u32 bottom; /* Lower 32-bits of address. */
777 + u32 top; /* Upper 32-bits of address. */
778 +};
779 +
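
A sketch of packing a 6-byte MAC address into this layout; the exact byte order within each word is a property of the GEMAC, so the ordering used here is an assumption:

    u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    struct pfe_mac_addr addr = {
            .bottom = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
            .top    = (mac[4] << 24) | (mac[5] << 16),
    };
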
780 +/*
781 + * The following is the organisation of the address filters section of the MAC
782 + * registers. The Cadence MAC contains four possible specific address match
783 + * addresses, if an incoming frame corresponds to any one of these four
784 + * addresses then the frame will be copied to memory.
785 + * It is not necessary for all four of the address match registers to be
786 + * programmed, this is application dependent.
787 + */
788 +struct spec_addr {
789 + struct pfe_mac_addr one; /* Specific address register 1. */
790 + struct pfe_mac_addr two; /* Specific address register 2. */
791 + struct pfe_mac_addr three; /* Specific address register 3. */
792 + struct pfe_mac_addr four; /* Specific address register 4. */
793 +};
794 +
795 +struct gemac_cfg {
796 + u32 mode;
797 + u32 speed;
798 + u32 duplex;
799 +};
800 +
801 +/* EMAC Hash size */
802 +#define EMAC_HASH_REG_BITS 64
803 +
804 +#define EMAC_SPEC_ADDR_MAX 4
805 +
806 +#endif /* _EMAC_H_ */
807 --- /dev/null
808 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
809 @@ -0,0 +1,86 @@
810 +/*
811 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
812 + * Copyright 2017 NXP
813 + *
814 + * This program is free software; you can redistribute it and/or modify
815 + * it under the terms of the GNU General Public License as published by
816 + * the Free Software Foundation; either version 2 of the License, or
817 + * (at your option) any later version.
818 + *
819 + * This program is distributed in the hope that it will be useful,
820 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
821 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
822 + * GNU General Public License for more details.
823 + *
824 + * You should have received a copy of the GNU General Public License
825 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
826 + */
827 +
828 +#ifndef _GPI_H_
829 +#define _GPI_H_
830 +
831 +#define GPI_VERSION 0x00
832 +#define GPI_CTRL 0x04
833 +#define GPI_RX_CONFIG 0x08
834 +#define GPI_HDR_SIZE 0x0c
835 +#define GPI_BUF_SIZE 0x10
836 +#define GPI_LMEM_ALLOC_ADDR 0x14
837 +#define GPI_LMEM_FREE_ADDR 0x18
838 +#define GPI_DDR_ALLOC_ADDR 0x1c
839 +#define GPI_DDR_FREE_ADDR 0x20
840 +#define GPI_CLASS_ADDR 0x24
841 +#define GPI_DRX_FIFO 0x28
842 +#define GPI_TRX_FIFO 0x2c
843 +#define GPI_INQ_PKTPTR 0x30
844 +#define GPI_DDR_DATA_OFFSET 0x34
845 +#define GPI_LMEM_DATA_OFFSET 0x38
846 +#define GPI_TMLF_TX 0x4c
847 +#define GPI_DTX_ASEQ 0x50
848 +#define GPI_FIFO_STATUS 0x54
849 +#define GPI_FIFO_DEBUG 0x58
850 +#define GPI_TX_PAUSE_TIME 0x5c
851 +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
852 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
853 +#define GPI_TOE_CHKSUM_EN 0x68
854 +#define GPI_OVERRUN_DROPCNT 0x6c
855 +#define GPI_CSR_MTIP_PAUSE_REG 0x74
856 +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
857 +#define GPI_CSR_RX_CNT 0x7c
858 +#define GPI_CSR_TX_CNT 0x80
859 +#define GPI_CSR_DEBUG1 0x84
860 +#define GPI_CSR_DEBUG2 0x88
861 +
862 +struct gpi_cfg {
863 + u32 lmem_rtry_cnt;
864 + u32 tmlf_txthres;
865 + u32 aseq_len;
866 + u32 mtip_pause_reg;
867 +};
868 +
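
The per-GPI defines below pair naturally with this struct; a sketch for EGPI1 follows (the pause value is an assumption built from the EGPI_PAUSE_* bits at the end of this header):

    struct gpi_cfg egpi1_cfg = {
            .lmem_rtry_cnt  = EGPI1_LMEM_RTRY_CNT,
            .tmlf_txthres   = EGPI1_TMLF_TXTHRES,
            .aseq_len       = EGPI1_ASEQ_LEN,
            .mtip_pause_reg = EGPI_PAUSE_ENABLE | EGPI_PAUSE_TIME, /* assumption */
    };
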
869 +/* GPI common defines */
870 +#define GPI_LMEM_BUF_EN 0x1
871 +#define GPI_DDR_BUF_EN 0x1
872 +
873 +/* EGPI 1 defines */
874 +#define EGPI1_LMEM_RTRY_CNT 0x40
875 +#define EGPI1_TMLF_TXTHRES 0xBC
876 +#define EGPI1_ASEQ_LEN 0x50
877 +
878 +/* EGPI 2 defines */
879 +#define EGPI2_LMEM_RTRY_CNT 0x40
880 +#define EGPI2_TMLF_TXTHRES 0xBC
881 +#define EGPI2_ASEQ_LEN 0x40
882 +
883 +/* EGPI 3 defines */
884 +#define EGPI3_LMEM_RTRY_CNT 0x40
885 +#define EGPI3_TMLF_TXTHRES 0xBC
886 +#define EGPI3_ASEQ_LEN 0x40
887 +
888 +/* HGPI defines */
889 +#define HGPI_LMEM_RTRY_CNT 0x40
890 +#define HGPI_TMLF_TXTHRES 0xBC
891 +#define HGPI_ASEQ_LEN 0x40
892 +
893 +#define EGPI_PAUSE_TIME 0x000007D0
894 +#define EGPI_PAUSE_ENABLE 0x40000000
895 +#endif /* _GPI_H_ */
896 --- /dev/null
897 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
898 @@ -0,0 +1,100 @@
899 +/*
900 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
901 + * Copyright 2017 NXP
902 + *
903 + * This program is free software; you can redistribute it and/or modify
904 + * it under the terms of the GNU General Public License as published by
905 + * the Free Software Foundation; either version 2 of the License, or
906 + * (at your option) any later version.
907 + *
908 + * This program is distributed in the hope that it will be useful,
909 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
910 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
911 + * GNU General Public License for more details.
912 + *
913 + * You should have received a copy of the GNU General Public License
914 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
915 + */
916 +
917 +#ifndef _HIF_H_
918 +#define _HIF_H_
919 +
920 +/* @file hif.h
921 + * hif - PFE HIF block control and status registers.
922 + * Mapped on CBUS and accessible from all PEs and the ARM.
923 + */
924 +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
925 +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
926 +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
927 +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
928 +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
929 +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
930 +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
931 +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
932 +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
933 +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
934 +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
935 +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
936 +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
937 +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
938 +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
939 +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
940 +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
941 +
942 +/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
943 +#define HIF_INT BIT(0)
944 +#define HIF_RXBD_INT BIT(1)
945 +#define HIF_RXPKT_INT BIT(2)
946 +#define HIF_TXBD_INT BIT(3)
947 +#define HIF_TXPKT_INT BIT(4)
948 +
949 +/* HIF_TX_CTRL bits */
950 +#define HIF_CTRL_DMA_EN BIT(0)
951 +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
952 +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
953 +
954 +/* HIF_RX_STATUS bits */
955 +#define BDP_CSR_RX_DMA_ACTV BIT(16)
956 +
957 +/* HIF_INT_ENABLE bits */
958 +#define HIF_INT_EN BIT(0)
959 +#define HIF_RXBD_INT_EN BIT(1)
960 +#define HIF_RXPKT_INT_EN BIT(2)
961 +#define HIF_TXBD_INT_EN BIT(3)
962 +#define HIF_TXPKT_INT_EN BIT(4)
963 +
964 +/* HIF_POLL_CTRL bits */
965 +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
966 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
967 +
968 +/* HIF_INT_COAL bits */
969 +#define HIF_INT_COAL_ENABLE BIT(31)
970 +
971 +/* Buffer descriptor control bits */
972 +#define BD_CTRL_BUFLEN_MASK 0x3fff
973 +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
974 +#define BD_CTRL_CBD_INT_EN BIT(16)
975 +#define BD_CTRL_PKT_INT_EN BIT(17)
976 +#define BD_CTRL_LIFM BIT(18)
977 +#define BD_CTRL_LAST_BD BIT(19)
978 +#define BD_CTRL_DIR BIT(20)
979 +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
980 +#define BD_CTRL_PKT_XFER BIT(24)
981 +#define BD_CTRL_DESC_EN BIT(31)
982 +#define BD_CTRL_PARSE_DISABLE BIT(25)
983 +#define BD_CTRL_BRFETCH_DISABLE BIT(26)
984 +#define BD_CTRL_RTFETCH_DISABLE BIT(27)
985 +
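
As an illustration, a single-buffer TX descriptor handed to the DMA would typically carry a control word like the following, where len is the frame length and the exact flag set is an assumption, not taken from this patch:

    u32 bd_ctrl = BD_CTRL_DESC_EN |     /* descriptor owned by hardware */
                  BD_CTRL_LIFM |        /* last (and only) fragment */
                  BD_CTRL_PKT_INT_EN |
                  BD_BUF_LEN(len);
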
986 +/* Buffer descriptor status bits */
987 +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
988 +#define BD_STATUS_DIR_PROC_ID BIT(16)
989 +#define BD_STATUS_CONN_ID_EN BIT(17)
990 +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
991 +#define BD_STATUS_LE_DATA BIT(21)
992 +#define BD_STATUS_CHKSUM_EN BIT(22)
993 +
994 +/* HIF Buffer descriptor status bits */
995 +#define DIR_PROC_ID BIT(16)
996 +#define PROC_ID(id) ((id) << 18)
997 +
998 +#endif /* _HIF_H_ */
999 --- /dev/null
1000 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1001 @@ -0,0 +1,50 @@
1002 +/*
1003 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1004 + * Copyright 2017 NXP
1005 + *
1006 + * This program is free software; you can redistribute it and/or modify
1007 + * it under the terms of the GNU General Public License as published by
1008 + * the Free Software Foundation; either version 2 of the License, or
1009 + * (at your option) any later version.
1010 + *
1011 + * This program is distributed in the hope that it will be useful,
1012 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1013 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1014 + * GNU General Public License for more details.
1015 + *
1016 + * You should have received a copy of the GNU General Public License
1017 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1018 + */
1019 +
1020 +#ifndef _HIF_NOCPY_H_
1021 +#define _HIF_NOCPY_H_
1022 +
1023 +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
1024 +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
1025 +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
1026 +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
1027 +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
1028 +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
1029 +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
1030 +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
1031 +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
1032 +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
1033 +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
1034 +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
1035 +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
1036 +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
1037 +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
1038 +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
1039 +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
1040 +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
1041 +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
1042 +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
1043 +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
1044 +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
1045 +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
1046 +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
1047 +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
1048 +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
1049 +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
1050 +
1051 +#endif /* _HIF_NOCPY_H_ */
1052 --- /dev/null
1053 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1054 @@ -0,0 +1,168 @@
1055 +/*
1056 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1057 + * Copyright 2017 NXP
1058 + *
1059 + * This program is free software; you can redistribute it and/or modify
1060 + * it under the terms of the GNU General Public License as published by
1061 + * the Free Software Foundation; either version 2 of the License, or
1062 + * (at your option) any later version.
1063 + *
1064 + * This program is distributed in the hope that it will be useful,
1065 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1066 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1067 + * GNU General Public License for more details.
1068 + *
1069 + * You should have received a copy of the GNU General Public License
1070 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1071 + */
1072 +
1073 +#ifndef _TMU_CSR_H_
1074 +#define _TMU_CSR_H_
1075 +
1076 +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
1077 +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
1078 +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
1079 +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
1080 +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
1081 +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
1082 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1083 +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
1084 +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
1085 +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
1086 +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
1087 +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
1088 +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
1089 +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
1090 +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
1091 +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
1092 +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
1093 +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
1094 +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
1095 +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
1096 +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
1097 +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
1098 +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
1099 +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
1100 +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
1101 +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
1102 +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
1103 +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
1104 +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
1105 +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
1106 +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
1107 +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
1108 +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
1109 +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
1110 +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
1111 +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
1112 +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
1113 +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
1114 +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
1115 +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
1116 +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
1117 +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
1118 +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
1119 +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
1120 +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
1121 +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
1122 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1123 + * This is a global Enable for all schedulers in PHY0
1124 + */
1125 +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
1126 +
1127 +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
1128 +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
1129 +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
1130 +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
1131 +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
1132 +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
1133 +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
1134 +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
1135 +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
1136 +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
1137 +
1138 +/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
1139 + * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
1140 + * the internal memory. This address is used to access both the PM and DM of
1141 + * all the PE's
1142 + */
1143 +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
1144 +
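
Combined with the PE_MEM_ACCESS_* bits from cbus.h, a 4-byte DMEM write through this window would plausibly be set up as below; the write data goes to TMU_MEM_ACCESS_WDATA first, and the applicability of the DMEM select bit to the TMU window is an assumption based on the bit layout described above:

    writel(val, TMU_MEM_ACCESS_WDATA);
    writel(PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
           PE_MEM_ACCESS_BYTE_ENABLE(0, 4) | (dmem_addr & 0xffffff),
           TMU_MEM_ACCESS_ADDR);
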
1145 +/* Internal Memory Access Write Data */
1146 +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
1147 +/* Internal Memory Access Read Data. The commands are blocked
1148 + * at the mem_access only
1149 + */
1150 +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
1151 +
1152 +/* [31:0] PHY0 in queue address (must be initialized with one of the
1153 + * xxx_INQ_PKTPTR cbus addresses)
1154 + */
1155 +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
1156 +/* [31:0] PHY1 in queue address (must be initialized with one of the
1157 + * xxx_INQ_PKTPTR cbus addresses)
1158 + */
1159 +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
1160 +/* [31:0] PHY2 in queue address (must be initialized with one of the
1161 + * xxx_INQ_PKTPTR cbus addresses)
1162 + */
1163 +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
1164 +/* [31:0] PHY3 in queue address (must be initialized with one of the
1165 + * xxx_INQ_PKTPTR cbus addresses)
1166 + */
1167 +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
1168 +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
1169 +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
1170 +
1171 +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
1172 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1173 +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
1174 +
1175 +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
1176 +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
1177 +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
1178 +/* [31:0] PHY4 in queue address (must be initialized with one of the
1179 + * xxx_INQ_PKTPTR cbus addresses)
1180 + */
1181 +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
1182 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1183 + * This is a global Enable for all schedulers in PHY1
1184 + */
1185 +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
1186 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1187 + * This is a global Enable for all schedulers in PHY2
1188 + */
1189 +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
1190 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1191 + * This is a global Enable for all schedulers in PHY3
1192 + */
1193 +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
1194 +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
1195 +/* [31:0] PHY5 in queue address (must be initialized with one of the
1196 + * xxx_INQ_PKTPTR cbus addresses)
1197 + */
1198 +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
1199 +
1200 +#define SW_RESET BIT(0) /* Global software reset */
1201 +#define INQ_RESET BIT(2)
1202 +#define TEQ_RESET BIT(3)
1203 +#define TDQ_RESET BIT(4)
1204 +#define PE_RESET BIT(5)
1205 +#define MEM_INIT BIT(6)
1206 +#define MEM_INIT_DONE BIT(7)
1207 +#define LLM_INIT BIT(8)
1208 +#define LLM_INIT_DONE BIT(9)
1209 +#define ECC_MEM_INIT_DONE BIT(10)
1210 +
1211 +struct tmu_cfg {
1212 + u32 pe_sys_clk_ratio;
1213 + unsigned long llm_base_addr;
1214 + u32 llm_queue_len;
1215 +};
1216 +
1217 +/* Not HW related: common defines for pfe_ctrl / pfe */
1218 +#define DEFAULT_MAX_QDEPTH 80
1219 +#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host tx QoS */
1220 +#define DEFAULT_TMU3_QDEPTH 127
1221 +
1222 +#endif /* _TMU_CSR_H_ */
1223 --- /dev/null
1224 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1225 @@ -0,0 +1,61 @@
1226 +/*
1227 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1228 + * Copyright 2017 NXP
1229 + *
1230 + * This program is free software; you can redistribute it and/or modify
1231 + * it under the terms of the GNU General Public License as published by
1232 + * the Free Software Foundation; either version 2 of the License, or
1233 + * (at your option) any later version.
1234 + *
1235 + * This program is distributed in the hope that it will be useful,
1236 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1237 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1238 + * GNU General Public License for more details.
1239 + *
1240 + * You should have received a copy of the GNU General Public License
1241 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1242 + */
1243 +
1244 +#ifndef _UTIL_CSR_H_
1245 +#define _UTIL_CSR_H_
1246 +
1247 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1248 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1249 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1250 +
1251 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1252 +
1253 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1254 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1255 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1256 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1257 +
1258 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1259 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1260 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1261 +
1262 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1263 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1264 +
1265 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1266 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1267 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1268 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1269 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1270 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1271 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1272 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1273 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1274 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1275 +
1276 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1277 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1278 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1279 +
1280 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1281 +
1282 +struct util_cfg {
1283 + u32 pe_sys_clk_ratio;
1284 +};
1285 +
1286 +#endif /* _UTIL_CSR_H_ */
1287 --- /dev/null
1288 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1289 @@ -0,0 +1,372 @@
1290 +/*
1291 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1292 + * Copyright 2017 NXP
1293 + *
1294 + * This program is free software; you can redistribute it and/or modify
1295 + * it under the terms of the GNU General Public License as published by
1296 + * the Free Software Foundation; either version 2 of the License, or
1297 + * (at your option) any later version.
1298 + *
1299 + * This program is distributed in the hope that it will be useful,
1300 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1301 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1302 + * GNU General Public License for more details.
1303 + *
1304 + * You should have received a copy of the GNU General Public License
1305 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1306 + */
1307 +
1308 +#ifndef _PFE_H_
1309 +#define _PFE_H_
1310 +
1311 +#include "cbus.h"
1312 +
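+/* Per-PE memory windows: the PE index selects a 1 MiB slot (bits 20+) */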
1313 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1314 +/*
1315 + * Only valid for mem access register interface
1316 + */
1317 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1318 +#define CLASS_DMEM_SIZE 0x00002000
1319 +#define CLASS_IMEM_SIZE 0x00008000
1320 +
1321 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1322 +/*
1323 + * Only valid for mem access register interface
1324 + */
1325 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1326 +#define TMU_DMEM_SIZE 0x00000800
1327 +#define TMU_IMEM_SIZE 0x00002000
1328 +
1329 +#define UTIL_DMEM_BASE_ADDR 0x00000000
1330 +#define UTIL_DMEM_SIZE 0x00002000
1331 +
1332 +#define PE_LMEM_BASE_ADDR 0xc3010000
1333 +#define PE_LMEM_SIZE 0x8000
1334 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1335 +
1336 +#define DMEM_BASE_ADDR 0x00000000
1337 +#define DMEM_SIZE 0x2000 /* TMU has less... */
1338 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
1339 +
1340 +#define PMEM_BASE_ADDR 0x00010000
1341 +#define PMEM_SIZE 0x8000 /* TMU has less... */
1342 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
1343 +
+/* These check memory ranges from the PE's point of view / memory map */
1345 +#define IS_DMEM(addr, len) \
1346 + ({ typeof(addr) addr_ = (addr); \
1347 + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
1348 + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1349 +
1350 +#define IS_PMEM(addr, len) \
1351 + ({ typeof(addr) addr_ = (addr); \
1352 + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
1353 + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1354 +
1355 +#define IS_PE_LMEM(addr, len) \
1356 + ({ typeof(addr) addr_ = (addr); \
1357 + ((unsigned long)(addr_) >= \
1358 + PE_LMEM_BASE_ADDR) && \
1359 + (((unsigned long)(addr_) + \
1360 + (len)) <= PE_LMEM_END); })
1361 +
1362 +#define IS_PFE_LMEM(addr, len) \
1363 + ({ typeof(addr) addr_ = (addr); \
1364 + ((unsigned long)(addr_) >= \
1365 + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
1366 + (((unsigned long)(addr_) + (len)) <= \
1367 + CBUS_VIRT_TO_PFE(LMEM_END)); })
1368 +
1369 +#define __IS_PHYS_DDR(addr, len) \
1370 + ({ typeof(addr) addr_ = (addr); \
1371 + ((unsigned long)(addr_) >= \
1372 + DDR_PHYS_BASE_ADDR) && \
1373 + (((unsigned long)(addr_) + (len)) <= \
1374 + DDR_PHYS_END); })
1375 +
1376 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
1377 +
1378 +/*
1379 + * If using a run-time virtual address for the cbus base address use this code
1380 + */
1381 +extern void *cbus_base_addr;
1382 +extern void *ddr_base_addr;
1383 +extern unsigned long ddr_phys_base_addr;
1384 +extern unsigned int ddr_size;
1385 +
1386 +#define CBUS_BASE_ADDR cbus_base_addr
1387 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
1388 +#define DDR_BASE_ADDR ddr_base_addr
1389 +#define DDR_SIZE ddr_size
1390 +
1391 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1392 +
+#define LS1012A_PFE_RESET_WA	/*
+				 * PFE doesn't have a global reset, so
+				 * re-init must take care of a few things
+				 * to make PFE functional after reset
+				 */
1398 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
1399 + * as seen by PE's.
1400 + */
1401 +/* CBUS physical base address as seen by PE's. */
1402 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
1403 +
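+/*
+ * From the PE's point of view DDR lives in the lower 2 GiB of the address
+ * map: the PHYS->PFE translation clears bit 31 and PFE->PHYS sets it back.
+ */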
1404 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
1405 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
1406 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1407 + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1408 +/* Translates to PFE address map */
1409 +
1410 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1411 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1412 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1413 +
1414 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
1415 + PFE_CBUS_PHYS_BASE_ADDR)
1416 +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
1417 + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
1418 +
+/* The defines below are used by the QoS control driver on the host */
1420 +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
1421 + * pe's
1422 + */
1423 +
1424 +enum {
1425 + CLASS0_ID = 0,
1426 + CLASS1_ID,
1427 + CLASS2_ID,
1428 + CLASS3_ID,
1429 + CLASS4_ID,
1430 + CLASS5_ID,
1431 + TMU0_ID,
1432 + TMU1_ID,
1433 + TMU2_ID,
1434 + TMU3_ID,
1435 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1436 + UTIL_ID,
1437 +#endif
1438 + MAX_PE
1439 +};
1440 +
1441 +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1442 + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1443 + BIT(CLASS4_ID) | BIT(CLASS5_ID))
1444 +#define CLASS_MAX_ID CLASS5_ID
1445 +
1446 +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1447 + BIT(TMU3_ID))
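+/*
+ * Note: TMU2 is left out of the mask, matching the control code, which
+ * skips it as well (see pe_sync_stop()).
+ */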
1448 +
1449 +#define TMU_MAX_ID TMU3_ID
1450 +
1451 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1452 +#define UTIL_MASK BIT(UTIL_ID)
1453 +#endif
1454 +
1455 +struct pe_status {
1456 + u32 cpu_state;
1457 + u32 activity_counter;
1458 + u32 rx;
1459 + union {
1460 + u32 tx;
1461 + u32 tmu_qstatus;
1462 + };
1463 + u32 drop;
1464 +#if defined(CFG_PE_DEBUG)
1465 + u32 debug_indicator;
1466 + u32 debug[16];
1467 +#endif
1468 +} __aligned(16);
1469 +
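+/*
+ * Host/PE stop handshake: the host writes a command to "stop" (0 = run,
+ * 1 = stop, 2 = reset) and the PE firmware acknowledges through "stopped"
+ * (see pe_start(), pe_sync_stop() and pe_reset_all()).
+ */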
1470 +struct pe_sync_mailbox {
1471 + u32 stop;
1472 + u32 stopped;
1473 +};
1474 +
1475 +/* Drop counter definitions */
1476 +
1477 +#define CLASS_NUM_DROP_COUNTERS 13
1478 +#define UTIL_NUM_DROP_COUNTERS 8
1479 +
+/* PE information.
+ * Structure containing PE-specific information. It is used to create
+ * generic C functions common to all PE's.
+ * Before using the library functions this structure needs to be initialized
+ * with the virtual addresses of the different registers
+ * (according to the ARM MMU mapping). The default initialization supports a
+ * virtual == physical mapping.
+ */
1488 +struct pe_info {
1489 + u32 dmem_base_addr; /* PE's dmem base address */
1490 + u32 pmem_base_addr; /* PE's pmem base address */
1491 + u32 pmem_size; /* PE's pmem size */
1492 +
1493 + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1494 + * address
1495 + */
1496 + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
1497 + * address
1498 + */
1499 + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1500 + * address
1501 + */
1502 +};
1503 +
1504 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1505 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1506 +
1507 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1508 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1509 +
1510 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1511 +
1512 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1513 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1514 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1515 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1516 +void class_bus_write(u32 val, u32 addr, u8 size);
1517 +u32 class_bus_read(u32 addr, u8 size);
1518 +
1519 +#define class_bus_readl(addr) class_bus_read(addr, 4)
1520 +#define class_bus_readw(addr) class_bus_read(addr, 2)
1521 +#define class_bus_readb(addr) class_bus_read(addr, 1)
1522 +
1523 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
1524 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
1525 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
1526 +
1527 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
1528 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
1529 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
1530 +
1531 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
1532 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
1533 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
1534 +
1535 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1536 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1537 + struct device *dev);
1538 +
1539 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1540 + unsigned int ddr_size);
1541 +void bmu_init(void *base, struct BMU_CFG *cfg);
1542 +void bmu_reset(void *base);
1543 +void bmu_enable(void *base);
1544 +void bmu_disable(void *base);
1545 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1546 +
+/*
+ * An enumerated type for loopback values. It can take one of three values:
+ * no loopback (normal operation), local loopback (through the MAC's internal
+ * loopback module) or PHY loopback (through the external PHY).
+ */
1552 +#ifndef __MAC_LOOP_ENUM__
1553 +#define __MAC_LOOP_ENUM__
1554 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1555 +#endif
1556 +
1557 +void gemac_init(void *base, void *config);
1558 +void gemac_disable_rx_checksum_offload(void *base);
1559 +void gemac_enable_rx_checksum_offload(void *base);
1560 +void gemac_set_mdc_div(void *base, int mdc_div);
1561 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1562 +void gemac_set_duplex(void *base, int duplex);
1563 +void gemac_set_mode(void *base, int mode);
1564 +void gemac_enable(void *base);
1565 +void gemac_tx_disable(void *base);
1566 +void gemac_tx_enable(void *base);
1567 +void gemac_disable(void *base);
1568 +void gemac_reset(void *base);
1569 +void gemac_set_address(void *base, struct spec_addr *addr);
1570 +struct spec_addr gemac_get_address(void *base);
1571 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1572 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1573 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1574 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1575 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1576 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1577 + unsigned int entry_index);
1578 +void gemac_clear_laddr1(void *base);
1579 +void gemac_clear_laddr2(void *base);
1580 +void gemac_clear_laddr3(void *base);
1581 +void gemac_clear_laddr4(void *base);
1582 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1583 +struct pfe_mac_addr gemac_get_hash(void *base);
1584 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1585 +struct pfe_mac_addr gem_get_laddr1(void *base);
1586 +struct pfe_mac_addr gem_get_laddr2(void *base);
1587 +struct pfe_mac_addr gem_get_laddr3(void *base);
1588 +struct pfe_mac_addr gem_get_laddr4(void *base);
1589 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1590 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1591 +void gemac_allow_broadcast(void *base);
1592 +void gemac_no_broadcast(void *base);
1593 +void gemac_enable_1536_rx(void *base);
1594 +void gemac_disable_1536_rx(void *base);
1595 +void gemac_enable_rx_jmb(void *base);
1596 +void gemac_disable_rx_jmb(void *base);
1597 +void gemac_enable_stacked_vlan(void *base);
1598 +void gemac_disable_stacked_vlan(void *base);
1599 +void gemac_enable_pause_rx(void *base);
1600 +void gemac_disable_pause_rx(void *base);
1601 +void gemac_enable_copy_all(void *base);
1602 +void gemac_disable_copy_all(void *base);
1603 +void gemac_set_bus_width(void *base, int width);
1604 +void gemac_set_wol(void *base, u32 wol_conf);
1605 +
1606 +void gpi_init(void *base, struct gpi_cfg *cfg);
1607 +void gpi_reset(void *base);
1608 +void gpi_enable(void *base);
1609 +void gpi_disable(void *base);
1610 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1611 +
1612 +void class_init(struct class_cfg *cfg);
1613 +void class_reset(void);
1614 +void class_enable(void);
1615 +void class_disable(void);
1616 +void class_set_config(struct class_cfg *cfg);
1617 +
1618 +void tmu_reset(void);
1619 +void tmu_init(struct tmu_cfg *cfg);
1620 +void tmu_enable(u32 pe_mask);
1621 +void tmu_disable(u32 pe_mask);
1622 +u32 tmu_qstatus(u32 if_id);
1623 +u32 tmu_pkts_processed(u32 if_id);
1624 +
1625 +void util_init(struct util_cfg *cfg);
1626 +void util_reset(void);
1627 +void util_enable(void);
1628 +void util_disable(void);
1629 +
1630 +void hif_init(void);
1631 +void hif_tx_enable(void);
1632 +void hif_tx_disable(void);
1633 +void hif_rx_enable(void);
1634 +void hif_rx_disable(void);
1635 +
1636 +/* Get Chip Revision level
1637 + *
1638 + */
1639 +static inline unsigned int CHIP_REVISION(void)
1640 +{
+	/* For LS1012A always return 1 */
1642 + return 1;
1643 +}
1644 +
1645 +/* Start HIF rx DMA
1646 + *
1647 + */
1648 +static inline void hif_rx_dma_start(void)
1649 +{
1650 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1651 +}
1652 +
1653 +/* Start HIF tx DMA
1654 + *
1655 + */
1656 +static inline void hif_tx_dma_start(void)
1657 +{
1658 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1659 +}
1660 +
1661 +#endif /* _PFE_H_ */
1662 --- /dev/null
1663 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1664 @@ -0,0 +1,238 @@
1665 +/*
1666 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1667 + * Copyright 2017 NXP
1668 + *
1669 + * This program is free software; you can redistribute it and/or modify
1670 + * it under the terms of the GNU General Public License as published by
1671 + * the Free Software Foundation; either version 2 of the License, or
1672 + * (at your option) any later version.
1673 + *
1674 + * This program is distributed in the hope that it will be useful,
1675 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1676 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1677 + * GNU General Public License for more details.
1678 + *
1679 + * You should have received a copy of the GNU General Public License
1680 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1681 + */
1682 +
1683 +#include <linux/kernel.h>
1684 +#include <linux/sched.h>
1685 +#include <linux/module.h>
1686 +#include <linux/list.h>
1687 +#include <linux/kthread.h>
1688 +
1689 +#include "pfe_mod.h"
1690 +#include "pfe_ctrl.h"
1691 +
1692 +#define TIMEOUT_MS 1000
1693 +
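+/*
+ * Cooperative polling helper: returns 0 while the deadline "end" (in
+ * jiffies) has not been exceeded by more than TIMEOUT_MS, rescheduling if
+ * needed once "end" has passed; returns -1 when the grace period expires.
+ */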
1694 +int relax(unsigned long end)
1695 +{
1696 + if (time_after(jiffies, end)) {
1697 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
1698 + return -1;
1699 +
1700 + if (need_resched())
1701 + schedule();
1702 + }
1703 +
1704 + return 0;
1705 +}
1706 +
1707 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
1708 +{
1709 + int id;
1710 +
1711 + mutex_lock(&ctrl->mutex);
1712 +
1713 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
1714 + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
1715 +
1716 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1717 + if (id == TMU2_ID)
1718 + continue;
1719 + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
1720 + }
1721 +
1722 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1723 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
1724 +#endif
1725 + mutex_unlock(&ctrl->mutex);
1726 +}
1727 +
1728 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
1729 +{
1730 + int pe_mask = CLASS_MASK | TMU_MASK;
1731 +
1732 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1733 + pe_mask |= UTIL_MASK;
1734 +#endif
1735 + mutex_lock(&ctrl->mutex);
1736 + pe_start(&pfe->ctrl, pe_mask);
1737 + mutex_unlock(&ctrl->mutex);
1738 +}
1739 +
1740 +/* PE sync stop.
1741 + * Stops packet processing for a list of PE's (specified using a bitmask).
1742 + * The caller must hold ctrl->mutex.
1743 + *
1744 + * @param ctrl Control context
1745 + * @param pe_mask Mask of PE id's to stop
1746 + *
1747 + */
1748 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
1749 +{
1750 + struct pe_sync_mailbox *mbox;
1751 + int pe_stopped = 0;
1752 + unsigned long end = jiffies + 2;
1753 + int i;
1754 +
+	pe_mask &= 0x2FF; /* Exclude Util + TMU2 */
1756 +
1757 + for (i = 0; i < MAX_PE; i++)
1758 + if (pe_mask & (1 << i)) {
1759 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1760 +
1761 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
1762 + long)&mbox->stop, 4);
1763 + }
1764 +
1765 + while (pe_stopped != pe_mask) {
1766 + for (i = 0; i < MAX_PE; i++)
1767 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1768 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1769 +
1770 + if (pe_dmem_read(i, (unsigned
1771 + long)&mbox->stopped, 4) &
1772 + cpu_to_be32(0x1))
1773 + pe_stopped |= (1 << i);
1774 + }
1775 +
1776 + if (relax(end) < 0)
1777 + goto err;
1778 + }
1779 +
1780 + return 0;
1781 +
1782 +err:
1783 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1784 +
1785 + for (i = 0; i < MAX_PE; i++)
1786 + if (pe_mask & (1 << i)) {
1787 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1788 +
1789 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1790 + long)&mbox->stop, 4);
1791 + }
1792 +
1793 + return -EIO;
1794 +}
1795 +
1796 +/* PE start.
1797 + * Starts packet processing for a list of PE's (specified using a bitmask).
1798 + * The caller must hold ctrl->mutex.
1799 + *
1800 + * @param ctrl Control context
1801 + * @param pe_mask Mask of PE id's to start
1802 + *
1803 + */
1804 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
1805 +{
1806 + struct pe_sync_mailbox *mbox;
1807 + int i;
1808 +
1809 + for (i = 0; i < MAX_PE; i++)
1810 + if (pe_mask & (1 << i)) {
1811 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1812 +
1813 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1814 + long)&mbox->stop, 4);
1815 + }
1816 +}
1817 +
+/* This function ensures all PEs are put into an idle state */
1819 +int pe_reset_all(struct pfe_ctrl *ctrl)
1820 +{
1821 + struct pe_sync_mailbox *mbox;
1822 + int pe_stopped = 0;
1823 + unsigned long end = jiffies + 2;
1824 + int i;
1825 + int pe_mask = CLASS_MASK | TMU_MASK;
1826 +
1827 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1828 + pe_mask |= UTIL_MASK;
1829 +#endif
1830 +
1831 + for (i = 0; i < MAX_PE; i++)
1832 + if (pe_mask & (1 << i)) {
1833 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1834 +
1835 + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
1836 + long)&mbox->stop, 4);
1837 + }
1838 +
1839 + while (pe_stopped != pe_mask) {
1840 + for (i = 0; i < MAX_PE; i++)
1841 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1842 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1843 +
1844 + if (pe_dmem_read(i, (unsigned long)
1845 + &mbox->stopped, 4) &
1846 + cpu_to_be32(0x1))
1847 + pe_stopped |= (1 << i);
1848 + }
1849 +
1850 + if (relax(end) < 0)
1851 + goto err;
1852 + }
1853 +
1854 + return 0;
1855 +
1856 +err:
1857 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1858 + return -EIO;
1859 +}
1860 +
1861 +int pfe_ctrl_init(struct pfe *pfe)
1862 +{
1863 + struct pfe_ctrl *ctrl = &pfe->ctrl;
1864 + int id;
1865 +
1866 + pr_info("%s\n", __func__);
1867 +
1868 + mutex_init(&ctrl->mutex);
1869 + spin_lock_init(&ctrl->lock);
1870 +
1871 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
1872 + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
1873 + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
1874 + }
1875 +
1876 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1877 + if (id == TMU2_ID)
1878 + continue;
1879 + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
1880 + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
1881 + }
1882 +
1883 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1884 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
1885 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
1886 +#endif
1887 +
1888 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
1889 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
1890 + ROUTE_TABLE_BASEADDR;
1891 +
1892 + ctrl->dev = pfe->dev;
1893 +
1894 + pr_info("%s finished\n", __func__);
1895 +
1896 + return 0;
1897 +}
1898 +
1899 +void pfe_ctrl_exit(struct pfe *pfe)
1900 +{
1901 + pr_info("%s\n", __func__);
1902 +}
1903 --- /dev/null
1904 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
1905 @@ -0,0 +1,112 @@
1906 +/*
1907 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1908 + * Copyright 2017 NXP
1909 + *
1910 + * This program is free software; you can redistribute it and/or modify
1911 + * it under the terms of the GNU General Public License as published by
1912 + * the Free Software Foundation; either version 2 of the License, or
1913 + * (at your option) any later version.
1914 + *
1915 + * This program is distributed in the hope that it will be useful,
1916 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1917 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1918 + * GNU General Public License for more details.
1919 + *
1920 + * You should have received a copy of the GNU General Public License
1921 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1922 + */
1923 +
1924 +#ifndef _PFE_CTRL_H_
1925 +#define _PFE_CTRL_H_
1926 +
1927 +#include <linux/dmapool.h>
1928 +
1929 +#include "pfe_mod.h"
1930 +#include "pfe/pfe.h"
1931 +
+#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
+#define DMA_BUF_SIZE_256	0x100
+/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
+#define DMA_BUF_SIZE_512	0x200
+/* 512 byte DMA-allocated buffers used by the RTP relay feature */
+#define DMA_BUF_MIN_ALIGNMENT	8
+#define DMA_BUF_BOUNDARY	(4 * 1024)
+/* bursts cannot cross a 4k boundary */
1940 +
1941 +#define CMD_TX_ENABLE 0x0501
1942 +#define CMD_TX_DISABLE 0x0502
1943 +
1944 +#define CMD_RX_LRO 0x0011
1945 +#define CMD_PKTCAP_ENABLE 0x0d01
1946 +#define CMD_QM_EXPT_RATE 0x020c
1947 +
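+/*
+ * DMEM offsets of the static shared-memory block (mailboxes, resume flag,
+ * PE status, counters) that the PE firmware is expected to lay out.
+ */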
1948 +#define CLASS_DM_SH_STATIC (0x800)
1949 +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
1950 +#define CLASS_DM_SYNC_MBOX (0x808)
1951 +#define CLASS_DM_MSG_MBOX (0x810)
1952 +#define CLASS_DM_DROP_CNTR (0x820)
1953 +#define CLASS_DM_RESUME (0x854)
1954 +#define CLASS_DM_PESTATUS (0x860)
1955 +
1956 +#define TMU_DM_SH_STATIC (0x80)
1957 +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
1958 +#define TMU_DM_SYNC_MBOX (0x88)
1959 +#define TMU_DM_MSG_MBOX (0x90)
1960 +#define TMU_DM_RESUME (0xA0)
1961 +#define TMU_DM_PESTATUS (0xB0)
1962 +#define TMU_DM_CONTEXT (0x300)
1963 +#define TMU_DM_TX_TRANS (0x480)
1964 +
1965 +#define UTIL_DM_SH_STATIC (0x0)
1966 +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
1967 +#define UTIL_DM_SYNC_MBOX (0x8)
1968 +#define UTIL_DM_MSG_MBOX (0x10)
1969 +#define UTIL_DM_DROP_CNTR (0x20)
1970 +#define UTIL_DM_RESUME (0x40)
1971 +#define UTIL_DM_PESTATUS (0x50)
1972 +
1973 +struct pfe_ctrl {
1974 + struct mutex mutex; /* to serialize pfe control access */
1975 + spinlock_t lock;
1976 +
1977 + void *dma_pool;
1978 + void *dma_pool_512;
1979 + void *dma_pool_128;
1980 +
1981 + struct device *dev;
1982 +
1983 + void *hash_array_baseaddr; /*
1984 + * Virtual base address of
1985 + * the conntrack hash array
1986 + */
1987 + unsigned long hash_array_phys_baseaddr; /*
1988 + * Physical base address of
1989 + * the conntrack hash array
1990 + */
1991 +
1992 + int (*event_cb)(u16, u16, u16*);
1993 +
1994 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
1995 + * Sync mailbox PFE
1996 + * internal address,
1997 + * initialized
1998 + * when parsing elf images
1999 + */
2000 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2001 + * Msg mailbox PFE internal
2002 + * address, initialized
2003 + * when parsing elf images
2004 + */
2005 + unsigned int sys_clk; /* AXI clock value, in KHz */
2006 +};
2007 +
2008 +int pfe_ctrl_init(struct pfe *pfe);
2009 +void pfe_ctrl_exit(struct pfe *pfe);
2010 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2011 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2012 +int pe_reset_all(struct pfe_ctrl *ctrl);
2013 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2014 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2015 +int relax(unsigned long end);
2016 +
2017 +#endif /* _PFE_CTRL_H_ */
2018 --- /dev/null
2019 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2020 @@ -0,0 +1,111 @@
2021 +/*
2022 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2023 + * Copyright 2017 NXP
2024 + *
2025 + * This program is free software; you can redistribute it and/or modify
2026 + * it under the terms of the GNU General Public License as published by
2027 + * the Free Software Foundation; either version 2 of the License, or
2028 + * (at your option) any later version.
2029 + *
2030 + * This program is distributed in the hope that it will be useful,
2031 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2032 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2033 + * GNU General Public License for more details.
2034 + *
2035 + * You should have received a copy of the GNU General Public License
2036 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2037 + */
2038 +
2039 +#include <linux/module.h>
2040 +#include <linux/debugfs.h>
2041 +#include <linux/platform_device.h>
2042 +
2043 +#include "pfe_mod.h"
2044 +
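+/*
+ * Dump one PE's data memory, 32 bytes (8 words) per line with each word
+ * printed least-significant byte first; the PE id is passed through the
+ * debugfs file's private data.
+ */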
2045 +static int dmem_show(struct seq_file *s, void *unused)
2046 +{
2047 + u32 dmem_addr, val;
2048 + int id = (long int)s->private;
2049 + int i;
2050 +
2051 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2052 + seq_printf(s, "%04x:", dmem_addr);
2053 +
2054 + for (i = 0; i < 8; i++) {
2055 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2056 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2057 + (val >> 8) & 0xff, (val >> 16) & 0xff,
2058 + (val >> 24) & 0xff);
2059 + }
2060 +
2061 + seq_puts(s, "\n");
2062 + }
2063 +
2064 + return 0;
2065 +}
2066 +
2067 +static int dmem_open(struct inode *inode, struct file *file)
2068 +{
2069 + return single_open(file, dmem_show, inode->i_private);
2070 +}
2071 +
2072 +static const struct file_operations dmem_fops = {
2073 + .open = dmem_open,
2074 + .read = seq_read,
2075 + .llseek = seq_lseek,
2076 + .release = single_release,
2077 +};
2078 +
2079 +int pfe_debugfs_init(struct pfe *pfe)
2080 +{
+	struct dentry *d;
+	char name[16];
+	int i;
2082 +
2083 + pr_info("%s\n", __func__);
2084 +
2085 + pfe->dentry = debugfs_create_dir("pfe", NULL);
2086 + if (IS_ERR_OR_NULL(pfe->dentry))
2087 + goto err_dir;
2088 +
+	for (i = 0; i <= CLASS_MAX_ID; i++) {
+		snprintf(name, sizeof(name), "pe%d_dmem", i);
+		d = debugfs_create_file(name, 0444, pfe->dentry,
+					(void *)(long)i, &dmem_fops);
+		if (IS_ERR_OR_NULL(d))
+			goto err_pe;
+	}
2118 +
2119 + return 0;
2120 +
2121 +err_pe:
2122 + debugfs_remove_recursive(pfe->dentry);
2123 +
2124 +err_dir:
2125 + return -1;
2126 +}
2127 +
2128 +void pfe_debugfs_exit(struct pfe *pfe)
2129 +{
2130 + debugfs_remove_recursive(pfe->dentry);
2131 +}
2132 --- /dev/null
2133 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2134 @@ -0,0 +1,25 @@
2135 +/*
2136 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2137 + * Copyright 2017 NXP
2138 + *
2139 + * This program is free software; you can redistribute it and/or modify
2140 + * it under the terms of the GNU General Public License as published by
2141 + * the Free Software Foundation; either version 2 of the License, or
2142 + * (at your option) any later version.
2143 + *
2144 + * This program is distributed in the hope that it will be useful,
2145 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2146 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2147 + * GNU General Public License for more details.
2148 + *
2149 + * You should have received a copy of the GNU General Public License
2150 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2151 + */
2152 +
2153 +#ifndef _PFE_DEBUGFS_H_
2154 +#define _PFE_DEBUGFS_H_
2155 +
2156 +int pfe_debugfs_init(struct pfe *pfe);
2157 +void pfe_debugfs_exit(struct pfe *pfe);
2158 +
2159 +#endif /* _PFE_DEBUGFS_H_ */
2160 --- /dev/null
2161 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2162 @@ -0,0 +1,2474 @@
2163 +/*
2164 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2165 + * Copyright 2017 NXP
2166 + *
2167 + * This program is free software; you can redistribute it and/or modify
2168 + * it under the terms of the GNU General Public License as published by
2169 + * the Free Software Foundation; either version 2 of the License, or
2170 + * (at your option) any later version.
2171 + *
2172 + * This program is distributed in the hope that it will be useful,
2173 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2174 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2175 + * GNU General Public License for more details.
2176 + *
2177 + * You should have received a copy of the GNU General Public License
2178 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2179 + */
2180 +
2181 +/* @pfe_eth.c.
+ * Ethernet driver to handle the exception path for PFE.
2183 + * - uses HIF functions to send/receive packets.
2184 + * - uses ctrl function to start/stop interfaces.
2185 + * - uses direct register accesses to control phy operation.
2186 + */
2187 +#include <linux/version.h>
2188 +#include <linux/kernel.h>
2189 +#include <linux/interrupt.h>
2190 +#include <linux/dma-mapping.h>
2191 +#include <linux/dmapool.h>
2192 +#include <linux/netdevice.h>
2193 +#include <linux/etherdevice.h>
2194 +#include <linux/ethtool.h>
2195 +#include <linux/mii.h>
2196 +#include <linux/phy.h>
2197 +#include <linux/timer.h>
2198 +#include <linux/hrtimer.h>
2199 +#include <linux/platform_device.h>
2200 +
2201 +#include <net/ip.h>
2202 +#include <net/sock.h>
2203 +
2204 +#include <linux/io.h>
2205 +#include <asm/irq.h>
2206 +#include <linux/delay.h>
2207 +#include <linux/regmap.h>
2208 +#include <linux/i2c.h>
2209 +
2210 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2211 +#include <net/netfilter/nf_conntrack.h>
2212 +#endif
2213 +
2214 +#include "pfe_mod.h"
2215 +#include "pfe_eth.h"
2216 +
2217 +static void *cbus_emac_base[3];
2218 +static void *cbus_gpi_base[3];
2219 +
2220 +/* Forward Declaration */
2221 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2222 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2223 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2224 + from_tx, int n_desc);
2225 +
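+/* EMAC register offsets dumped via the ethtool get_regs interface */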
2226 +unsigned int gemac_regs[] = {
2227 + 0x0004, /* Interrupt event */
2228 + 0x0008, /* Interrupt mask */
2229 + 0x0024, /* Ethernet control */
2230 + 0x0064, /* MIB Control/Status */
2231 + 0x0084, /* Receive control/status */
2232 + 0x00C4, /* Transmit control */
2233 + 0x00E4, /* Physical address low */
2234 + 0x00E8, /* Physical address high */
2235 + 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
2236 + 0x0190, /* Receive FIFO Section Full Threshold */
2237 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
2238 + 0x01B0, /* Frame Truncation Length */
2239 +};
2240 +
2241 +/********************************************************************/
2242 +/* SYSFS INTERFACE */
2243 +/********************************************************************/
2244 +
2245 +#ifdef PFE_ETH_NAPI_STATS
2246 +/*
2247 + * pfe_eth_show_napi_stats
2248 + */
2249 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2250 + struct device_attribute *attr,
2251 + char *buf)
2252 +{
2253 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2254 + ssize_t len = 0;
2255 +
2256 + len += sprintf(buf + len, "sched: %u\n",
2257 + priv->napi_counters[NAPI_SCHED_COUNT]);
2258 + len += sprintf(buf + len, "poll: %u\n",
2259 + priv->napi_counters[NAPI_POLL_COUNT]);
2260 + len += sprintf(buf + len, "packet: %u\n",
2261 + priv->napi_counters[NAPI_PACKET_COUNT]);
2262 + len += sprintf(buf + len, "budget: %u\n",
2263 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2264 + len += sprintf(buf + len, "desc: %u\n",
2265 + priv->napi_counters[NAPI_DESC_COUNT]);
2266 +
2267 + return len;
2268 +}
2269 +
2270 +/*
2271 + * pfe_eth_set_napi_stats
2272 + */
2273 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2274 + struct device_attribute *attr,
2275 + const char *buf, size_t count)
2276 +{
2277 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2278 +
2279 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2280 +
2281 + return count;
2282 +}
2283 +#endif
2284 +#ifdef PFE_ETH_TX_STATS
2285 +/* pfe_eth_show_tx_stats
2286 + *
2287 + */
2288 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2289 + struct device_attribute *attr,
2290 + char *buf)
2291 +{
2292 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2293 + ssize_t len = 0;
2294 + int i;
2295 +
2296 + len += sprintf(buf + len, "TX queues stats:\n");
2297 +
2298 + for (i = 0; i < emac_txq_cnt; i++) {
2299 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2300 + i);
2301 +
2302 + len += sprintf(buf + len, "\n");
2303 + __netif_tx_lock_bh(tx_queue);
2304 +
2305 + hif_tx_lock(&pfe->hif);
2306 + len += sprintf(buf + len,
2307 + "Queue %2d : credits = %10d\n"
2308 + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2309 + len += sprintf(buf + len,
2310 + " tx packets = %10d\n"
2311 + , pfe->tmu_credit.tx_packets[priv->id][i]);
2312 + hif_tx_unlock(&pfe->hif);
2313 +
+		/* Don't output additional stats if the queue was never used */
2315 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
2316 + goto skip;
2317 +
2318 + len += sprintf(buf + len,
2319 + " clean_fail = %10d\n"
2320 + , priv->clean_fail[i]);
2321 + len += sprintf(buf + len,
2322 + " stop_queue = %10d\n"
2323 + , priv->stop_queue_total[i]);
2324 + len += sprintf(buf + len,
2325 + " stop_queue_hif = %10d\n"
2326 + , priv->stop_queue_hif[i]);
2327 + len += sprintf(buf + len,
2328 + " stop_queue_hif_client = %10d\n"
2329 + , priv->stop_queue_hif_client[i]);
2330 + len += sprintf(buf + len,
2331 + " stop_queue_credit = %10d\n"
2332 + , priv->stop_queue_credit[i]);
2333 +skip:
2334 + __netif_tx_unlock_bh(tx_queue);
2335 + }
2336 + return len;
2337 +}
2338 +
2339 +/* pfe_eth_set_tx_stats
2340 + *
2341 + */
2342 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2343 + struct device_attribute *attr,
2344 + const char *buf, size_t count)
2345 +{
2346 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2347 + int i;
2348 +
2349 + for (i = 0; i < emac_txq_cnt; i++) {
2350 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2351 + i);
2352 +
2353 + __netif_tx_lock_bh(tx_queue);
2354 + priv->clean_fail[i] = 0;
2355 + priv->stop_queue_total[i] = 0;
2356 + priv->stop_queue_hif[i] = 0;
2357 + priv->stop_queue_hif_client[i] = 0;
2358 + priv->stop_queue_credit[i] = 0;
2359 + __netif_tx_unlock_bh(tx_queue);
2360 + }
2361 +
2362 + return count;
2363 +}
2364 +#endif
2365 +/* pfe_eth_show_txavail
2366 + *
2367 + */
2368 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2369 + struct device_attribute *attr,
2370 + char *buf)
2371 +{
2372 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2373 + ssize_t len = 0;
2374 + int i;
2375 +
2376 + for (i = 0; i < emac_txq_cnt; i++) {
2377 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2378 + i);
2379 +
2380 + __netif_tx_lock_bh(tx_queue);
2381 +
2382 + len += sprintf(buf + len, "%d",
2383 + hif_lib_tx_avail(&priv->client, i));
2384 +
2385 + __netif_tx_unlock_bh(tx_queue);
2386 +
2387 + if (i == (emac_txq_cnt - 1))
2388 + len += sprintf(buf + len, "\n");
2389 + else
2390 + len += sprintf(buf + len, " ");
2391 + }
2392 +
2393 + return len;
2394 +}
2395 +
2396 +/* pfe_eth_show_default_priority
2397 + *
2398 + */
2399 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2400 + struct device_attribute *attr,
2401 + char *buf)
2402 +{
2403 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2404 + unsigned long flags;
2405 + int rc;
2406 +
2407 + spin_lock_irqsave(&priv->lock, flags);
2408 + rc = sprintf(buf, "%d\n", priv->default_priority);
2409 + spin_unlock_irqrestore(&priv->lock, flags);
2410 +
2411 + return rc;
2412 +}
2413 +
2414 +/* pfe_eth_set_default_priority
2415 + *
2416 + */
2417 +
2418 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2419 + struct device_attribute *attr,
2420 + const char *buf, size_t count)
2421 +{
2422 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long flags;
+	unsigned long prio;
+
+	/* kstrtoul() returns an error code, not the parsed value */
+	if (kstrtoul(buf, 0, &prio))
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->default_priority = prio;
+	spin_unlock_irqrestore(&priv->lock, flags);
2428 +
2429 + return count;
2430 +}
2431 +
2432 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2433 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2434 + pfe_eth_set_default_priority);
2435 +
2436 +#ifdef PFE_ETH_NAPI_STATS
2437 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2438 + pfe_eth_set_napi_stats);
2439 +#endif
2440 +
2441 +#ifdef PFE_ETH_TX_STATS
2442 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2443 + pfe_eth_set_tx_stats);
2444 +#endif
2445 +
2446 +/*
2447 + * pfe_eth_sysfs_init
2448 + *
2449 + */
2450 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2451 +{
2452 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2453 + int err;
2454 +
2455 + /* Initialize the default values */
2456 +
2457 + /*
2458 + * By default, packets without conntrack will use this default low
2459 + * priority queue
2460 + */
2461 + priv->default_priority = 0;
2462 +
2463 + /* Create our sysfs files */
2464 + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2465 + if (err) {
2466 + netdev_err(ndev,
2467 + "failed to create default_priority sysfs files\n");
2468 + goto err_priority;
2469 + }
2470 +
2471 + err = device_create_file(&ndev->dev, &dev_attr_txavail);
2472 + if (err) {
2473 + netdev_err(ndev,
+			   "failed to create txavail sysfs file\n");
2475 + goto err_txavail;
2476 + }
2477 +
2478 +#ifdef PFE_ETH_NAPI_STATS
2479 + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2480 + if (err) {
2481 + netdev_err(ndev, "failed to create napi stats sysfs files\n");
2482 + goto err_napi;
2483 + }
2484 +#endif
2485 +
2486 +#ifdef PFE_ETH_TX_STATS
2487 + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2488 + if (err) {
2489 + netdev_err(ndev, "failed to create tx stats sysfs files\n");
2490 + goto err_tx;
2491 + }
2492 +#endif
2493 +
2494 + return 0;
2495 +
2496 +#ifdef PFE_ETH_TX_STATS
2497 +err_tx:
2498 +#endif
2499 +#ifdef PFE_ETH_NAPI_STATS
2500 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2501 +
2502 +err_napi:
2503 +#endif
2504 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2505 +
2506 +err_txavail:
2507 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2508 +
2509 +err_priority:
2510 + return -1;
2511 +}
2512 +
2513 +/* pfe_eth_sysfs_exit
2514 + *
2515 + */
2516 +void pfe_eth_sysfs_exit(struct net_device *ndev)
2517 +{
2518 +#ifdef PFE_ETH_TX_STATS
2519 + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
2520 +#endif
2521 +
2522 +#ifdef PFE_ETH_NAPI_STATS
2523 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2524 +#endif
2525 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2526 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2527 +}
2528 +
2529 +/*************************************************************************/
+/*			ETHTOOL INTERFACE			 */
2531 +/*************************************************************************/
2532 +
+/* MTIP GEMAC */
2534 +static const struct fec_stat {
2535 + char name[ETH_GSTRING_LEN];
2536 + u16 offset;
2537 +} fec_stats[] = {
2538 + /* RMON TX */
2539 + { "tx_dropped", RMON_T_DROP },
2540 + { "tx_packets", RMON_T_PACKETS },
2541 + { "tx_broadcast", RMON_T_BC_PKT },
2542 + { "tx_multicast", RMON_T_MC_PKT },
2543 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
2544 + { "tx_undersize", RMON_T_UNDERSIZE },
2545 + { "tx_oversize", RMON_T_OVERSIZE },
2546 + { "tx_fragment", RMON_T_FRAG },
2547 + { "tx_jabber", RMON_T_JAB },
2548 + { "tx_collision", RMON_T_COL },
2549 + { "tx_64byte", RMON_T_P64 },
2550 + { "tx_65to127byte", RMON_T_P65TO127 },
2551 + { "tx_128to255byte", RMON_T_P128TO255 },
2552 + { "tx_256to511byte", RMON_T_P256TO511 },
2553 + { "tx_512to1023byte", RMON_T_P512TO1023 },
2554 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2555 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2556 + { "tx_octets", RMON_T_OCTETS },
2557 +
2558 + /* IEEE TX */
2559 + { "IEEE_tx_drop", IEEE_T_DROP },
2560 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2561 + { "IEEE_tx_1col", IEEE_T_1COL },
2562 + { "IEEE_tx_mcol", IEEE_T_MCOL },
2563 + { "IEEE_tx_def", IEEE_T_DEF },
2564 + { "IEEE_tx_lcol", IEEE_T_LCOL },
2565 + { "IEEE_tx_excol", IEEE_T_EXCOL },
2566 + { "IEEE_tx_macerr", IEEE_T_MACERR },
2567 + { "IEEE_tx_cserr", IEEE_T_CSERR },
2568 + { "IEEE_tx_sqe", IEEE_T_SQE },
2569 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2570 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2571 +
2572 + /* RMON RX */
2573 + { "rx_packets", RMON_R_PACKETS },
2574 + { "rx_broadcast", RMON_R_BC_PKT },
2575 + { "rx_multicast", RMON_R_MC_PKT },
2576 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
2577 + { "rx_undersize", RMON_R_UNDERSIZE },
2578 + { "rx_oversize", RMON_R_OVERSIZE },
2579 + { "rx_fragment", RMON_R_FRAG },
2580 + { "rx_jabber", RMON_R_JAB },
2581 + { "rx_64byte", RMON_R_P64 },
2582 + { "rx_65to127byte", RMON_R_P65TO127 },
2583 + { "rx_128to255byte", RMON_R_P128TO255 },
2584 + { "rx_256to511byte", RMON_R_P256TO511 },
2585 + { "rx_512to1023byte", RMON_R_P512TO1023 },
2586 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2587 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2588 + { "rx_octets", RMON_R_OCTETS },
2589 +
2590 + /* IEEE RX */
2591 + { "IEEE_rx_drop", IEEE_R_DROP },
2592 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2593 + { "IEEE_rx_crc", IEEE_R_CRC },
2594 + { "IEEE_rx_align", IEEE_R_ALIGN },
2595 + { "IEEE_rx_macerr", IEEE_R_MACERR },
2596 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2597 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2598 +};
2599 +
2600 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
2601 + *stats, u64 *data)
2602 +{
2603 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2604 + int i;
2605 +
2606 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2607 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
2608 +}
2609 +
2610 +static void pfe_eth_gstrings(struct net_device *netdev,
2611 + u32 stringset, u8 *data)
2612 +{
2613 + int i;
2614 +
2615 + switch (stringset) {
2616 + case ETH_SS_STATS:
2617 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2618 + memcpy(data + i * ETH_GSTRING_LEN,
2619 + fec_stats[i].name, ETH_GSTRING_LEN);
2620 + break;
2621 + }
2622 +}
2623 +
2624 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
2625 +{
2626 + switch (sset) {
2627 + case ETH_SS_STATS:
2628 + return ARRAY_SIZE(fec_stats);
2629 + default:
2630 + return -EOPNOTSUPP;
2631 + }
2632 +}
2633 +
2634 +/*
+ * pfe_eth_gemac_reglen - Return the length of the register dump, in bytes.
2636 + *
2637 + */
2638 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
2639 +{
2640 + pr_info("%s()\n", __func__);
+	/* ethtool expects the dump length in bytes, not in registers */
+	return sizeof(gemac_regs);
2642 +}
2643 +
2644 +/*
2645 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
2646 + *
2647 + */
2648 +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
2649 + *regs, void *regbuf)
2650 +{
2651 + int i;
2652 +
2653 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2654 + u32 *buf = (u32 *)regbuf;
2655 +
2656 + pr_info("%s()\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(gemac_regs); i++)
2658 + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
2659 +}
2660 +
2661 +/*
2662 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
2663 + *
2664 + */
2665 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2666 +{
2667 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2668 +
2669 + if (wol->wolopts & ~WAKE_MAGIC)
2670 + return -EOPNOTSUPP;
2671 +
2672 + /* for MTIP we store wol->wolopts */
2673 + priv->wol = wol->wolopts;
2674 +
2675 + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2676 +
2677 + return 0;
2678 +}
2679 +
2680 +/*
2681 + *
2682 + * pfe_eth_get_wol - Get the WoL options.
2683 + *
2684 + */
2685 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
2686 + *wol)
2687 +{
2688 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2689 +
2690 + wol->supported = WAKE_MAGIC;
2691 + wol->wolopts = 0;
2692 +
2693 + if (priv->wol & WAKE_MAGIC)
2694 + wol->wolopts = WAKE_MAGIC;
2695 +
2696 + memset(&wol->sopass, 0, sizeof(wol->sopass));
2697 +}
2698 +
2699 +/*
2700 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
2701 + *
2702 + */
2703 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
2704 + *drvinfo)
2705 +{
2706 + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2707 + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
2708 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2709 + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
2710 +}
2711 +
2712 +/*
2713 + * pfe_eth_set_settings - Used to send commands to PHY.
2714 + *
2715 + */
2716 +static int pfe_eth_set_settings(struct net_device *ndev,
2717 + const struct ethtool_link_ksettings *cmd)
2718 +{
2719 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2720 + struct phy_device *phydev = priv->phydev;
2721 +
2722 + if (!phydev)
2723 + return -ENODEV;
2724 +
2725 + return phy_ethtool_ksettings_set(phydev, cmd);
2726 +}
2727 +
2728 +/*
+ * pfe_eth_get_settings - Return the current settings in the ethtool_cmd
2730 + * structure.
2731 + *
2732 + */
2733 +static int pfe_eth_get_settings(struct net_device *ndev,
2734 + struct ethtool_link_ksettings *cmd)
2735 +{
2736 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2737 + struct phy_device *phydev = priv->phydev;
2738 +
2739 + if (!phydev)
2740 + return -ENODEV;
2741 +
2742 + phy_ethtool_ksettings_get(phydev, cmd);
2743 +
2744 + return 0;
2745 +}
2746 +
2747 +/*
2748 + * pfe_eth_get_msglevel - Gets the debug message mask.
2749 + *
2750 + */
2751 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
2752 +{
2753 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2754 +
2755 + return priv->msg_enable;
2756 +}
2757 +
2758 +/*
2759 + * pfe_eth_set_msglevel - Sets the debug message mask.
2760 + *
2761 + */
2762 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
2763 +{
2764 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2765 +
2766 + priv->msg_enable = data;
2767 +}
2768 +
2769 +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
2770 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
2771 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
2772 + HIF_RX_COAL_CLKS_PER_USEC)
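+/*
+ * The HIF coalescing register counts clock cycles; sys_clk is in kHz, so
+ * sys_clk / 1000 gives the number of cycles per microsecond.
+ */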
2773 +
2774 +/*
2775 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
2776 + *
2777 + */
2778 +static int pfe_eth_set_coalesce(struct net_device *ndev,
2779 + struct ethtool_coalesce *ec)
2780 +{
2781 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
2782 + return -EINVAL;
2783 +
2784 + if (!ec->rx_coalesce_usecs) {
2785 + writel(0, HIF_INT_COAL);
2786 + return 0;
2787 + }
2788 +
2789 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
2790 + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2791 +
2792 + return 0;
2793 +}
2794 +
2795 +/*
2796 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
2797 + *
2798 + */
2799 +static int pfe_eth_get_coalesce(struct net_device *ndev,
2800 + struct ethtool_coalesce *ec)
2801 +{
2802 + int reg_val = readl(HIF_INT_COAL);
2803 +
2804 + if (reg_val & HIF_INT_COAL_ENABLE)
2805 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
2806 + HIF_RX_COAL_CLKS_PER_USEC;
2807 + else
2808 + ec->rx_coalesce_usecs = 0;
2809 +
2810 + return 0;
2811 +}
2812 +
2813 +/*
2814 + * pfe_eth_set_pauseparam - Sets pause parameters
2815 + *
2816 + */
2817 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
2818 + struct ethtool_pauseparam *epause)
2819 +{
2820 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2821 +
2822 + if (epause->tx_pause != epause->rx_pause) {
2823 + netdev_info(ndev,
+			    "hardware only supports enabling/disabling both tx and rx\n");
2825 + return -EINVAL;
2826 + }
2827 +
2828 + priv->pause_flag = 0;
2829 + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
2830 + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
2831 +
2832 + if (epause->rx_pause || epause->autoneg) {
2833 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
2834 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
2835 + EGPI_PAUSE_ENABLE),
2836 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2837 + if (priv->phydev) {
2838 + priv->phydev->supported |= ADVERTISED_Pause |
2839 + ADVERTISED_Asym_Pause;
2840 + priv->phydev->advertising |= ADVERTISED_Pause |
2841 + ADVERTISED_Asym_Pause;
2842 + }
2843 + } else {
2844 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
2845 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
2846 + ~EGPI_PAUSE_ENABLE),
2847 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2848 + if (priv->phydev) {
2849 + priv->phydev->supported &= ~(ADVERTISED_Pause |
2850 + ADVERTISED_Asym_Pause);
2851 + priv->phydev->advertising &= ~(ADVERTISED_Pause |
2852 + ADVERTISED_Asym_Pause);
2853 + }
2854 + }
2855 +
2856 + return 0;
2857 +}
2858 +
2859 +/*
2860 + * pfe_eth_get_pauseparam - Gets pause parameters
2861 + *
2862 + */
2863 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
2864 + struct ethtool_pauseparam *epause)
2865 +{
2866 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2867 +
2868 + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
2869 + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
2870 + epause->rx_pause = epause->tx_pause;
2871 +}
2872 +
2873 +/*
2874 + * pfe_eth_get_hash
2875 + */
2876 +#define PFE_HASH_BITS 6 /* #bits in hash */
2877 +#define CRC32_POLY 0xEDB88320
2878 +
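+/*
+ * Map a MAC address to a 6-bit index (0..63): the top PFE_HASH_BITS of the
+ * address CRC32 select one bit in the GEMAC hash registers.
+ */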
2879 +static int pfe_eth_get_hash(u8 *addr)
2880 +{
2881 + unsigned int i, bit, data, crc, hash;
2882 +
2883 + /* calculate crc32 value of mac address */
2884 + crc = 0xffffffff;
2885 +
2886 + for (i = 0; i < 6; i++) {
2887 + data = addr[i];
2888 + for (bit = 0; bit < 8; bit++, data >>= 1) {
2889 + crc = (crc >> 1) ^
2890 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
2891 + }
2892 + }
2893 +
2894 + /*
2895 + * only upper 6 bits (PFE_HASH_BITS) are used
2896 + * which point to specific bit in the hash registers
2897 + */
2898 + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
2899 +
2900 + return hash;
2901 +}
2902 +
2903 +const struct ethtool_ops pfe_ethtool_ops = {
2904 + .get_drvinfo = pfe_eth_get_drvinfo,
2905 + .get_regs_len = pfe_eth_gemac_reglen,
2906 + .get_regs = pfe_eth_gemac_get_regs,
2907 + .get_link = ethtool_op_get_link,
2908 + .get_wol = pfe_eth_get_wol,
2909 + .set_wol = pfe_eth_set_wol,
2910 + .set_pauseparam = pfe_eth_set_pauseparam,
2911 + .get_pauseparam = pfe_eth_get_pauseparam,
2912 + .get_strings = pfe_eth_gstrings,
2913 + .get_sset_count = pfe_eth_stats_count,
2914 + .get_ethtool_stats = pfe_eth_fill_stats,
2915 + .get_msglevel = pfe_eth_get_msglevel,
2916 + .set_msglevel = pfe_eth_set_msglevel,
2917 + .set_coalesce = pfe_eth_set_coalesce,
2918 + .get_coalesce = pfe_eth_get_coalesce,
2919 + .get_link_ksettings = pfe_eth_get_settings,
2920 + .set_link_ksettings = pfe_eth_set_settings,
2921 +};
2922 +
2923 +/* pfe_eth_mdio_reset
2924 + */
2925 +int pfe_eth_mdio_reset(struct mii_bus *bus)
2926 +{
2927 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2928 + u32 phy_speed;
2929 +
2930 + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
2931 +
2932 + mutex_lock(&bus->mdio_lock);
2933 +
2934 + /*
2935 + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2936 + *
2937 + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2938 + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
2939 + */
2940 + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
2941 + << EMAC_MII_SPEED_SHIFT);
2942 + phy_speed |= EMAC_HOLDTIME(0x5);
2943 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
2944 +
2945 + mutex_unlock(&bus->mdio_lock);
2946 +
2947 + return 0;
2948 +}
2949 +
+/* pfe_eth_gemac_phy_timeout
+ *
+ * Wait for the MII transfer-done event, polling in ~10us steps for at most
+ * "timeout" iterations; the event flag is cleared on success.
+ */
2953 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
2954 +{
2955 + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
2956 + EMAC_IEVENT_MII)) {
2957 + if (timeout-- <= 0)
2958 + return -1;
2959 + usleep_range(10, 20);
2960 + }
2961 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
2962 + return 0;
2963 +}
2964 +
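+/*
+ * Select the board-level MDIO mux over I2C (bus 0, device 0x66, register
+ * 0x54) so that the requested external PHY becomes reachable.
+ */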
2965 +static int pfe_eth_mdio_mux(u8 muxval)
2966 +{
2967 + struct i2c_adapter *a;
2968 + struct i2c_msg msg;
2969 + unsigned char buf[2];
2970 + int ret;
2971 +
2972 + a = i2c_get_adapter(0);
2973 + if (!a)
2974 + return -ENODEV;
2975 +
+	/* write the mux selection to register 0x54 of the device at 0x66 */
2977 + buf[0] = 0x54; /* reg number */
2978 + buf[1] = (muxval << 6) | 0x3; /* data */
2979 + msg.addr = 0x66;
2980 + msg.buf = buf;
2981 + msg.len = 2;
2982 + msg.flags = 0;
2983 + ret = i2c_transfer(a, &msg, 1);
2984 + i2c_put_adapter(a);
2985 + if (ret != 1)
2986 + return -ENODEV;
2987 + return 0;
2988 +}
2989 +
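+/*
+ * Clause 45 address phase: latch the MMD register number before issuing
+ * the actual read or write data transaction.
+ */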
2990 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
2991 + int dev_addr, int regnum)
2992 +{
2993 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2994 +
2995 + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
2996 + EMAC_MII_DATA_RA(dev_addr) |
2997 + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
2998 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
2999 +
3000 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3001 + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
3002 + __func__);
3003 + return -1;
3004 + }
3005 +
3006 + return 0;
3007 +}
3008 +
3009 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3010 + u16 value)
3011 +{
3012 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3013 +
+	/* To access external PHYs on the QDS board the mux must be configured */
3015 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3016 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3017 +
3018 + if (regnum & MII_ADDR_C45) {
3019 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3020 + regnum & 0xffff);
3021 + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3022 + EMAC_MII_DATA_PA(mii_id) |
3023 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3024 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3025 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3026 + } else {
3027 + /* start a write op */
3028 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3029 + EMAC_MII_DATA_PA(mii_id) |
3030 + EMAC_MII_DATA_RA(regnum) |
3031 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3032 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3033 + }
3034 +
3035 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3036 + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
3037 + __func__);
3038 + return -1;
3039 + }
3040 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3041 + mii_id, regnum, value);
3042 +
3043 + return 0;
3044 +}
3045 +
3046 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3047 +{
3048 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3049 + u16 value = 0;
3050 +
+	/* To access external PHYs on the QDS board the mux must be configured */
3052 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3053 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3054 +
3055 + if (regnum & MII_ADDR_C45) {
3056 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3057 + regnum & 0xffff);
3058 + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3059 + EMAC_MII_DATA_PA(mii_id) |
3060 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3061 + EMAC_MII_DATA_TA,
3062 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3063 + } else {
3064 + /* start a read op */
3065 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3066 + EMAC_MII_DATA_PA(mii_id) |
3067 + EMAC_MII_DATA_RA(regnum) |
3068 + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
3069 + EMAC_MII_DATA_REG);
3070 + }
3071 +
3072 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3073 + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
3074 + return -1;
3075 + }
3076 +
3077 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
3078 + EMAC_MII_DATA_REG));
3079 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3080 + mii_id, regnum, value);
3081 + return value;
3082 +}
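+/*
+ * Usage sketch (assumed, following the kernel 4.9 Clause-45 convention
+ * from <linux/mdio.h>): a C45 register is addressed by encoding the MMD
+ * device in bits 20:16 of regnum and setting MII_ADDR_C45, e.g.
+ *
+ *	int regnum = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | 0x7;
+ *	int val = pfe_eth_mdio_read(bus, phy_addr, regnum);
+ *
+ * which makes the read above issue the address frame first via
+ * pfe_eth_mdio_write_addr(), then the EMAC_MII_DATA_OP_CL45_RD frame.
+ */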
3083 +
3084 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
3085 + struct ls1012a_mdio_platform_data *minfo)
3086 +{
3087 + struct mii_bus *bus;
3088 + int rc, ii;
3089 + struct phy_device *phydev;
3090 +
3091 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3092 + pr_info("%s\n", __func__);
3093 +
3094 + bus = mdiobus_alloc();
3095 + if (!bus) {
3096 + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
3097 + rc = -ENOMEM;
3098 + goto err0;
3099 + }
3100 +
3101 + bus->name = "ls1012a MDIO Bus";
3102 + bus->read = &pfe_eth_mdio_read;
3103 + bus->write = &pfe_eth_mdio_write;
3104 + bus->reset = &pfe_eth_mdio_reset;
3105 + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
3106 + bus->priv = priv;
3107 +
3108 + bus->phy_mask = minfo->phy_mask;
3109 + priv->mdc_div = minfo->mdc_div;
3110 +
3111 + if (!priv->mdc_div)
3112 + priv->mdc_div = 64;
3113 +
3114 + bus->irq[0] = minfo->irq[0];
3115 +
3116 + bus->parent = priv->pfe->dev;
3117 +
3118 + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
3119 + __func__, priv->mdc_div, bus->phy_mask);
3120 + rc = mdiobus_register(bus);
3121 + if (rc) {
3122 + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
3123 + bus->name);
3124 + goto err1;
3125 + }
3126 +
3127 + priv->mii_bus = bus;
3128 +
3129 +	/* For clause 45 we need to call get_phy_device() with its
3130 +	 * 3rd argument set to true, and then register the phy device
3131 + * via phy_device_register()
3132 + */
3133 +
3134 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) {
3135 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
3136 + phydev = get_phy_device(priv->mii_bus,
3137 + priv->einfo->phy_id + ii, true);
3138 + if (!phydev || IS_ERR(phydev)) {
3139 + rc = -EIO;
3140 + netdev_err(priv->ndev, "fail to get device\n");
3141 + goto err1;
3142 + }
3143 + rc = phy_device_register(phydev);
3144 + if (rc) {
3145 + phy_device_free(phydev);
3146 + netdev_err(priv->ndev,
3147 + "phy_device_register() failed\n");
3148 + goto err1;
3149 + }
3150 + }
3151 + }
3152 +
3153 + pfe_eth_mdio_reset(bus);
3154 +
3155 + return 0;
3156 +
3157 +err1:
3158 + mdiobus_free(bus);
3159 +err0:
3160 + return rc;
3161 +}
3162 +
3163 +/* pfe_eth_mdio_exit
3164 + */
3165 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
3166 +{
3167 + if (!bus)
3168 + return;
3169 +
3170 +	netif_info((struct pfe_eth_priv_s *)bus->priv, drv,
3171 +		   ((struct pfe_eth_priv_s *)bus->priv)->ndev, "%s\n", __func__);
3172 +
3173 + mdiobus_unregister(bus);
3174 + mdiobus_free(bus);
3175 +}
3176 +
3177 +/* pfe_get_phydev_speed
3178 + */
3179 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3180 +{
3181 + switch (phydev->speed) {
3182 + case 10:
3183 + return SPEED_10M;
3184 + case 100:
3185 + return SPEED_100M;
3186 + case 1000:
3187 + default:
3188 + return SPEED_1000M;
3189 + }
3190 +}
3191 +
3192 +/* pfe_set_rgmii_speed
3193 + */
3194 +#define RGMIIPCR 0x434
3195 +/* RGMIIPCR bit definitions*/
3196 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
3197 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
3198 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
3199 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
3200 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
3201 +
3202 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3203 +{
3204 + u32 rgmii_pcr;
3205 +
3206 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3207 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3208 +
3209 + switch (phydev->speed) {
3210 + case 10:
3211 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3212 + break;
3213 + case 1000:
3214 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3215 + break;
3216 + case 100:
3217 + default:
3218 + /* Default is 100M */
3219 + break;
3220 + }
3221 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3222 +}
3223 +
3224 +/* pfe_get_phydev_duplex
3225 + */
3226 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3227 +{
3228 + /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
3229 + return DUPLEX_FULL;
3230 +}
3231 +
3232 +/* pfe_eth_adjust_link
3233 + */
3234 +static void pfe_eth_adjust_link(struct net_device *ndev)
3235 +{
3236 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3237 + unsigned long flags;
3238 + struct phy_device *phydev = priv->phydev;
3239 + int new_state = 0;
3240 +
3241 + netif_info(priv, drv, ndev, "%s\n", __func__);
3242 +
3243 + spin_lock_irqsave(&priv->lock, flags);
3244 +
3245 + if (phydev->link) {
3246 + /*
3247 + * Now we make sure that we can be in full duplex mode.
3248 + * If not, we operate in half-duplex mode.
3249 + */
3250 + if (phydev->duplex != priv->oldduplex) {
3251 + new_state = 1;
3252 + gemac_set_duplex(priv->EMAC_baseaddr,
3253 + pfe_get_phydev_duplex(phydev));
3254 + priv->oldduplex = phydev->duplex;
3255 + }
3256 +
3257 + if (phydev->speed != priv->oldspeed) {
3258 + new_state = 1;
3259 + gemac_set_speed(priv->EMAC_baseaddr,
3260 + pfe_get_phydev_speed(phydev));
3261 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
3262 + pfe_set_rgmii_speed(phydev);
3263 + priv->oldspeed = phydev->speed;
3264 + }
3265 +
3266 + if (!priv->oldlink) {
3267 + new_state = 1;
3268 + priv->oldlink = 1;
3269 + }
3270 +
3271 + } else if (priv->oldlink) {
3272 + new_state = 1;
3273 + priv->oldlink = 0;
3274 + priv->oldspeed = 0;
3275 + priv->oldduplex = -1;
3276 + }
3277 +
3278 + if (new_state && netif_msg_link(priv))
3279 + phy_print_status(phydev);
3280 +
3281 + spin_unlock_irqrestore(&priv->lock, flags);
3282 +}
3283 +
3284 +/* pfe_phy_exit
3285 + */
3286 +static void pfe_phy_exit(struct net_device *ndev)
3287 +{
3288 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3289 +
3290 + netif_info(priv, drv, ndev, "%s\n", __func__);
3291 +
3292 + phy_disconnect(priv->phydev);
3293 + priv->phydev = NULL;
3294 +}
3295 +
3296 +/* pfe_eth_stop
3297 + */
3298 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3299 +{
3300 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3301 +
3302 + netif_info(priv, drv, ndev, "%s\n", __func__);
3303 +
3304 + if (wake) {
3305 + gemac_tx_disable(priv->EMAC_baseaddr);
3306 + } else {
3307 + gemac_disable(priv->EMAC_baseaddr);
3308 + gpi_disable(priv->GPI_baseaddr);
3309 +
3310 + if (priv->phydev)
3311 + phy_stop(priv->phydev);
3312 + }
3313 +}
3314 +
3315 +/* pfe_eth_start
3316 + */
3317 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3318 +{
3319 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3320 +
3321 + if (priv->phydev)
3322 + phy_start(priv->phydev);
3323 +
3324 + gpi_enable(priv->GPI_baseaddr);
3325 + gemac_enable(priv->EMAC_baseaddr);
3326 +
3327 + return 0;
3328 +}
3329 +
3330 +/*
3331 + * Configure the on-chip SerDes through MDIO
3332 + */
3333 +static void ls1012a_configure_serdes(struct net_device *ndev)
3334 +{
3335 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
3336 + int sgmii_2500 = 0;
3337 + struct mii_bus *bus = priv->mii_bus;
3338 + u16 value = 0;
3339 +
3340 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
3341 + sgmii_2500 = 1;
3342 +
3343 + netif_info(priv, drv, ndev, "%s\n", __func__);
3344 + /* PCS configuration done with corresponding GEMAC */
3345 +
3346 + pfe_eth_mdio_read(bus, 0, 0);
3347 + pfe_eth_mdio_read(bus, 0, 1);
3348 +
3349 +	/* These settings were provided by the validation team */
3350 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
3351 + if (sgmii_2500) {
3352 + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
3353 + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
3354 + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
3355 + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
3356 +		/* Autonegotiation needs to be disabled for 2.5G SGMII mode */
3357 + value = 0x0140;
3358 + pfe_eth_mdio_write(bus, 0, 0x0, value);
3359 + } else {
3360 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
3361 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
3362 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
3363 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
3364 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
3365 + }
3366 +}
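+/*
+ * Decoding the BMCR values written above (standard MII bits, for clarity):
+ * 0x8000 = BMCR_RESET, 0x0140 = BMCR_SPEED1000 | BMCR_FULLDPLX
+ * (autonegotiation left off, as required for 2.5G SGMII), and 0x1140
+ * additionally sets BMCR_ANENABLE for plain SGMII.
+ */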
3367 +
3368 +/*
3369 + * pfe_phy_init
3370 + *
3371 + */
3372 +static int pfe_phy_init(struct net_device *ndev)
3373 +{
3374 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3375 + struct phy_device *phydev;
3376 + char phy_id[MII_BUS_ID_SIZE + 3];
3377 + char bus_id[MII_BUS_ID_SIZE];
3378 + phy_interface_t interface;
3379 +
3380 + priv->oldlink = 0;
3381 + priv->oldspeed = 0;
3382 + priv->oldduplex = -1;
3383 +
3384 + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3385 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3386 + priv->einfo->phy_id);
3387 +
3388 + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3389 + interface = priv->einfo->mii_config;
3390 + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3391 + (interface == PHY_INTERFACE_MODE_2500SGMII)) {
3392 +		/* Configure SGMII PCS */
3393 + if (pfe->scfg) {
3394 +			/* Configure MDIO access via SerDes */
3395 + regmap_write(pfe->scfg, 0x484, 0x00000000);
3396 + }
3397 + ls1012a_configure_serdes(ndev);
3398 + }
3399 +
3400 + if (pfe->scfg) {
3401 +		/* Configure MDIO access via pads */
3402 + regmap_write(pfe->scfg, 0x484, 0x80000000);
3403 + }
3404 +
3405 + priv->oldlink = 0;
3406 + priv->oldspeed = 0;
3407 + priv->oldduplex = -1;
3408 + pr_info("%s interface %x\n", __func__, interface);
3409 + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
3410 +
3411 + if (IS_ERR(phydev)) {
3412 + netdev_err(ndev, "phy_connect() failed\n");
3413 + return PTR_ERR(phydev);
3414 + }
3415 +
3416 + priv->phydev = phydev;
3417 + phydev->irq = PHY_POLL;
3418 +
3419 + return 0;
3420 +}
3421 +
3422 +/* pfe_gemac_init
3423 + */
3424 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3425 +{
3426 + struct gemac_cfg cfg;
3427 +
3428 + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3429 +
3430 + cfg.speed = SPEED_1000M;
3431 + cfg.duplex = DUPLEX_FULL;
3432 +
3433 + gemac_set_config(priv->EMAC_baseaddr, &cfg);
3434 + gemac_allow_broadcast(priv->EMAC_baseaddr);
3435 + gemac_enable_1536_rx(priv->EMAC_baseaddr);
3436 + gemac_enable_rx_jmb(priv->EMAC_baseaddr);
3437 + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3438 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3439 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3440 +
3441 +	/* GEM will perform checksum verification */
3442 + if (priv->ndev->features & NETIF_F_RXCSUM)
3443 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3444 + else
3445 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3446 +
3447 + return 0;
3448 +}
3449 +
3450 +/* pfe_eth_event_handler
3451 + */
3452 +static int pfe_eth_event_handler(void *data, int event, int qno)
3453 +{
3454 + struct pfe_eth_priv_s *priv = data;
3455 +
3456 + switch (event) {
3457 + case EVENT_RX_PKT_IND:
3458 +
3459 + if (qno == 0) {
3460 + if (napi_schedule_prep(&priv->high_napi)) {
3461 + netif_info(priv, intr, priv->ndev,
3462 + "%s: schedule high prio poll\n"
3463 + , __func__);
3464 +
3465 +#ifdef PFE_ETH_NAPI_STATS
3466 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3467 +#endif
3468 +
3469 + __napi_schedule(&priv->high_napi);
3470 + }
3471 + } else if (qno == 1) {
3472 + if (napi_schedule_prep(&priv->low_napi)) {
3473 + netif_info(priv, intr, priv->ndev,
3474 + "%s: schedule low prio poll\n"
3475 + , __func__);
3476 +
3477 +#ifdef PFE_ETH_NAPI_STATS
3478 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3479 +#endif
3480 + __napi_schedule(&priv->low_napi);
3481 + }
3482 + } else if (qno == 2) {
3483 + if (napi_schedule_prep(&priv->lro_napi)) {
3484 + netif_info(priv, intr, priv->ndev,
3485 + "%s: schedule lro prio poll\n"
3486 + , __func__);
3487 +
3488 +#ifdef PFE_ETH_NAPI_STATS
3489 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3490 +#endif
3491 + __napi_schedule(&priv->lro_napi);
3492 + }
3493 + }
3494 +
3495 + break;
3496 +
3497 + case EVENT_TXDONE_IND:
3498 + pfe_eth_flush_tx(priv);
3499 + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
3500 + break;
3501 + case EVENT_HIGH_RX_WM:
3502 + default:
3503 + break;
3504 + }
3505 +
3506 + return 0;
3507 +}
3508 +
3509 +/* pfe_eth_open
3510 + */
3511 +static int pfe_eth_open(struct net_device *ndev)
3512 +{
3513 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3514 + struct hif_client_s *client;
3515 + int rc;
3516 +
3517 + netif_info(priv, ifup, ndev, "%s\n", __func__);
3518 +
3519 + /* Register client driver with HIF */
3520 + client = &priv->client;
3521 + memset(client, 0, sizeof(*client));
3522 + client->id = PFE_CL_GEM0 + priv->id;
3523 + client->tx_qn = emac_txq_cnt;
3524 + client->rx_qn = EMAC_RXQ_CNT;
3525 + client->priv = priv;
3526 + client->pfe = priv->pfe;
3527 + client->event_handler = pfe_eth_event_handler;
3528 +
3529 + client->tx_qsize = EMAC_TXQ_DEPTH;
3530 + client->rx_qsize = EMAC_RXQ_DEPTH;
3531 +
3532 + rc = hif_lib_client_register(client);
3533 + if (rc) {
3534 + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
3535 + __func__, client->id);
3536 + goto err0;
3537 + }
3538 +
3539 + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
3540 + client);
3541 +
3542 + pfe_gemac_init(priv);
3543 +
3544 + if (!is_valid_ether_addr(ndev->dev_addr)) {
3545 + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
3546 + rc = -EADDRNOTAVAIL;
3547 + goto err1;
3548 + }
3549 +
3550 + gemac_set_laddrN(priv->EMAC_baseaddr,
3551 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3552 +
3553 + napi_enable(&priv->high_napi);
3554 + napi_enable(&priv->low_napi);
3555 + napi_enable(&priv->lro_napi);
3556 +
3557 + rc = pfe_eth_start(priv);
3558 +
3559 + netif_tx_wake_all_queues(ndev);
3560 +
3561 + return rc;
3562 +
3563 +err1:
3564 + hif_lib_client_unregister(&priv->client);
3565 +
3566 +err0:
3567 + return rc;
3568 +}
3569 +
3570 +/*
3571 + * pfe_eth_shutdown
3572 + */
3573 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
3574 +{
3575 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3576 + int i, qstatus;
3577 + unsigned long next_poll = jiffies + 1, end = jiffies +
3578 + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3579 + int tx_pkts, prv_tx_pkts;
3580 +
3581 + netif_info(priv, ifdown, ndev, "%s\n", __func__);
3582 +
3583 + for (i = 0; i < emac_txq_cnt; i++)
3584 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
3585 +
3586 + netif_tx_stop_all_queues(ndev);
3587 +
3588 + do {
3589 + tx_pkts = 0;
3590 + pfe_eth_flush_tx(priv);
3591 +
3592 + for (i = 0; i < emac_txq_cnt; i++)
3593 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
3594 +
3595 + if (tx_pkts) {
3596 +			/* Don't wait forever; break if we cross the max timeout */
3597 + if (time_after(jiffies, end)) {
3598 + pr_err(
3599 +				"(%s) Tx is not complete after %d msec\n",
3600 + ndev->name, TX_POLL_TIMEOUT_MS);
3601 + break;
3602 + }
3603 +
3604 + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
3605 + , __func__, ndev->name, tx_pkts);
3606 + if (need_resched())
3607 + schedule();
3608 + }
3609 +
3610 + } while (tx_pkts);
3611 +
3612 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3613 +
3614 + prv_tx_pkts = tmu_pkts_processed(priv->id);
3615 +	/*
3616 +	 * Wait until the TMU has transmitted all pending packets:
3617 +	 * poll tmu_qstatus and the TMU processed-packet count every 10ms.
3618 +	 * Consider the TMU busy if its queue is still pending or it has
3619 +	 * processed any packets since the last poll.
3620 +	 */
3621 + while (1) {
3622 + if (time_after(jiffies, next_poll)) {
3623 + tx_pkts = tmu_pkts_processed(priv->id);
3624 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
3625 +
3626 + if (!qstatus && (tx_pkts == prv_tx_pkts))
3627 + break;
3628 + /* Don't wait forever, break if we cross max
3629 + * timeout(TX_POLL_TIMEOUT_MS)
3630 + */
3631 + if (time_after(jiffies, end)) {
3632 + pr_err("TMU%d is busy after %dmsec\n",
3633 + priv->id, TX_POLL_TIMEOUT_MS);
3634 + break;
3635 + }
3636 + prv_tx_pkts = tx_pkts;
3637 + next_poll++;
3638 + }
3639 + if (need_resched())
3640 + schedule();
3641 + }
3642 +	/* Wait a little longer for any remaining in-flight packets */
3643 + next_poll = jiffies + 1;
3644 + while (1) {
3645 + if (time_after(jiffies, next_poll))
3646 + break;
3647 + if (need_resched())
3648 + schedule();
3649 + }
3650 +
3651 + pfe_eth_stop(ndev, wake);
3652 +
3653 + napi_disable(&priv->lro_napi);
3654 + napi_disable(&priv->low_napi);
3655 + napi_disable(&priv->high_napi);
3656 +
3657 + hif_lib_client_unregister(&priv->client);
3658 +
3659 + return 0;
3660 +}
3661 +
3662 +/* pfe_eth_close
3663 + *
3664 + */
3665 +static int pfe_eth_close(struct net_device *ndev)
3666 +{
3667 + pfe_eth_shutdown(ndev, 0);
3668 +
3669 + return 0;
3670 +}
3671 +
3672 +/* pfe_eth_suspend
3673 + *
3674 + * return value: 1 if the netdevice is configured to wake up the system,
3675 + *               0 otherwise
3676 + */
3677 +int pfe_eth_suspend(struct net_device *ndev)
3678 +{
3679 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3680 + int retval = 0;
3681 +
3682 + if (priv->wol) {
3683 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
3684 + retval = 1;
3685 + }
3686 + pfe_eth_shutdown(ndev, priv->wol);
3687 +
3688 + return retval;
3689 +}
3690 +
3691 +/* pfe_eth_resume
3692 + *
3693 + */
3694 +int pfe_eth_resume(struct net_device *ndev)
3695 +{
3696 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3697 +
3698 + if (priv->wol)
3699 + gemac_set_wol(priv->EMAC_baseaddr, 0);
3700 + gemac_tx_enable(priv->EMAC_baseaddr);
3701 +
3702 + return pfe_eth_open(ndev);
3703 +}
3704 +
3705 +/* pfe_eth_get_queuenum
3706 + */
3707 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
3708 + *skb)
3709 +{
3710 + int queuenum = 0;
3711 + unsigned long flags;
3712 +
3713 + /* Get the Fast Path queue number */
3714 + /*
3715 + * Use conntrack mark (if conntrack exists), then packet mark (if any),
3716 + * then fallback to default
3717 +	 * then fall back to the default
3718 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3719 + if (skb->nfct) {
3720 + enum ip_conntrack_info cinfo;
3721 + struct nf_conn *ct;
3722 +
3723 + ct = nf_ct_get(skb, &cinfo);
3724 +
3725 + if (ct) {
3726 + u32 connmark;
3727 +
3728 + connmark = ct->mark;
3729 +
3730 + if ((connmark & 0x80000000) && priv->id != 0)
3731 + connmark >>= 16;
3732 +
3733 + queuenum = connmark & EMAC_QUEUENUM_MASK;
3734 + }
3735 + } else {/* continued after #endif ... */
3736 +#endif
3737 + if (skb->mark) {
3738 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
3739 + } else {
3740 + spin_lock_irqsave(&priv->lock, flags);
3741 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
3742 + spin_unlock_irqrestore(&priv->lock, flags);
3743 + }
3744 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3745 + }
3746 +#endif
3747 + return queuenum;
3748 +}
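+/*
+ * Hypothetical usage sketch: with conntrack mark support enabled, flows
+ * can be steered to a given Tx queue from user space, e.g.
+ *
+ *	iptables -t mangle -A PREROUTING -p tcp --dport 80 \
+ *		-j CONNMARK --set-mark 2
+ *
+ * after which packets of matching connections are queued on
+ * (2 & EMAC_QUEUENUM_MASK) by the lookup above.
+ */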
3749 +
3750 +/* pfe_eth_might_stop_tx
3751 + *
3752 + */
3753 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
3754 + struct netdev_queue *tx_queue,
3755 + unsigned int n_desc,
3756 + unsigned int n_segs)
3757 +{
3758 + ktime_t kt;
3759 + int tried = 0;
3760 +
3761 +try_again:
3762 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
3763 + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
3764 + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
3765 + if (!tried) {
3766 + __hif_lib_update_credit(&priv->client, queuenum);
3767 + tried = 1;
3768 + goto try_again;
3769 + }
3770 +#ifdef PFE_ETH_TX_STATS
3771 + if (__hif_tx_avail(&pfe->hif) < n_desc) {
3772 + priv->stop_queue_hif[queuenum]++;
3773 + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
3774 + priv->stop_queue_hif_client[queuenum]++;
3775 + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
3776 + n_segs) {
3777 + priv->stop_queue_credit[queuenum]++;
3778 + }
3779 + priv->stop_queue_total[queuenum]++;
3780 +#endif
3781 + netif_tx_stop_queue(tx_queue);
3782 +
3783 + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
3784 + NSEC_PER_MSEC);
3785 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
3786 + HRTIMER_MODE_REL);
3787 + return -1;
3788 + } else {
3789 + return 0;
3790 + }
3791 +}
3792 +
3793 +#define SA_MAX_OP 2
3794 +/* pfe_hif_send_packet
3795 + *
3796 + * At this level if TX fails we drop the packet
3797 + */
3798 +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
3799 + *priv, int queuenum)
3800 +{
3801 + struct skb_shared_info *sh = skb_shinfo(skb);
3802 + unsigned int nr_frags;
3803 + u32 ctrl = 0;
3804 +
3805 + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
3806 +
3807 + if (skb_is_gso(skb)) {
3808 + priv->stats.tx_dropped++;
3809 + return;
3810 + }
3811 +
3812 + if (skb->ip_summed == CHECKSUM_PARTIAL)
3813 + ctrl = HIF_CTRL_TX_CHECKSUM;
3814 +
3815 + nr_frags = sh->nr_frags;
3816 +
3817 + if (nr_frags) {
3818 + skb_frag_t *f;
3819 + int i;
3820 +
3821 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3822 + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
3823 + skb);
3824 +
3825 + for (i = 0; i < nr_frags - 1; i++) {
3826 + f = &sh->frags[i];
3827 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3828 + skb_frag_address(f),
3829 + skb_frag_size(f),
3830 + 0x0, 0x0, skb);
3831 + }
3832 +
3833 + f = &sh->frags[i];
3834 +
3835 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3836 + skb_frag_address(f), skb_frag_size(f),
3837 + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
3838 + skb);
3839 +
3840 + netif_info(priv, tx_queued, priv->ndev,
3841 + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
3842 + __func__, skb, nr_frags, skb->len);
3843 + } else {
3844 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3845 + skb->len, ctrl, HIF_FIRST_BUFFER |
3846 + HIF_LAST_BUFFER | HIF_DATA_VALID,
3847 + skb);
3848 + netif_info(priv, tx_queued, priv->ndev,
3849 + "%s: pkt sent successfully skb:%p len:%d\n",
3850 + __func__, skb, skb->len);
3851 + }
3852 + hif_tx_dma_start();
3853 + priv->stats.tx_packets++;
3854 + priv->stats.tx_bytes += skb->len;
3855 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
3856 +}
3857 +
3858 +/* pfe_eth_flush_txQ
3859 + */
3860 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
3861 + from_tx, int n_desc)
3862 +{
3863 + struct sk_buff *skb;
3864 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3865 + tx_q_num);
3866 + unsigned int flags;
3867 +
3868 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3869 +
3870 + if (!from_tx)
3871 + __netif_tx_lock_bh(tx_queue);
3872 +
3873 + /* Clean HIF and client queue */
3874 + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
3875 + tx_q_num, &flags,
3876 + HIF_TX_DESC_NT))) {
3877 + if (flags & HIF_DATA_VALID)
3878 + dev_kfree_skb_any(skb);
3879 + }
3880 + if (!from_tx)
3881 + __netif_tx_unlock_bh(tx_queue);
3882 +}
3883 +
3884 +/* pfe_eth_flush_tx
3885 + */
3886 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
3887 +{
3888 + int ii;
3889 +
3890 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3891 +
3892 + for (ii = 0; ii < emac_txq_cnt; ii++) {
3893 + pfe_eth_flush_txQ(priv, ii, 0, 0);
3894 + __hif_lib_update_credit(&priv->client, ii);
3895 + }
3896 +}
3897 +
3898 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
3899 + *n_segs)
3900 +{
3901 + struct skb_shared_info *sh = skb_shinfo(skb);
3902 +
3903 + /* Scattered data */
3904 + if (sh->nr_frags) {
3905 + *n_desc = sh->nr_frags + 1;
3906 + *n_segs = 1;
3907 +	} else {
3908 +		/* Regular case */
3909 + *n_desc = 1;
3910 + *n_segs = 1;
3911 + }
3912 +}
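+/*
+ * Example, for clarity: a linear skb needs a single HIF descriptor, while
+ * an skb with three frags needs four (header + frags); either way it
+ * counts as one segment for Tx credit accounting.
+ */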
3913 +
3914 +/* pfe_eth_send_packet
3915 + */
3916 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
3917 +{
3918 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3919 + int tx_q_num = skb_get_queue_mapping(skb);
3920 + int n_desc, n_segs;
3921 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3922 + tx_q_num);
3923 +
3924 + netif_info(priv, tx_queued, ndev, "%s\n", __func__);
3925 +
3926 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
3927 + sizeof(unsigned long)))) {
3928 + netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
3929 + __func__);
3930 +
3931 + if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
3932 + long)), 0, GFP_ATOMIC)) {
3933 +			/* No need to retransmit; there is no way to recover */
3934 + kfree_skb(skb);
3935 + priv->stats.tx_dropped++;
3936 + return NETDEV_TX_OK;
3937 + }
3938 + }
3939 +
3940 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
3941 +
3942 + hif_tx_lock(&pfe->hif);
3943 + if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
3944 + n_segs))) {
3945 +#ifdef PFE_ETH_TX_STATS
3946 + if (priv->was_stopped[tx_q_num]) {
3947 + priv->clean_fail[tx_q_num]++;
3948 + priv->was_stopped[tx_q_num] = 0;
3949 + }
3950 +#endif
3951 + hif_tx_unlock(&pfe->hif);
3952 + return NETDEV_TX_BUSY;
3953 + }
3954 +
3955 + pfe_hif_send_packet(skb, priv, tx_q_num);
3956 +
3957 + hif_tx_unlock(&pfe->hif);
3958 +
3959 + tx_queue->trans_start = jiffies;
3960 +
3961 +#ifdef PFE_ETH_TX_STATS
3962 + priv->was_stopped[tx_q_num] = 0;
3963 +#endif
3964 +
3965 + return NETDEV_TX_OK;
3966 +}
3967 +
3968 +/* pfe_eth_select_queue
3969 + *
3970 + */
3971 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
3972 + void *accel_priv,
3973 + select_queue_fallback_t fallback)
3974 +{
3975 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3976 +
3977 + return pfe_eth_get_queuenum(priv, skb);
3978 +}
3979 +
3980 +/* pfe_eth_get_stats
3981 + */
3982 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
3983 +{
3984 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3985 +
3986 + netif_info(priv, drv, ndev, "%s\n", __func__);
3987 +
3988 + return &priv->stats;
3989 +}
3990 +
3991 +/* pfe_eth_set_mac_address
3992 + */
3993 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
3994 +{
3995 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3996 + struct sockaddr *sa = addr;
3997 +
3998 + netif_info(priv, drv, ndev, "%s\n", __func__);
3999 +
4000 + if (!is_valid_ether_addr(sa->sa_data))
4001 + return -EADDRNOTAVAIL;
4002 +
4003 + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
4004 +
4005 + gemac_set_laddrN(priv->EMAC_baseaddr,
4006 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
4007 +
4008 + return 0;
4009 +}
4010 +
4011 +/* pfe_eth_enet_addr_byte_mac
4012 + */
4013 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
4014 + struct pfe_mac_addr *enet_addr)
4015 +{
4016 + if (!enet_byte_addr || !enet_addr) {
4017 + return -1;
4018 +
4019 + } else {
4020 + enet_addr->bottom = enet_byte_addr[0] |
4021 + (enet_byte_addr[1] << 8) |
4022 + (enet_byte_addr[2] << 16) |
4023 + (enet_byte_addr[3] << 24);
4024 + enet_addr->top = enet_byte_addr[4] |
4025 + (enet_byte_addr[5] << 8);
4026 + return 0;
4027 + }
4028 +}
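+/*
+ * Worked example: for the MAC 00:11:22:33:44:55 the packing above yields
+ * bottom = 0x33221100 and top = 0x00005544, i.e. the address bytes are
+ * stored little-endian across the two register words.
+ */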
4029 +
4030 +/* pfe_eth_set_multi
4031 + */
4032 +static void pfe_eth_set_multi(struct net_device *ndev)
4033 +{
4034 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4035 + struct pfe_mac_addr hash_addr; /* hash register structure */
4036 + /* specific mac address register structure */
4037 + struct pfe_mac_addr spec_addr;
4038 +	int result; /* index into the hash register to set */
4039 + int uc_count = 0;
4040 + struct netdev_hw_addr *ha;
4041 +
4042 + if (ndev->flags & IFF_PROMISC) {
4043 + netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4044 +
4045 + priv->promisc = 1;
4046 + gemac_enable_copy_all(priv->EMAC_baseaddr);
4047 + } else {
4048 + priv->promisc = 0;
4049 + gemac_disable_copy_all(priv->EMAC_baseaddr);
4050 + }
4051 +
4052 + /* Enable broadcast frame reception if required. */
4053 + if (ndev->flags & IFF_BROADCAST) {
4054 + gemac_allow_broadcast(priv->EMAC_baseaddr);
4055 + } else {
4056 + netif_info(priv, drv, ndev,
4057 + "disabling broadcast frame reception\n");
4058 +
4059 + gemac_no_broadcast(priv->EMAC_baseaddr);
4060 + }
4061 +
4062 + if (ndev->flags & IFF_ALLMULTI) {
4063 + /* Set the hash to rx all multicast frames */
4064 + hash_addr.bottom = 0xFFFFFFFF;
4065 + hash_addr.top = 0xFFFFFFFF;
4066 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4067 + netdev_for_each_uc_addr(ha, ndev) {
4068 + if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4069 + break;
4070 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4071 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4072 + uc_count + 2);
4073 + uc_count++;
4074 + }
4075 + } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
4076 + u8 *addr;
4077 +
4078 + hash_addr.bottom = 0;
4079 + hash_addr.top = 0;
4080 +
4081 + netdev_for_each_mc_addr(ha, ndev) {
4082 + addr = ha->addr;
4083 +
4084 + netif_info(priv, drv, ndev,
4085 + "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
4086 + addr[0], addr[1], addr[2],
4087 + addr[3], addr[4], addr[5]);
4088 +
4089 + result = pfe_eth_get_hash(addr);
4090 +
4091 + if (result < EMAC_HASH_REG_BITS) {
4092 + if (result < 32)
4093 + hash_addr.bottom |= (1 << result);
4094 + else
4095 + hash_addr.top |= (1 << (result - 32));
4096 + } else {
4097 + break;
4098 + }
4099 + }
4100 +
4101 + uc_count = -1;
4102 + netdev_for_each_uc_addr(ha, ndev) {
4103 + addr = ha->addr;
4104 +
4105 + if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
4106 + netdev_info(ndev,
4107 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4108 + addr[0], addr[1], addr[2],
4109 + addr[3], addr[4], addr[5]);
4110 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4111 + gemac_set_laddrN(priv->EMAC_baseaddr,
4112 + &spec_addr, uc_count + 2);
4113 + } else {
4114 + netif_info(priv, drv, ndev,
4115 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4116 + addr[0], addr[1], addr[2],
4117 + addr[3], addr[4], addr[5]);
4118 +
4119 + result = pfe_eth_get_hash(addr);
4120 + if (result >= EMAC_HASH_REG_BITS) {
4121 + break;
4122 +
4123 + } else {
4124 + if (result < 32)
4125 + hash_addr.bottom |= (1 <<
4126 + result);
4127 + else
4128 + hash_addr.top |= (1 <<
4129 + (result - 32));
4130 + }
4131 + }
4132 + }
4133 +
4134 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4135 + }
4136 +
4137 +	if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
4138 + /*
4139 + * Check if there are any specific address HW registers that
4140 + * need to be flushed
4141 + */
4142 + for (uc_count = netdev_uc_count(ndev); uc_count <
4143 + MAX_UC_SPEC_ADDR_REG; uc_count++)
4144 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4145 + }
4146 +
4147 + if (ndev->flags & IFF_LOOPBACK)
4148 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4149 +}
4150 +
4151 +/* pfe_eth_set_features
4152 + */
4153 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
4154 + features)
4155 +{
4156 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4157 + int rc = 0;
4158 +
4159 + if (features & NETIF_F_RXCSUM)
4160 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4161 + else
4162 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4163 + return rc;
4164 +}
4165 +
4166 +/* pfe_eth_fast_tx_timeout
4167 + */
4168 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4169 +{
4170 + struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
4171 + pfe_eth_fast_timer,
4172 + timer);
4173 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4174 + struct pfe_eth_priv_s,
4175 + fast_tx_timeout);
4176 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4177 + fast_tx_timeout->queuenum);
4178 +
4179 + if (netif_tx_queue_stopped(tx_queue)) {
4180 +#ifdef PFE_ETH_TX_STATS
4181 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4182 +#endif
4183 + netif_tx_wake_queue(tx_queue);
4184 + }
4185 +
4186 + return HRTIMER_NORESTART;
4187 +}
4188 +
4189 +/* pfe_eth_fast_tx_timeout_init
4190 + */
4191 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4192 +{
4193 + int i;
4194 +
4195 + for (i = 0; i < emac_txq_cnt; i++) {
4196 + priv->fast_tx_timeout[i].queuenum = i;
4197 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4198 + HRTIMER_MODE_REL);
4199 + priv->fast_tx_timeout[i].timer.function =
4200 + pfe_eth_fast_tx_timeout;
4201 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4202 + }
4203 +}
4204 +
4205 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4206 + struct pfe_eth_priv_s *priv,
4207 + unsigned int qno)
4208 +{
4209 + void *buf_addr;
4210 + unsigned int rx_ctrl;
4211 + unsigned int desc_ctrl = 0;
4212 + struct hif_ipsec_hdr *ipsec_hdr = NULL;
4213 + struct sk_buff *skb;
4214 + struct sk_buff *skb_frag, *skb_frag_last = NULL;
4215 + int length = 0, offset;
4216 +
4217 + skb = priv->skb_inflight[qno];
4218 +
4219 + if (skb) {
4220 + skb_frag_last = skb_shinfo(skb)->frag_list;
4221 + if (skb_frag_last) {
4222 + while (skb_frag_last->next)
4223 + skb_frag_last = skb_frag_last->next;
4224 + }
4225 + }
4226 +
4227 + while (!(desc_ctrl & CL_DESC_LAST)) {
4228 + buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4229 + &offset, &rx_ctrl, &desc_ctrl,
4230 + (void **)&ipsec_hdr);
4231 + if (!buf_addr)
4232 + goto incomplete;
4233 +
4234 +#ifdef PFE_ETH_NAPI_STATS
4235 + priv->napi_counters[NAPI_DESC_COUNT]++;
4236 +#endif
4237 +
4238 + /* First frag */
4239 + if (desc_ctrl & CL_DESC_FIRST) {
4240 + skb = build_skb(buf_addr, 0);
4241 + if (unlikely(!skb))
4242 + goto pkt_drop;
4243 +
4244 + skb_reserve(skb, offset);
4245 + skb_put(skb, length);
4246 + skb->dev = ndev;
4247 +
4248 + if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4249 + HIF_CTRL_RX_CHECKSUMMED))
4250 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4251 + else
4252 + skb_checksum_none_assert(skb);
4253 +
4254 + } else {
4255 + /* Next frags */
4256 + if (unlikely(!skb)) {
4257 + pr_err("%s: NULL skb_inflight\n",
4258 + __func__);
4259 + goto pkt_drop;
4260 + }
4261 +
4262 + skb_frag = build_skb(buf_addr, 0);
4263 +
4264 + if (unlikely(!skb_frag)) {
4265 + kfree(buf_addr);
4266 + goto pkt_drop;
4267 + }
4268 +
4269 + skb_reserve(skb_frag, offset);
4270 + skb_put(skb_frag, length);
4271 +
4272 + skb_frag->dev = ndev;
4273 +
4274 + if (skb_shinfo(skb)->frag_list)
4275 + skb_frag_last->next = skb_frag;
4276 + else
4277 + skb_shinfo(skb)->frag_list = skb_frag;
4278 +
4279 + skb->truesize += skb_frag->truesize;
4280 + skb->data_len += length;
4281 + skb->len += length;
4282 + skb_frag_last = skb_frag;
4283 + }
4284 + }
4285 +
4286 + priv->skb_inflight[qno] = NULL;
4287 + return skb;
4288 +
4289 +incomplete:
4290 + priv->skb_inflight[qno] = skb;
4291 + return NULL;
4292 +
4293 +pkt_drop:
4294 + priv->skb_inflight[qno] = NULL;
4295 +
4296 + if (skb)
4297 + kfree_skb(skb);
4298 + else
4299 + kfree(buf_addr);
4300 +
4301 + priv->stats.rx_errors++;
4302 +
4303 + return NULL;
4304 +}
4305 +
4306 +/* pfe_eth_poll
4307 + */
4308 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4309 + unsigned int qno, int budget)
4310 +{
4311 + struct net_device *ndev = priv->ndev;
4312 + struct sk_buff *skb;
4313 + int work_done = 0;
4314 + unsigned int len;
4315 +
4316 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4317 +
4318 +#ifdef PFE_ETH_NAPI_STATS
4319 + priv->napi_counters[NAPI_POLL_COUNT]++;
4320 +#endif
4321 +
4322 + do {
4323 + skb = pfe_eth_rx_skb(ndev, priv, qno);
4324 +
4325 + if (!skb)
4326 + break;
4327 +
4328 + len = skb->len;
4329 +
4330 + /* Packet will be processed */
4331 + skb->protocol = eth_type_trans(skb, ndev);
4332 +
4333 + netif_receive_skb(skb);
4334 +
4335 + priv->stats.rx_packets++;
4336 + priv->stats.rx_bytes += len;
4337 +
4338 + work_done++;
4339 +
4340 +#ifdef PFE_ETH_NAPI_STATS
4341 + priv->napi_counters[NAPI_PACKET_COUNT]++;
4342 +#endif
4343 +
4344 + } while (work_done < budget);
4345 +
4346 + /*
4347 + * If no Rx receive nor cleanup work was done, exit polling mode.
4348 +	 * No netif_running(dev) check is required here, as this is
4349 +	 * already checked in net/core/dev.c.
4350 + */
4351 + if (work_done < budget) {
4352 + napi_complete(napi);
4353 +
4354 + hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4355 + qno);
4356 + }
4357 +#ifdef PFE_ETH_NAPI_STATS
4358 + else
4359 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4360 +#endif
4361 +
4362 + return work_done;
4363 +}
4364 +
4365 +/*
4366 + * pfe_eth_lro_poll
4367 + */
4368 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4369 +{
4370 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4371 + lro_napi);
4372 +
4373 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4374 +
4375 + return pfe_eth_poll(priv, napi, 2, budget);
4376 +}
4377 +
4378 +/* pfe_eth_low_poll
4379 + */
4380 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4381 +{
4382 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4383 + low_napi);
4384 +
4385 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4386 +
4387 + return pfe_eth_poll(priv, napi, 1, budget);
4388 +}
4389 +
4390 +/* pfe_eth_high_poll
4391 + */
4392 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4393 +{
4394 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4395 + high_napi);
4396 +
4397 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4398 +
4399 + return pfe_eth_poll(priv, napi, 0, budget);
4400 +}
4401 +
4402 +static const struct net_device_ops pfe_netdev_ops = {
4403 + .ndo_open = pfe_eth_open,
4404 + .ndo_stop = pfe_eth_close,
4405 + .ndo_start_xmit = pfe_eth_send_packet,
4406 + .ndo_select_queue = pfe_eth_select_queue,
4407 + .ndo_get_stats = pfe_eth_get_stats,
4408 + .ndo_set_mac_address = pfe_eth_set_mac_address,
4409 + .ndo_set_rx_mode = pfe_eth_set_multi,
4410 + .ndo_set_features = pfe_eth_set_features,
4411 + .ndo_validate_addr = eth_validate_addr,
4412 +};
4413 +
4414 +/* pfe_eth_init_one
4415 + */
4416 +static int pfe_eth_init_one(struct pfe *pfe, int id)
4417 +{
4418 + struct net_device *ndev = NULL;
4419 + struct pfe_eth_priv_s *priv = NULL;
4420 + struct ls1012a_eth_platform_data *einfo;
4421 + struct ls1012a_mdio_platform_data *minfo;
4422 + struct ls1012a_pfe_platform_data *pfe_info;
4423 + int err;
4424 +
4425 +	/* Extract platform data */
4426 + pfe_info = (struct ls1012a_pfe_platform_data *)
4427 + pfe->dev->platform_data;
4428 + if (!pfe_info) {
4429 + pr_err(
4430 + "%s: pfe missing additional platform data\n"
4431 + , __func__);
4432 + err = -ENODEV;
4433 + goto err0;
4434 + }
4435 +
4436 + einfo = (struct ls1012a_eth_platform_data *)
4437 + pfe_info->ls1012a_eth_pdata;
4438 +
4439 +	/* einfo should never be NULL, but there is no harm in checking */
4440 + if (!einfo) {
4441 + pr_err(
4442 + "%s: pfe missing additional gemacs platform data\n"
4443 + , __func__);
4444 + err = -ENODEV;
4445 + goto err0;
4446 + }
4447 +
4448 + minfo = (struct ls1012a_mdio_platform_data *)
4449 + pfe_info->ls1012a_mdio_pdata;
4450 +
4451 +	/* minfo should never be NULL, but there is no harm in checking */
4452 + if (!minfo) {
4453 + pr_err(
4454 + "%s: pfe missing additional mdios platform data\n",
4455 + __func__);
4456 + err = -ENODEV;
4457 + goto err0;
4458 + }
4459 +
4460 + /* Create an ethernet device instance */
4461 + ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4462 +
4463 + if (!ndev) {
4464 + pr_err("%s: gemac %d device allocation failed\n",
4465 + __func__, einfo[id].gem_id);
4466 + err = -ENOMEM;
4467 + goto err0;
4468 + }
4469 +
4470 + priv = netdev_priv(ndev);
4471 + priv->ndev = ndev;
4472 + priv->id = einfo[id].gem_id;
4473 + priv->pfe = pfe;
4474 +
4475 + SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4476 +
4477 + pfe->eth.eth_priv[id] = priv;
4478 +
4479 + /* Set the info in the priv to the current info */
4480 + priv->einfo = &einfo[id];
4481 + priv->EMAC_baseaddr = cbus_emac_base[id];
4482 + priv->PHY_baseaddr = cbus_emac_base[0];
4483 + priv->GPI_baseaddr = cbus_gpi_base[id];
4484 +
4485 +#define HIF_GEMAC_TMUQ_BASE 6
4486 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
4487 + priv->high_tmu_q = priv->low_tmu_q + 1;
4488 +
4489 + spin_lock_init(&priv->lock);
4490 +
4491 + pfe_eth_fast_tx_timeout_init(priv);
4492 +
4493 +	/* Copy the station address into the dev structure */
4494 + memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
4495 +
4496 + /* Initialize mdio */
4497 + if (minfo[id].enabled) {
4498 + err = pfe_eth_mdio_init(priv, &minfo[id]);
4499 + if (err) {
4500 + netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
4501 + __func__);
4502 + goto err2;
4503 + }
4504 + }
4505 +
4506 + ndev->mtu = 1500;
4507 +
4508 + /* Set MTU limits */
4509 + ndev->min_mtu = ETH_MIN_MTU;
4510 + ndev->max_mtu = JUMBO_FRAME_SIZE;
4511 +
4512 +	/* Supported features (checksum offload enabled after validation) */
4516 +	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
4517 +			NETIF_F_IPV6_CSUM | NETIF_F_SG;
4518 +
4519 + /* enabled by default */
4520 + ndev->features = ndev->hw_features;
4521 +
4522 + priv->usr_features = ndev->features;
4523 +
4524 + ndev->netdev_ops = &pfe_netdev_ops;
4525 +
4526 + ndev->ethtool_ops = &pfe_ethtool_ops;
4527 +
4528 + /* Enable basic messages by default */
4529 + priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
4530 + NETIF_MSG_PROBE;
4531 +
4532 + netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
4533 + HIF_RX_POLL_WEIGHT - 16);
4534 + netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
4535 + HIF_RX_POLL_WEIGHT - 16);
4536 + netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
4537 + HIF_RX_POLL_WEIGHT - 16);
4538 +
4539 + err = register_netdev(ndev);
4540 +
4541 + if (err) {
4542 + netdev_err(ndev, "register_netdev() failed\n");
4543 + goto err3;
4544 + }
4545 + device_init_wakeup(&ndev->dev, WAKE_MAGIC);
4546 +
4547 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
4548 + err = pfe_phy_init(ndev);
4549 + if (err) {
4550 + netdev_err(ndev, "%s: pfe_phy_init() failed\n",
4551 + __func__);
4552 + goto err4;
4553 + }
4554 + }
4555 +
4556 + netif_carrier_on(ndev);
4557 +
4558 + /* Create all the sysfs files */
4559 +	if (pfe_eth_sysfs_init(ndev)) {
+		/* propagate a failure to the caller instead of returning 0 */
+		err = -EIO;
4560 +		goto err4;
+	}
4561 +
4562 + netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
4563 + __func__, priv->EMAC_baseaddr);
4564 +
4565 + return 0;
4566 +err4:
4567 + unregister_netdev(ndev);
4568 +err3:
4569 + pfe_eth_mdio_exit(priv->mii_bus);
4570 +err2:
4571 + free_netdev(priv->ndev);
4572 +err0:
4573 + return err;
4574 +}
4575 +
4576 +/* pfe_eth_init
+ * Register the supported GEMAC network devices with the kernel.
4577 + */
4578 +int pfe_eth_init(struct pfe *pfe)
4579 +{
4580 + int ii = 0;
4581 + int err;
4582 +
4583 + pr_info("%s\n", __func__);
4584 +
4585 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
4586 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
4587 +
4588 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
4589 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
4590 +
4591 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
4592 + err = pfe_eth_init_one(pfe, ii);
4593 + if (err)
4594 + goto err0;
4595 + }
4596 +
4597 + return 0;
4598 +
4599 +err0:
4600 + while (ii--)
4601 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4602 +
4604 + return err;
4605 +}
4606 +
4607 +/* pfe_eth_exit_one
4608 + */
4609 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
4610 +{
4611 + netif_info(priv, probe, priv->ndev, "%s\n", __func__);
4612 +
4613 + pfe_eth_sysfs_exit(priv->ndev);
4614 +
4615 + unregister_netdev(priv->ndev);
4616 +
4617 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
4618 + pfe_phy_exit(priv->ndev);
4619 +
4620 + if (priv->mii_bus)
4621 + pfe_eth_mdio_exit(priv->mii_bus);
4622 +
4623 + free_netdev(priv->ndev);
4624 +}
4625 +
4626 +/* pfe_eth_exit
4627 + */
4628 +void pfe_eth_exit(struct pfe *pfe)
4629 +{
4630 + int ii;
4631 +
4632 + pr_info("%s\n", __func__);
4633 +
4634 + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
4635 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4636 +}
4637 --- /dev/null
4638 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
4639 @@ -0,0 +1,184 @@
4640 +/*
4641 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4642 + * Copyright 2017 NXP
4643 + *
4644 + * This program is free software; you can redistribute it and/or modify
4645 + * it under the terms of the GNU General Public License as published by
4646 + * the Free Software Foundation; either version 2 of the License, or
4647 + * (at your option) any later version.
4648 + *
4649 + * This program is distributed in the hope that it will be useful,
4650 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4651 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4652 + * GNU General Public License for more details.
4653 + *
4654 + * You should have received a copy of the GNU General Public License
4655 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4656 + */
4657 +
4658 +#ifndef _PFE_ETH_H_
4659 +#define _PFE_ETH_H_
4660 +#include <linux/kernel.h>
4661 +#include <linux/netdevice.h>
4662 +#include <linux/etherdevice.h>
4663 +#include <linux/ethtool.h>
4664 +#include <linux/mii.h>
4665 +#include <linux/phy.h>
4666 +#include <linux/clk.h>
4667 +#include <linux/interrupt.h>
4668 +#include <linux/time.h>
4669 +
4670 +#define PFE_ETH_NAPI_STATS
4671 +#define PFE_ETH_TX_STATS
4672 +
4673 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
4674 +#define LRO_LEN_COUNT_MAX 32
4675 +#define LRO_NB_COUNT_MAX 32
4676 +
4677 +#define PFE_PAUSE_FLAG_ENABLE 1
4678 +#define PFE_PAUSE_FLAG_AUTONEG 2
4679 +
4680 +/* GEMAC configured by SW */
4681 +/* GEMAC configured by phy lines (not for MII/GMII) */
4682 +
4683 +#define GEMAC_SW_FULL_DUPLEX BIT(9)
4684 +#define GEMAC_SW_SPEED_10M (0 << 12)
4685 +#define GEMAC_SW_SPEED_100M BIT(12)
4686 +#define GEMAC_SW_SPEED_1G (2 << 12)
4687 +
4688 +#define GEMAC_NO_PHY BIT(0)
4689 +
4690 +struct ls1012a_eth_platform_data {
4691 + /* device specific information */
4692 + u32 device_flags;
4693 + char name[16];
4694 +
4695 + /* board specific information */
4696 + u32 mii_config;
4697 + u32 phy_flags;
4698 + u32 gem_id;
4699 + u32 bus_id;
4700 + u32 phy_id;
4701 + u32 mdio_muxval;
4702 + u8 mac_addr[ETH_ALEN];
4703 +};
4704 +
4705 +struct ls1012a_mdio_platform_data {
4706 + int enabled;
4707 + int irq[32];
4708 + u32 phy_mask;
4709 + int mdc_div;
4710 +};
4711 +
4712 +struct ls1012a_pfe_platform_data {
4713 + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
4714 + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
4715 +};
4716 +
4717 +#define NUM_GEMAC_SUPPORT 2
4718 +#define DRV_NAME "pfe-eth"
4719 +#define DRV_VERSION "1.0"
4720 +
4721 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
4722 +#define TX_POLL_TIMEOUT_MS 1000
4723 +
4724 +#define EMAC_TXQ_CNT 16
4725 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
4726 +
4727 +#define JUMBO_FRAME_SIZE 10258
4728 +/*
4729 + * Client Tx queue threshold, for txQ flush condition.
4730 + * It must be smaller than the queue size (in case we ever change it in the
4731 + * future).
4732 + */
4733 +#define HIF_CL_TX_FLUSH_MARK 32
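+/*
+ * A minimal sketch (not part of the driver) of how the constraint above
+ * could be enforced at build time, assuming EMAC_TXQ_DEPTH remains a
+ * compile-time constant:
+ *
+ *	BUILD_BUG_ON(HIF_CL_TX_FLUSH_MARK >= EMAC_TXQ_DEPTH);
+ */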
4734 +
4735 +/*
4736 + * Max number of TX resources (HIF descriptors or skbs) that will be released
4737 + * in a single go during batch recycling.
4738 + * Should be lower than the flush mark so the SW can provide the HW with a
4739 + * continuous stream of packets instead of bursts.
4740 + */
4741 +#define TX_FREE_MAX_COUNT 16
4742 +#define EMAC_RXQ_CNT 3
4743 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
4744 +/* make sure clients can receive a full burst of packets */
4745 +#define EMAC_RMON_TXBYTES_POS 0x00
4746 +#define EMAC_RMON_RXBYTES_POS 0x14
4747 +
4748 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
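+/*
+ * Note: the mask form above assumes emac_txq_cnt is a power of two;
+ * assuming it defaults to EMAC_TXQ_CNT (16), the mask is 0xf and
+ * skb/conntrack marks map onto queues 0..15.
+ */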
4749 +#define EMAC_MDIO_TIMEOUT 1000
4750 +#define MAX_UC_SPEC_ADDR_REG 31
4751 +
4752 +struct pfe_eth_fast_timer {
4753 + int queuenum;
4754 + struct hrtimer timer;
4755 + void *base;
4756 +};
4757 +
4758 +struct pfe_eth_priv_s {
4759 + struct pfe *pfe;
4760 + struct hif_client_s client;
4761 + struct napi_struct lro_napi;
4762 + struct napi_struct low_napi;
4763 + struct napi_struct high_napi;
4764 + int low_tmu_q;
4765 + int high_tmu_q;
4766 + struct net_device_stats stats;
4767 + struct net_device *ndev;
4768 + int id;
4769 + int promisc;
4770 + unsigned int msg_enable;
4771 + unsigned int usr_features;
4772 +
4773 + spinlock_t lock; /* protect member variables */
4774 + unsigned int event_status;
4775 + int irq;
4776 + void *EMAC_baseaddr;
4777 + /* This points to the EMAC base from where we access PHY */
4778 + void *PHY_baseaddr;
4779 + void *GPI_baseaddr;
4780 + /* PHY stuff */
4781 + struct phy_device *phydev;
4782 + int oldspeed;
4783 + int oldduplex;
4784 + int oldlink;
4785 + /* mdio info */
4786 + int mdc_div;
4787 + struct mii_bus *mii_bus;
4788 + struct clk *gemtx_clk;
4789 + int wol;
4790 + int pause_flag;
4791 +
4792 + int default_priority;
4793 + struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
4794 +
4795 + struct ls1012a_eth_platform_data *einfo;
4796 + struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
4797 +
4798 +#ifdef PFE_ETH_TX_STATS
4799 + unsigned int stop_queue_total[EMAC_TXQ_CNT];
4800 + unsigned int stop_queue_hif[EMAC_TXQ_CNT];
4801 + unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
4802 + unsigned int stop_queue_credit[EMAC_TXQ_CNT];
4803 + unsigned int clean_fail[EMAC_TXQ_CNT];
4804 + unsigned int was_stopped[EMAC_TXQ_CNT];
4805 +#endif
4806 +
4807 +#ifdef PFE_ETH_NAPI_STATS
4808 + unsigned int napi_counters[NAPI_MAX_COUNT];
4809 +#endif
4810 + unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
4811 +};
4812 +
4813 +struct pfe_eth {
4814 + struct pfe_eth_priv_s *eth_priv[3];
4815 +};
4816 +
4817 +int pfe_eth_init(struct pfe *pfe);
4818 +void pfe_eth_exit(struct pfe *pfe);
4819 +int pfe_eth_suspend(struct net_device *dev);
4820 +int pfe_eth_resume(struct net_device *dev);
4821 +int pfe_eth_mdio_reset(struct mii_bus *bus);
4822 +
4823 +#endif /* _PFE_ETH_H_ */
4824 --- /dev/null
4825 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
4826 @@ -0,0 +1,314 @@
4827 +/*
4828 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4829 + * Copyright 2017 NXP
4830 + *
4831 + * This program is free software; you can redistribute it and/or modify
4832 + * it under the terms of the GNU General Public License as published by
4833 + * the Free Software Foundation; either version 2 of the License, or
4834 + * (at your option) any later version.
4835 + *
4836 + * This program is distributed in the hope that it will be useful,
4837 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4838 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4839 + * GNU General Public License for more details.
4840 + *
4841 + * You should have received a copy of the GNU General Public License
4842 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4843 + */
4844 +
4845 +/*
4846 + * @file
4847 + * Contains all the functions to handle parsing and loading of PE firmware
4848 + * files.
4849 + */
4850 +#include <linux/firmware.h>
4851 +
4852 +#include "pfe_mod.h"
4853 +#include "pfe_firmware.h"
4854 +#include "pfe/pfe.h"
4855 +
4856 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
4857 + const char *section)
4858 +{
4859 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4860 + struct elf32_shdr *shdr;
4861 + struct elf32_shdr *shdr_shstr;
4862 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
4863 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
4864 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
4865 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
4866 + Elf32_Off shstr_offset;
4867 + Elf32_Word sh_name;
4868 + const char *name;
4869 + int i;
4870 +
4871 + /* Section header strings */
4872 + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
4873 + e_shentsize);
4874 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
4875 +
4876 + for (i = 0; i < e_shnum; i++) {
4877 + shdr = (struct elf32_shdr *)(fw->data + e_shoff
4878 + + i * e_shentsize);
4879 +
4880 + sh_name = be32_to_cpu(shdr->sh_name);
4881 +
4882 + name = (const char *)(fw->data + shstr_offset + sh_name);
4883 +
4884 + if (!strcmp(name, section))
4885 + return shdr;
4886 + }
4887 +
4888 + pr_err("%s: didn't find section %s\n", __func__, section);
4889 +
4890 + return NULL;
4891 +}
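+/*
+ * Usage sketch, mirroring the callers below: look up a named section and
+ * use its file offset, e.g.
+ *
+ *	struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
+ *
+ *	if (shdr)
+ *		version = (char *)(fw->data + be32_to_cpu(shdr->sh_offset));
+ */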
4892 +
4893 +#if defined(CFG_DIAGS)
4894 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
4895 + *diags_info)
4896 +{
4897 + struct elf32_shdr *shdr;
4898 + unsigned long offset, size;
4899 +
4900 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
4901 + if (shdr) {
4902 + offset = be32_to_cpu(shdr->sh_offset);
4903 + size = be32_to_cpu(shdr->sh_size);
4904 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
4905 + diags_info->diags_str_size = size;
4906 +		diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+		if (!diags_info->diags_str_array)
+			return -ENOMEM;
4907 +		memcpy(diags_info->diags_str_array, fw->data + offset, size);
4908 +
4909 + return 0;
4910 + } else {
4911 + return -1;
4912 + }
4913 +}
4914 +#endif
4915 +
4916 +static void pfe_check_version_info(const struct firmware *fw)
4917 +{
4919 + static char *version;
4920 +
4921 + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
4922 +
4923 + if (shdr) {
4924 + if (!version) {
4925 + /*
4926 + * this is the first fw we load, use its version
4927 + * string as reference (whatever it is)
4928 + */
4929 + version = (char *)(fw->data +
4930 + be32_to_cpu(shdr->sh_offset));
4931 +
4932 + pr_info("PFE binary version: %s\n", version);
4933 + } else {
4934 + /*
4935 + * already have loaded at least one firmware, check
4936 + * sequence can start now
4937 + */
4938 + if (strcmp(version, (char *)(fw->data +
4939 + be32_to_cpu(shdr->sh_offset)))) {
4940 + pr_info(
4941 + "WARNING: PFE firmware binaries from incompatible version\n");
4942 + }
4943 + }
4944 + } else {
4945 + /*
4946 + * version cannot be verified, a potential issue that should
4947 + * be reported
4948 + */
4949 +		pr_info(
4950 +			"WARNING: PFE firmware binary version cannot be verified\n");
4951 + }
4952 +}
4953 +
4954 +/* PFE elf firmware loader.
4955 + * Loads an elf firmware image into a list of PEs (specified using a bitmask).
4956 + *
4957 + * @param pe_mask Mask of PE id's to load firmware to
4958 + * @param fw Pointer to the firmware image
4959 + *
4960 + * @return 0 on success, a negative value on error
4961 + *
4962 + */
4963 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
4964 +{
4965 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4966 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
4967 + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
4968 + be32_to_cpu(elf_hdr->e_shoff));
4969 + int id, section;
4970 + int rc;
4971 +
4972 + pr_info("%s\n", __func__);
4973 +
4974 + /* Some sanity checks */
4975 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
4976 + pr_err("%s: incorrect elf magic number\n", __func__);
4977 + return -EINVAL;
4978 + }
4979 +
4980 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
4981 + pr_err("%s: incorrect elf class(%x)\n", __func__,
4982 + elf_hdr->e_ident[EI_CLASS]);
4983 + return -EINVAL;
4984 + }
4985 +
4986 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
4987 + pr_err("%s: incorrect elf data(%x)\n", __func__,
4988 + elf_hdr->e_ident[EI_DATA]);
4989 + return -EINVAL;
4990 + }
4991 +
4992 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
4993 + pr_err("%s: incorrect elf file type(%x)\n", __func__,
4994 + be16_to_cpu(elf_hdr->e_type));
4995 + return -EINVAL;
4996 + }
4997 +
4998 + for (section = 0; section < sections; section++, shdr++) {
4999 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
5000 + SHF_EXECINSTR)))
5001 + continue;
5002 +
5003 + for (id = 0; id < MAX_PE; id++)
5004 + if (pe_mask & (1 << id)) {
5005 + rc = pe_load_elf_section(id, fw->data, shdr,
5006 + pfe->dev);
5007 + if (rc < 0)
5008 + goto err;
5009 + }
5010 + }
5011 +
5012 + pfe_check_version_info(fw);
5013 +
5014 + return 0;
5015 +
5016 +err:
5017 + return rc;
5018 +}
5019 +
5020 +/* PFE firmware initialization.
5021 + * Loads different firmware files from filesystem.
5022 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
5023 + * Initializes control path symbol addresses (by looking them up in the elf
5024 + * firmware files)
5025 + * Takes PE's out of reset
5026 + *
5027 + * @return 0 on success, a negative value on error
5028 + *
5029 + */
5030 +int pfe_firmware_init(struct pfe *pfe)
5031 +{
5032 + const struct firmware *class_fw, *tmu_fw;
5033 + int rc = 0;
5034 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5035 + const char *util_fw_name;
5036 + const struct firmware *util_fw;
5037 +#endif
5038 +
5039 + pr_info("%s\n", __func__);
5040 +
5041 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5042 + pr_err("%s: request firmware %s failed\n", __func__,
5043 + CLASS_FIRMWARE_FILENAME);
5044 + rc = -ETIMEDOUT;
5045 + goto err0;
5046 + }
5047 +
5048 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5049 + pr_err("%s: request firmware %s failed\n", __func__,
5050 + TMU_FIRMWARE_FILENAME);
5051 + rc = -ETIMEDOUT;
5052 + goto err1;
5053 + }
5054 +
5055 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5056 + util_fw_name = UTIL_FIRMWARE_FILENAME;
5057 +
5058 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5059 + pr_err("%s: request firmware %s failed\n", __func__,
5060 + util_fw_name);
5061 + rc = -ETIMEDOUT;
5062 + goto err2;
5063 + }
5064 +#endif
5065 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5066 + if (rc < 0) {
5067 + pr_err("%s: class firmware load failed\n", __func__);
5068 + goto err3;
5069 + }
5070 +
5071 +#if defined(CFG_DIAGS)
5072 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5073 + if (rc < 0) {
5074 + pr_warn(
5075 + "PFE diags won't be available for class PEs\n");
5076 + rc = 0;
5077 + }
5078 +#endif
5079 +
5080 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5081 + if (rc < 0) {
5082 + pr_err("%s: tmu firmware load failed\n", __func__);
5083 + goto err3;
5084 + }
5085 +
5086 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5087 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5088 + if (rc < 0) {
5089 + pr_err("%s: util firmware load failed\n", __func__);
5090 + goto err3;
5091 + }
5092 +
5093 +#if defined(CFG_DIAGS)
5094 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5095 + if (rc < 0) {
5096 + pr_warn(
5097 + "PFE diags won't be available for util PE\n");
5098 + rc = 0;
5099 + }
5100 +#endif
5101 +
5102 + util_enable();
5103 +#endif
5104 +
5105 + tmu_enable(0xf);
5106 + class_enable();
5107 +
5108 +err3:
5109 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5110 + release_firmware(util_fw);
5111 +
5112 +err2:
5113 +#endif
5114 + release_firmware(tmu_fw);
5115 +
5116 +err1:
5117 + release_firmware(class_fw);
5118 +
5119 +err0:
5120 + return rc;
5121 +}
5122 +
5123 +/* PFE firmware cleanup
5124 + * Puts PE's in reset
5125 + *
5126 + *
5127 + */
5128 +void pfe_firmware_exit(struct pfe *pfe)
5129 +{
5130 + pr_info("%s\n", __func__);
5131 +
5132 + if (pe_reset_all(&pfe->ctrl) != 0)
5133 + pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5134 +
5135 + class_disable();
5136 + tmu_disable(0xf);
5137 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5138 + util_disable();
5139 +#endif
5140 +}
5141 --- /dev/null
5142 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5143 @@ -0,0 +1,32 @@
5144 +/*
5145 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5146 + * Copyright 2017 NXP
5147 + *
5148 + * This program is free software; you can redistribute it and/or modify
5149 + * it under the terms of the GNU General Public License as published by
5150 + * the Free Software Foundation; either version 2 of the License, or
5151 + * (at your option) any later version.
5152 + *
5153 + * This program is distributed in the hope that it will be useful,
5154 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5155 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5156 + * GNU General Public License for more details.
5157 + *
5158 + * You should have received a copy of the GNU General Public License
5159 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5160 + */
5161 +
5162 +#ifndef _PFE_FIRMWARE_H_
5163 +#define _PFE_FIRMWARE_H_
5164 +
5165 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
5166 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
5167 +
5168 +#define PFE_FW_CHECK_PASS 0
5169 +#define PFE_FW_CHECK_FAIL 1
5170 +#define NUM_PFE_FW 3
5171 +
5172 +int pfe_firmware_init(struct pfe *pfe);
5173 +void pfe_firmware_exit(struct pfe *pfe);
5174 +
5175 +#endif /* _PFE_FIRMWARE_H_ */
5176 --- /dev/null
5177 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5178 @@ -0,0 +1,1516 @@
5179 +/*
5180 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5181 + * Copyright 2017 NXP
5182 + *
5183 + * This program is free software; you can redistribute it and/or modify
5184 + * it under the terms of the GNU General Public License as published by
5185 + * the Free Software Foundation; either version 2 of the License, or
5186 + * (at your option) any later version.
5187 + *
5188 + * This program is distributed in the hope that it will be useful,
5189 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5190 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5191 + * GNU General Public License for more details.
5192 + *
5193 + * You should have received a copy of the GNU General Public License
5194 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5195 + */
5196 +
5197 +#include "pfe_mod.h"
5198 +#include "pfe/pfe.h"
5199 +
5200 +void *cbus_base_addr;
5201 +void *ddr_base_addr;
5202 +unsigned long ddr_phys_base_addr;
5203 +unsigned int ddr_size;
5204 +
5205 +static struct pe_info pe[MAX_PE];
5206 +
5207 +/* Initializes the PFE library.
5208 + * Must be called before using any of the library functions.
5209 + *
5210 + * @param[in] cbus_base CBUS virtual base address (as mapped in
5211 + * the host CPU address space)
5212 + * @param[in] ddr_base PFE DDR range virtual base address (as
5213 + * mapped in the host CPU address space)
5214 + * @param[in] ddr_phys_base PFE DDR range physical base address (as
5215 + * mapped in platform)
5216 + * @param[in] size PFE DDR range size (as defined by the host
5217 + * software)
5218 + */
5219 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5220 + unsigned int size)
5221 +{
5222 + cbus_base_addr = cbus_base;
5223 + ddr_base_addr = ddr_base;
5224 + ddr_phys_base_addr = ddr_phys_base;
5225 + ddr_size = size;
5226 +
5227 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5228 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5229 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5230 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5231 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5232 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5233 +
5234 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5235 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5236 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5237 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5238 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5239 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5240 +
5241 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5242 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5243 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5244 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5245 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5246 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5247 +
5248 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5249 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5250 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5251 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5252 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5253 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5254 +
5255 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5256 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5257 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5258 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5259 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5260 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5261 +
5262 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5263 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5264 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5265 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5266 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5267 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5268 +
5269 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5270 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5271 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5272 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5273 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5274 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5275 +
5276 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5277 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5278 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5279 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5280 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5281 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5282 +
5283 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5284 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5285 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5286 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5287 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5288 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5289 +
5290 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5291 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5292 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5293 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5294 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5295 +#endif
5296 +}
5297 +
5298 +/* Writes a buffer to PE internal memory from the host
5299 + * through indirect access registers.
5300 + *
5301 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5302 + * ..., UTIL_ID)
5303 + * @param[in] src Buffer source address
5304 + * @param[in] mem_access_addr PE internal memory destination address
5305 + * (must be 32bit aligned)
5306 + * @param[in] len Number of bytes to copy
5307 + */
5308 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
5309 + unsigned int len)
5310 +{
5311 + u32 offset = 0, val, addr;
5312 + unsigned int len32 = len >> 2;
5313 + int i;
5314 +
5315 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5316 + PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5317 +
5318 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
5319 + val = *(u32 *)src;
5320 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5321 + writel(addr + offset, pe[id].mem_access_addr);
5322 + }
5323 +
5324 + len = (len & 0x3);
5325 + if (len) {
5326 + val = 0;
5327 +
5328 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5329 + PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5330 +
5331 + for (i = 0; i < len; i++, src++)
5332 + val |= (*(u8 *)src) << (8 * i);
5333 +
5334 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5335 + writel(addr, pe[id].mem_access_addr);
5336 + }
5337 +}
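+
+/*
+ * The indirect access protocol used above is: write the byte-swapped data
+ * word to the PE's WDATA register first, then write the target address,
+ * with PE_MEM_ACCESS_WRITE and the byte-enable bits set, to the ADDR
+ * register to trigger the store. A minimal sketch for a single aligned
+ * 32bit DMEM word (dmem_addr is a hypothetical DMEM offset):
+ *
+ * writel(cpu_to_be32(val), pe[id].mem_access_wdata);
+ * writel(pe[id].dmem_base_addr | dmem_addr | PE_MEM_ACCESS_WRITE |
+ * PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(0, 4),
+ * pe[id].mem_access_addr);
+ */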
5338 +
5339 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5340 + * through indirect access registers.
5341 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5342 + * ..., UTIL_ID)
5343 + * @param[in] src Buffer source address
5344 + * @param[in] dst DMEM destination address (must be 32bit
5345 + * aligned)
5346 + * @param[in] len Number of bytes to copy
5347 + */
5348 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5349 +{
5350 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5351 + PE_MEM_ACCESS_DMEM, src, len);
5352 +}
5353 +
5354 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5355 + * through indirect access registers.
5356 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5357 + * ..., TMU3_ID)
5358 + * @param[in] src Buffer source address
5359 + * @param[in] dst PMEM destination address (must be 32bit
5360 + * aligned)
5361 + * @param[in] len Number of bytes to copy
5362 + */
5363 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5364 +{
5365 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5366 + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5367 +}
5368 +
5369 +/* Reads PE internal program memory (IMEM) from the host
5370 + * through indirect access registers.
5371 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5372 + * ..., TMU3_ID)
5373 + * @param[in] addr PMEM read address (must be aligned on size)
5374 + * @param[in] size Number of bytes to read (maximum 4, must not
5375 + * cross 32bit boundaries)
5376 + * @return the data read (in PE endianness, i.e BE).
5377 + */
5378 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5379 +{
5380 + u32 offset = addr & 0x3;
5381 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5382 + u32 val;
5383 +
5384 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5385 + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5386 +
5387 + writel(addr, pe[id].mem_access_addr);
5388 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5389 +
5390 + return (val >> (offset << 3)) & mask;
5391 +}
5392 +
5393 +/* Writes PE internal data memory (DMEM) from the host
5394 + * through indirect access registers.
5395 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5396 + * ..., UTIL_ID)
5397 + * @param[in] addr DMEM write address (must be aligned on size)
5398 + * @param[in] val Value to write (in PE endianness, i.e BE)
5399 + * @param[in] size Number of bytes to write (maximum 4, must not
5400 + * cross 32bit boundaries)
5401 + */
5402 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5403 +{
5404 + u32 offset = addr & 0x3;
5405 +
5406 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5407 + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5408 +
5409 + /* The indirect access interface byte-swaps the data being written */
5410 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5411 + writel(addr, pe[id].mem_access_addr);
5412 +}
5413 +
5414 +/* Reads PE internal data memory (DMEM) from the host
5415 + * through indirect access registers.
5416 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5417 + * ..., UTIL_ID)
5418 + * @param[in] addr DMEM read address (must be aligned on size)
5419 + * @param[in] size Number of bytes to read (maximum 4, must not
5420 + * cross 32bit boundaries)
5421 + * @return the data read (in PE endianness, i.e BE).
5422 + */
5423 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5424 +{
5425 + u32 offset = addr & 0x3;
5426 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5427 + u32 val;
5428 +
5429 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5430 + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5431 +
5432 + writel(addr, pe[id].mem_access_addr);
5433 +
5434 + /* The indirect access interface byte-swaps the data being read */
5435 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5436 +
5437 + return (val >> (offset << 3)) & mask;
5438 +}
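+
+/*
+ * Example (sketch; "counter_addr" is a hypothetical DMEM offset):
+ * pe_dmem_read()/pe_dmem_write() pair up for a read-modify-write of a
+ * 32bit DMEM word. Values travel in PE (big) endianness, so host-side
+ * arithmetic needs explicit conversions:
+ *
+ * u32 v = be32_to_cpu(pe_dmem_read(CLASS0_ID, counter_addr, 4));
+ *
+ * pe_dmem_write(CLASS0_ID, cpu_to_be32(v + 1), counter_addr, 4);
+ */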
5439 +
5440 +/* This function is used to write to CLASS internal bus peripherals (ccu,
5441 + * pe-lem) from the host
5442 + * through indirect access registers.
5443 + * @param[in] val value to write
5444 + * @param[in] addr Address to write to (must be aligned on size)
5445 + * @param[in] size Number of bytes to write (1, 2 or 4)
5446 + *
5447 + */
5448 +void class_bus_write(u32 val, u32 addr, u8 size)
5449 +{
5450 + u32 offset = addr & 0x3;
5451 +
5452 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5453 +
5454 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5455 + (size << 24);
5456 +
5457 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5458 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5459 +}
5460 +
5461 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
5462 + * through indirect access registers.
5463 + * @param[in] addr Address to read from (must be aligned on size)
5464 + * @param[in] size Number of bytes to read (1, 2 or 4)
5465 + * @return the read data
5466 + *
5467 + */
5468 +u32 class_bus_read(u32 addr, u8 size)
5469 +{
5470 + u32 offset = addr & 0x3;
5471 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5472 + u32 val;
5473 +
5474 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5475 +
5476 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5477 +
5478 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5479 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5480 +
5481 + return (val >> (offset << 3)) & mask;
5482 +}
5483 +
5484 +/* Writes data to the cluster memory (PE_LMEM)
5485 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5486 + * @param[in] src Buffer source address
5487 + * @param[in] len Number of bytes to copy
5488 + */
5489 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
5490 +{
5491 + u32 len32 = len >> 2;
5492 + int i;
5493 +
5494 + for (i = 0; i < len32; i++, src += 4, dst += 4)
5495 + class_bus_write(*(u32 *)src, dst, 4);
5496 +
5497 + if (len & 0x2) {
5498 + class_bus_write(*(u16 *)src, dst, 2);
5499 + src += 2;
5500 + dst += 2;
5501 + }
5502 +
5503 + if (len & 0x1) {
5504 + class_bus_write(*(u8 *)src, dst, 1);
5505 + src++;
5506 + dst++;
5507 + }
5508 +}
5509 +
5510 +/* Writes value to the cluster memory (PE_LMEM)
5511 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5512 + * @param[in] val Value to write
5513 + * @param[in] len Number of bytes to write
5514 + */
5515 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
5516 +{
5517 + u32 len32 = len >> 2;
5518 + int i;
5519 +
5520 + val = val | (val << 8) | (val << 16) | (val << 24);
5521 +
5522 + for (i = 0; i < len32; i++, dst += 4)
5523 + class_bus_write(val, dst, 4);
5524 +
5525 + if (len & 0x2) {
5526 + class_bus_write(val, dst, 2);
5527 + dst += 2;
5528 + }
5529 +
5530 + if (len & 0x1) {
5531 + class_bus_write(val, dst, 1);
5532 + dst++;
5533 + }
5534 +}
5535 +
5536 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5537 +
5538 +/* Writes UTIL program memory (DDR) from the host.
5539 + *
5540 + * @param[in] addr Address to write (virtual, must be aligned on size)
5541 + * @param[in] val Value to write (in PE endianness, i.e BE)
5542 + * @param[in] size Number of bytes to write (2 or 4)
5543 + */
5544 +static void util_pmem_write(u32 val, void *addr, u8 size)
5545 +{
5546 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
5547 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
5548 +
5549 + /*
5550 + * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
5551 + * location
5552 + */
5553 + if (size == 4)
5554 + writel(be32_to_cpu(val), addr64 + off);
5555 + else
5556 + writew(be16_to_cpu((u16)val), addr64 + off);
5557 +}
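+
+/*
+ * Worked example of the offset computation above: UTIL IMEM expects each
+ * 64bit word byte-swapped in place, so a 4-byte store to an address with
+ * (addr & 0x7) == 4 lands at off = 8 - 4 - 4 = 0 (the low half of the
+ * aligned 64bit word), while a 4-byte store to (addr & 0x7) == 0 lands
+ * at off = 4.
+ */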
5558 +
5559 +/* Writes a buffer to UTIL program memory (DDR) from the host.
5560 + *
5561 + * @param[in] dst Address to write (virtual, must be at least 16bit
5562 + * aligned)
5563 + * @param[in] src Buffer to write (in PE endianness, i.e BE, must have
5564 + * same alignment as dst)
5565 + * @param[in] len Number of bytes to write (must be at least 16bit
5566 + * aligned)
5567 + */
5568 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
5569 +{
5570 + unsigned int len32;
5571 + int i;
5572 +
5573 + if ((unsigned long)src & 0x2) {
5574 + util_pmem_write(*(u16 *)src, dst, 2);
5575 + src += 2;
5576 + dst += 2;
5577 + len -= 2;
5578 + }
5579 +
5580 + len32 = len >> 2;
5581 +
5582 + for (i = 0; i < len32; i++, dst += 4, src += 4)
5583 + util_pmem_write(*(u32 *)src, dst, 4);
5584 +
5585 + if (len & 0x2)
5586 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
5587 +}
5588 +#endif
5589 +
5590 +/* Loads an elf section into pmem
5591 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
5592 + * supported
5593 + *
5594 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
5595 + * TMU3_ID)
5596 + * @param[in] data pointer to the elf firmware
5597 + * @param[in] shdr pointer to the elf section header
5598 + *
5599 + */
5600 +static int pe_load_pmem_section(int id, const void *data,
5601 + struct elf32_shdr *shdr)
5602 +{
5603 + u32 offset = be32_to_cpu(shdr->sh_offset);
5604 + u32 addr = be32_to_cpu(shdr->sh_addr);
5605 + u32 size = be32_to_cpu(shdr->sh_size);
5606 + u32 type = be32_to_cpu(shdr->sh_type);
5607 +
5608 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5609 + if (id == UTIL_ID) {
5610 + pr_err("%s: unsupported pmem section for UTIL\n",
5611 + __func__);
5612 + return -EINVAL;
5613 + }
5614 +#endif
5615 +
5616 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5617 + pr_err(
5618 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5619 + , __func__, addr, (unsigned long)data + offset);
5620 +
5621 + return -EINVAL;
5622 + }
5623 +
5624 + if (addr & 0x1) {
5625 + pr_err("%s: load address(%x) is not 16bit aligned\n",
5626 + __func__, addr);
5627 + return -EINVAL;
5628 + }
5629 +
5630 + if (size & 0x1) {
5631 + pr_err("%s: load size(%x) is not 16bit aligned\n",
5632 + __func__, size);
5633 + return -EINVAL;
5634 + }
5635 +
5636 + switch (type) {
5637 + case SHT_PROGBITS:
5638 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
5639 +
5640 + break;
5641 +
5642 + default:
5643 + pr_err("%s: unsupported section type(%x)\n", __func__,
5644 + type);
5645 + return -EINVAL;
5646 + }
5647 +
5648 + return 0;
5649 +}
5650 +
5651 +/* Loads an elf section into dmem
5652 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5653 + * initialized to 0
5654 + *
5655 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5656 + * ..., UTIL_ID)
5657 + * @param[in] data pointer to the elf firmware
5658 + * @param[in] shdr pointer to the elf section header
5659 + *
5660 + */
5661 +static int pe_load_dmem_section(int id, const void *data,
5662 + struct elf32_shdr *shdr)
5663 +{
5664 + u32 offset = be32_to_cpu(shdr->sh_offset);
5665 + u32 addr = be32_to_cpu(shdr->sh_addr);
5666 + u32 size = be32_to_cpu(shdr->sh_size);
5667 + u32 type = be32_to_cpu(shdr->sh_type);
5668 + u32 size32 = size >> 2;
5669 + int i;
5670 +
5671 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5672 + pr_err(
5673 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5674 + __func__, addr, (unsigned long)data + offset);
5675 +
5676 + return -EINVAL;
5677 + }
5678 +
5679 + if (addr & 0x3) {
5680 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5681 + __func__, addr);
5682 + return -EINVAL;
5683 + }
5684 +
5685 + switch (type) {
5686 + case SHT_PROGBITS:
5687 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
5688 + break;
5689 +
5690 + case SHT_NOBITS:
5691 + for (i = 0; i < size32; i++, addr += 4)
5692 + pe_dmem_write(id, 0, addr, 4);
5693 +
5694 + if (size & 0x3)
5695 + pe_dmem_write(id, 0, addr, size & 0x3);
5696 +
5697 + break;
5698 +
5699 + default:
5700 + pr_err("%s: unsupported section type(%x)\n", __func__,
5701 + type);
5702 + return -EINVAL;
5703 + }
5704 +
5705 + return 0;
5706 +}
5707 +
5708 +/* Loads an elf section into DDR
5709 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5710 + * initialized to 0
5711 + *
5712 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5713 + * ..., UTIL_ID)
5714 + * @param[in] data pointer to the elf firmware
5715 + * @param[in] shdr pointer to the elf section header
5716 + *
5717 + */
5718 +static int pe_load_ddr_section(int id, const void *data,
5719 + struct elf32_shdr *shdr,
5720 + struct device *dev) {
5721 + u32 offset = be32_to_cpu(shdr->sh_offset);
5722 + u32 addr = be32_to_cpu(shdr->sh_addr);
5723 + u32 size = be32_to_cpu(shdr->sh_size);
5724 + u32 type = be32_to_cpu(shdr->sh_type);
5725 + u32 flags = be32_to_cpu(shdr->sh_flags);
5726 +
5727 + switch (type) {
5728 + case SHT_PROGBITS:
5729 + if (flags & SHF_EXECINSTR) {
5730 + if (id <= CLASS_MAX_ID) {
5731 + /* Do the loading only once in DDR */
5732 + if (id == CLASS0_ID) {
5733 + pr_info(
5734 + "%s: load address(%x) and elf file address(%lx) received\n",
5735 + __func__, addr,
5736 + (unsigned long)data + offset);
5737 + if (((unsigned long)(data + offset)
5738 + & 0x3) != (addr & 0x3)) {
5739 + pr_err(
5740 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5741 + , __func__, addr,
5742 + (unsigned long)data + offset);
5743 +
5744 + return -EINVAL;
5745 + }
5746 +
5747 + if (addr & 0x1) {
5748 + pr_err(
5749 + "%s: load address(%x) is not 16bit aligned\n"
5750 + , __func__, addr);
5751 + return -EINVAL;
5752 + }
5753 +
5754 + if (size & 0x1) {
5755 + pr_err(
5756 + "%s: load length(%x) is not 16bit aligned\n"
5757 + , __func__, size);
5758 + return -EINVAL;
5759 + }
5760 + memcpy(DDR_PHYS_TO_VIRT(
5761 + DDR_PFE_TO_PHYS(addr)),
5762 + data + offset, size);
5763 + }
5764 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5765 + } else if (id == UTIL_ID) {
5766 + if (((unsigned long)(data + offset) & 0x3)
5767 + != (addr & 0x3)) {
5768 + pr_err(
5769 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5770 + , __func__, addr,
5771 + (unsigned long)data + offset);
5772 +
5773 + return -EINVAL;
5774 + }
5775 +
5776 + if (addr & 0x1) {
5777 + pr_err(
5778 + "%s: load address(%x) is not 16bit aligned\n"
5779 + , __func__, addr);
5780 + return -EINVAL;
5781 + }
5782 +
5783 + if (size & 0x1) {
5784 + pr_err(
5785 + "%s: load length(%x) is not 16bit aligned\n"
5786 + , __func__, size);
5787 + return -EINVAL;
5788 + }
5789 +
5790 + util_pmem_memcpy(DDR_PHYS_TO_VIRT(
5791 + DDR_PFE_TO_PHYS(addr)),
5792 + data + offset, size);
5793 + }
5794 +#endif
5795 + } else {
5796 + pr_err(
5797 + "%s: unsupported ddr section type(%x) for PE(%d)\n"
5798 + , __func__, type, id);
5799 + return -EINVAL;
5800 + }
5801 +
5802 + } else {
5803 + memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
5804 + + offset, size);
5805 + }
5806 +
5807 + break;
5808 +
5809 + case SHT_NOBITS:
5810 + memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
5811 +
5812 + break;
5813 +
5814 + default:
5815 + pr_err("%s: unsupported section type(%x)\n", __func__,
5816 + type);
5817 + return -EINVAL;
5818 + }
5819 +
5820 + return 0;
5821 +}
5822 +
5823 +/* Loads an elf section into pe lmem
5824 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5825 + * initialized to 0
5826 + *
5827 + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
5828 + * @param[in] data pointer to the elf firmware
5829 + * @param[in] shdr pointer to the elf section header
5830 + *
5831 + */
5832 +static int pe_load_pe_lmem_section(int id, const void *data,
5833 + struct elf32_shdr *shdr)
5834 +{
5835 + u32 offset = be32_to_cpu(shdr->sh_offset);
5836 + u32 addr = be32_to_cpu(shdr->sh_addr);
5837 + u32 size = be32_to_cpu(shdr->sh_size);
5838 + u32 type = be32_to_cpu(shdr->sh_type);
5839 +
5840 + if (id > CLASS_MAX_ID) {
5841 + pr_err(
5842 + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
5843 + __func__, type, id);
5844 + return -EINVAL;
5845 + }
5846 +
5847 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5848 + pr_err(
5849 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5850 + __func__, addr, (unsigned long)data + offset);
5851 +
5852 + return -EINVAL;
5853 + }
5854 +
5855 + if (addr & 0x3) {
5856 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5857 + __func__, addr);
5858 + return -EINVAL;
5859 + }
5860 +
5861 + switch (type) {
5862 + case SHT_PROGBITS:
5863 + class_pe_lmem_memcpy_to32(addr, data + offset, size);
5864 + break;
5865 +
5866 + case SHT_NOBITS:
5867 + class_pe_lmem_memset(addr, 0, size);
5868 + break;
5869 +
5870 + default:
5871 + pr_err("%s: unsupported section type(%x)\n", __func__,
5872 + type);
5873 + return -EINVAL;
5874 + }
5875 +
5876 + return 0;
5877 +}
5878 +
5879 +/* Loads an elf section into a PE
5880 + * For now only supports loading a section to dmem (all PE's), pmem (class and
5881 + * tmu PE's) and DDR (util PE code)
5883 + *
5884 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5885 + * ..., UTIL_ID)
5886 + * @param[in] data pointer to the elf firmware
5887 + * @param[in] shdr pointer to the elf section header
5888 + *
5889 + */
5890 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
5891 + struct device *dev) {
5892 + u32 addr = be32_to_cpu(shdr->sh_addr);
5893 + u32 size = be32_to_cpu(shdr->sh_size);
5894 +
5895 + if (IS_DMEM(addr, size))
5896 + return pe_load_dmem_section(id, data, shdr);
5897 + else if (IS_PMEM(addr, size))
5898 + return pe_load_pmem_section(id, data, shdr);
5899 + else if (IS_PFE_LMEM(addr, size))
5900 + return 0;
5901 + else if (IS_PHYS_DDR(addr, size))
5902 + return pe_load_ddr_section(id, data, shdr, dev);
5903 + else if (IS_PE_LMEM(addr, size))
5904 + return pe_load_pe_lmem_section(id, data, shdr);
5905 +
5906 + pr_err("%s: unsupported memory range(%x)\n", __func__,
5907 + addr);
5908 + return 0;
5909 +}
5910 +
5911 +/**************************** BMU ***************************/
5912 +
5913 +/* Initializes a BMU block.
5914 + * @param[in] base BMU block base address
5915 + * @param[in] cfg BMU configuration
5916 + */
5917 +void bmu_init(void *base, struct BMU_CFG *cfg)
5918 +{
5919 + bmu_disable(base);
5920 +
5921 + bmu_set_config(base, cfg);
5922 +
5923 + bmu_reset(base);
5924 +}
5925 +
5926 +/* Resets a BMU block.
5927 + * @param[in] base BMU block base address
5928 + */
5929 +void bmu_reset(void *base)
5930 +{
5931 + writel(CORE_SW_RESET, base + BMU_CTRL);
5932 +
5933 + /* Wait for self clear */
5934 + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
5935 + ;
5936 +}
5937 +
5938 +/* Enables a BMU block.
5939 + * @param[in] base BMU block base address
5940 + */
5941 +void bmu_enable(void *base)
5942 +{
5943 + writel(CORE_ENABLE, base + BMU_CTRL);
5944 +}
5945 +
5946 +/* Disables a BMU block.
5947 + * @param[in] base BMU block base address
5948 + */
5949 +void bmu_disable(void *base)
5950 +{
5951 + writel(CORE_DISABLE, base + BMU_CTRL);
5952 +}
5953 +
5954 +/* Sets the configuration of a BMU block.
5955 + * @param[in] base BMU block base address
5956 + * @param[in] cfg BMU configuration
5957 + */
5958 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
5959 +{
5960 + writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
5961 + writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
5962 + writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
5963 +
5964 + /* Interrupts are never used */
5965 + writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
5966 + writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
5967 + writel(0x0, base + BMU_INT_ENABLE);
5968 +}
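+
+/*
+ * Usage sketch (all values below are illustrative, not the ones this
+ * driver uses; buf_base_pfe is a hypothetical PFE-view base address of
+ * the buffer pool). Note that count and size are truncated to 16 bits
+ * by bmu_set_config():
+ *
+ * struct BMU_CFG cfg = {
+ * .baseaddr = buf_base_pfe,
+ * .count = 128,
+ * .size = 128,
+ * .low_watermark = 10,
+ * .high_watermark = 15,
+ * };
+ *
+ * bmu_init(base, &cfg);
+ * bmu_enable(base);
+ */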
5969 +
5970 +/**************************** MTIP GEMAC ***************************/
5971 +
5972 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
5973 + * TCP or UDP checksums are discarded
5974 + *
5975 + * @param[in] base GEMAC base address.
5976 + */
5977 +void gemac_enable_rx_checksum_offload(void *base)
5978 +{
5979 + /* No configuration found to do this on this MAC */
5980 +}
5981 +
5982 +/* Disable Rx Checksum Engine.
5983 + *
5984 + * @param[in] base GEMAC base address.
5985 + */
5986 +void gemac_disable_rx_checksum_offload(void *base)
5987 +{
5988 + /* No configuration found to do this on this MAC */
5989 +}
5990 +
5991 +/* GEMAC set speed.
5992 + * @param[in] base GEMAC base address
5993 + * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
5994 + */
5995 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
5996 +{
5997 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
5998 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
5999 +
6000 + switch (gem_speed) {
6001 + case SPEED_10M:
6002 + rcr |= EMAC_RCNTRL_RMII_10T;
6003 + break;
6004 +
6005 + case SPEED_1000M:
6006 + ecr |= EMAC_ECNTRL_SPEED;
6007 + break;
6008 +
6009 + case SPEED_100M:
6010 + default:
6011 + /*It is in 100M mode */
6012 + break;
6013 + }
6014 + writel(ecr, (base + EMAC_ECNTRL_REG));
6015 + writel(rcr, (base + EMAC_RCNTRL_REG));
6016 +}
6017 +
6018 +/* GEMAC set duplex.
6019 + * @param[in] base GEMAC base address
6020 + * @param[in] duplex GEMAC duplex mode (Full, Half)
6021 + */
6022 +void gemac_set_duplex(void *base, int duplex)
6023 +{
6024 + if (duplex == DUPLEX_HALF) {
6025 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
6026 + + EMAC_TCNTRL_REG);
6027 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
6028 + + EMAC_RCNTRL_REG));
6029 + } else{
6030 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
6031 + + EMAC_TCNTRL_REG);
6032 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
6033 + + EMAC_RCNTRL_REG));
6034 + }
6035 +}
6036 +
6037 +/* GEMAC set mode.
6038 + * @param[in] base GEMAC base address
6039 + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
6040 + */
6041 +void gemac_set_mode(void *base, int mode)
6042 +{
6043 + u32 val = readl(base + EMAC_RCNTRL_REG);
6044 +
6045 + /*Remove loopback*/
6046 + val &= ~EMAC_RCNTRL_LOOP;
6047 +
6048 + /*Enable flow control and MII mode*/
6049 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
6050 +
6051 + writel(val, base + EMAC_RCNTRL_REG);
6052 +}
6053 +
6054 +/* GEMAC enable function.
6055 + * @param[in] base GEMAC base address
6056 + */
6057 +void gemac_enable(void *base)
6058 +{
6059 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6060 + EMAC_ECNTRL_REG);
6061 +}
6062 +
6063 +/* GEMAC disable function.
6064 + * @param[in] base GEMAC base address
6065 + */
6066 +void gemac_disable(void *base)
6067 +{
6068 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6069 + EMAC_ECNTRL_REG);
6070 +}
6071 +
6072 +/* GEMAC TX disable function.
6073 + * @param[in] base GEMAC base address
6074 + */
6075 +void gemac_tx_disable(void *base)
6076 +{
6077 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6078 + EMAC_TCNTRL_REG);
6079 +}
6080 +
6081 +void gemac_tx_enable(void *base)
6082 +{
6083 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6084 + EMAC_TCNTRL_REG);
6085 +}
6086 +
6087 +/* Sets the hash register of the MAC.
6088 + * This register is used for matching unicast and multicast frames.
6089 + *
6090 + * @param[in] base GEMAC base address.
6091 + * @param[in] hash 64-bit hash to be configured.
6092 + */
6093 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6094 +{
6095 + writel(hash->bottom, base + EMAC_GALR);
6096 + writel(hash->top, base + EMAC_GAUR);
6097 +}
6098 +
6099 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6100 + unsigned int entry_index)
6101 +{
6102 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6103 + return;
6104 +
6105 + entry_index = entry_index - 1;
6106 + if (entry_index < 1) {
6107 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
6108 + writel((htonl(address->top) | 0x8808), base +
6109 + EMAC_PHY_ADDR_HIGH);
6110 + } else {
6111 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
6112 + + EMAC_SMAC_0_0);
6113 + writel((htonl(address->top) | 0x8808), base + ((entry_index -
6114 + 1) * 8) + EMAC_SMAC_0_1);
6115 + }
6116 +}
6117 +
6118 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6119 +{
6120 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6121 + return;
6122 +
6123 + entry_index = entry_index - 1;
6124 + if (entry_index < 1) {
6125 + writel(0, base + EMAC_PHY_ADDR_LOW);
6126 + writel(0, base + EMAC_PHY_ADDR_HIGH);
6127 + } else {
6128 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6129 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6130 + }
6131 +}
6132 +
6133 +/* Set the loopback mode of the MAC. This can be either no loopback for
6134 + * normal operation, local loopback through MAC internal loopback module or PHY
6135 + * loopback for external loopback through a PHY. This asserts the external
6136 + * loop pin.
6137 + *
6138 + * @param[in] base GEMAC base address.
6139 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
6140 + * Loopback,
6141 + * LB_EXT - PHY Loopback.
6142 + */
6143 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6144 +{
6145 + pr_info("%s()\n", __func__);
6146 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6147 + EMAC_RCNTRL_REG));
6148 +}
6149 +
6150 +/* GEMAC allow all frames (promiscuous mode)
6151 + * @param[in] base GEMAC base address
6152 + */
6153 +void gemac_enable_copy_all(void *base)
6154 +{
6155 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6156 + EMAC_RCNTRL_REG));
6157 +}
6158 +
6159 +/* GEMAC disable promiscuous mode
6160 + * @param[in] base GEMAC base address
6161 + */
6162 +void gemac_disable_copy_all(void *base)
6163 +{
6164 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6165 + EMAC_RCNTRL_REG));
6166 +}
6167 +
6168 +/* GEMAC allow broadcast function.
6169 + * @param[in] base GEMAC base address
6170 + */
6171 +void gemac_allow_broadcast(void *base)
6172 +{
6173 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6174 + EMAC_RCNTRL_REG);
6175 +}
6176 +
6177 +/* GEMAC no broadcast function.
6178 + * @param[in] base GEMAC base address
6179 + */
6180 +void gemac_no_broadcast(void *base)
6181 +{
6182 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6183 + EMAC_RCNTRL_REG);
6184 +}
6185 +
6186 +/* GEMAC enable 1536 rx function.
6187 + * @param[in] base GEMAC base address
6188 + */
6189 +void gemac_enable_1536_rx(void *base)
6190 +{
6191 + /* Set 1536 as Maximum frame length */
6192 + writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
6193 + EMAC_RCNTRL_REG);
6194 +}
6195 +
6196 +/* GEMAC enable jumbo function.
6197 + * @param[in] base GEMAC base address
6198 + */
6199 +void gemac_enable_rx_jmb(void *base)
6200 +{
6201 + writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
6202 + + EMAC_RCNTRL_REG);
6203 +}
6204 +
6205 +/* GEMAC enable stacked vlan function.
6206 + * @param[in] base GEMAC base address
6207 + */
6208 +void gemac_enable_stacked_vlan(void *base)
6209 +{
6210 + /* MTIP doesn't support stacked vlan */
6211 +}
6212 +
6213 +/* GEMAC enable pause rx function.
6214 + * @param[in] base GEMAC base address
6215 + */
6216 +void gemac_enable_pause_rx(void *base)
6217 +{
6218 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6219 + base + EMAC_RCNTRL_REG);
6220 +}
6221 +
6222 +/* GEMAC disable pause rx function.
6223 + * @param[in] base GEMAC base address
6224 + */
6225 +void gemac_disable_pause_rx(void *base)
6226 +{
6227 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6228 + base + EMAC_RCNTRL_REG);
6229 +}
6230 +
6231 +/* GEMAC enable pause tx function.
6232 + * @param[in] base GEMAC base address
6233 + */
6234 +void gemac_enable_pause_tx(void *base)
6235 +{
6236 + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6237 +}
6238 +
6239 +/* GEMAC disable pause tx function.
6240 + * @param[in] base GEMAC base address
6241 + */
6242 +void gemac_disable_pause_tx(void *base)
6243 +{
6244 + writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6245 +}
6246 +
6247 +/* GEMAC wol configuration
6248 + * @param[in] base GEMAC base address
6249 + * @param[in] wol_conf WoL register configuration
6250 + */
6251 +void gemac_set_wol(void *base, u32 wol_conf)
6252 +{
6253 + u32 val = readl(base + EMAC_ECNTRL_REG);
6254 +
6255 + if (wol_conf)
6256 + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6257 + else
6258 + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6259 + writel(val, base + EMAC_ECNTRL_REG);
6260 +}
6261 +
6262 +/* Sets Gemac bus width to 64bit
6263 + * @param[in] base GEMAC base address
6264 + * @param[in] width gemac bus width to be set possible values are 32/64/128
6265 + */
6266 +void gemac_set_bus_width(void *base, int width)
6267 +{
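+ /* Nothing to do: bus width is not configured on this GEMAC */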
6268 +}
6269 +
6270 +/* Sets Gemac configuration.
6271 + * @param[in] base GEMAC base address
6272 + * @param[in] cfg GEMAC configuration
6273 + */
6274 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6275 +{
6276 + /*GEMAC config taken from VLSI */
6277 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6278 + writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6279 + writel(0x00003fff, base + EMAC_TRUNC_FL);
6280 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6281 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6282 +
6283 + gemac_set_mode(base, cfg->mode);
6284 +
6285 + gemac_set_speed(base, cfg->speed);
6286 +
6287 + gemac_set_duplex(base, cfg->duplex);
6288 +}
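+
+/*
+ * Usage sketch (illustrative values; the mode field is currently ignored
+ * by gemac_set_mode(), which always selects MII mode with flow control):
+ *
+ * struct gemac_cfg cfg = {
+ * .mode = 0,
+ * .speed = SPEED_1000M,
+ * .duplex = DUPLEX_FULL,
+ * };
+ *
+ * gemac_set_config(base, &cfg);
+ * gemac_enable(base);
+ */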
6289 +
6290 +/**************************** GPI ***************************/
6291 +
6292 +/* Initializes a GPI block.
6293 + * @param[in] base GPI base address
6294 + * @param[in] cfg GPI configuration
6295 + */
6296 +void gpi_init(void *base, struct gpi_cfg *cfg)
6297 +{
6298 + gpi_reset(base);
6299 +
6300 + gpi_disable(base);
6301 +
6302 + gpi_set_config(base, cfg);
6303 +}
6304 +
6305 +/* Resets a GPI block.
6306 + * @param[in] base GPI base address
6307 + */
6308 +void gpi_reset(void *base)
6309 +{
6310 + writel(CORE_SW_RESET, base + GPI_CTRL);
6311 +}
6312 +
6313 +/* Enables a GPI block.
6314 + * @param[in] base GPI base address
6315 + */
6316 +void gpi_enable(void *base)
6317 +{
6318 + writel(CORE_ENABLE, base + GPI_CTRL);
6319 +}
6320 +
6321 +/* Disables a GPI block.
6322 + * @param[in] base GPI base address
6323 + */
6324 +void gpi_disable(void *base)
6325 +{
6326 + writel(CORE_DISABLE, base + GPI_CTRL);
6327 +}
6328 +
6329 +/* Sets the configuration of a GPI block.
6330 + * @param[in] base GPI base address
6331 + * @param[in] cfg GPI configuration
6332 + */
6333 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6334 +{
6335 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
6336 + + GPI_LMEM_ALLOC_ADDR);
6337 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
6338 + + GPI_LMEM_FREE_ADDR);
6339 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
6340 + + GPI_DDR_ALLOC_ADDR);
6341 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
6342 + + GPI_DDR_FREE_ADDR);
6343 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6344 + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6345 + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6346 + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6347 + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6348 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
6349 + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
6350 +
6351 + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6352 + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6353 + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6354 + writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
6355 + writel(1, base + GPI_TOE_CHKSUM_EN);
6356 +
6357 + if (cfg->mtip_pause_reg) {
6358 + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6359 + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6360 + }
6361 +}
6362 +
6363 +/**************************** CLASSIFIER ***************************/
6364 +
6365 +/* Initializes CLASSIFIER block.
6366 + * @param[in] cfg CLASSIFIER configuration
6367 + */
6368 +void class_init(struct class_cfg *cfg)
6369 +{
6370 + class_reset();
6371 +
6372 + class_disable();
6373 +
6374 + class_set_config(cfg);
6375 +}
6376 +
6377 +/* Resets CLASSIFIER block.
6378 + *
6379 + */
6380 +void class_reset(void)
6381 +{
6382 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
6383 +}
6384 +
6385 +/* Enables all CLASS-PE's cores.
6386 + *
6387 + */
6388 +void class_enable(void)
6389 +{
6390 + writel(CORE_ENABLE, CLASS_TX_CTRL);
6391 +}
6392 +
6393 +/* Disables all CLASS-PE's cores.
6394 + *
6395 + */
6396 +void class_disable(void)
6397 +{
6398 + writel(CORE_DISABLE, CLASS_TX_CTRL);
6399 +}
6400 +
6401 +/*
6402 + * Sets the configuration of the CLASSIFIER block.
6403 + * @param[in] cfg CLASSIFIER configuration
6404 + */
6405 +void class_set_config(struct class_cfg *cfg)
6406 +{
6407 + u32 val;
6408 +
6409 + /* Initialize route table */
6410 + if (!cfg->resume)
6411 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6412 + cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6413 +
6414 +#if !defined(LS1012A_PFE_RESET_WA)
6415 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
6416 +#endif
6417 +
6418 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
6419 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
6420 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6421 + CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6422 + CLASS_ROUTE_HASH_ENTRY_SIZE);
6423 + writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6424 + CLASS_HIF_PARSE);
6425 +
6426 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
6427 +
6428 +#if defined(CONFIG_IP_ALIGNED)
6429 + val |= IP_ALIGNED;
6430 +#endif
6431 +
6432 + /*
6433 + * Class PE packet steering will only work if TOE mode, bridge fetch or
6434 + * route fetch are enabled (see class/qb_fet.v). Route fetch would
6435 + * trigger additional memory copies (likely from DDR because of hash
6436 + * table size, which cannot be reduced because PE software still
6437 + * relies on hash value computed in HW), so when not in TOE mode we
6438 + * simply enable HW bridge fetch even though we don't use it.
6439 + */
6440 + if (cfg->toe_mode)
6441 + val |= CLASS_TOE;
6442 + else
6443 + val |= HW_BRIDGE_FETCH;
6444 +
6445 + writel(val, CLASS_ROUTE_MULTI);
6446 +
6447 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6448 + CLASS_ROUTE_TABLE_BASE);
6449 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
6450 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
6451 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
6452 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
6453 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
6454 +
6455 + writel(23, CLASS_AFULL_THRES);
6456 + writel(23, CLASS_TSQ_FIFO_THRES);
6457 +
6458 + writel(24, CLASS_MAX_BUF_CNT);
6459 + writel(24, CLASS_TSQ_MAX_CNT);
6460 +}
6461 +
6462 +/**************************** TMU ***************************/
6463 +
6464 +void tmu_reset(void)
6465 +{
6466 + writel(SW_RESET, TMU_CTRL);
6467 +}
6468 +
6469 +/* Initializes TMU block.
6470 + * @param[in] cfg TMU configuration
6471 + */
6472 +void tmu_init(struct tmu_cfg *cfg)
6473 +{
6474 + int q, phyno;
6475 +
6476 + tmu_disable(0xF);
6477 + mdelay(10);
6478 +
6479 +#if !defined(LS1012A_PFE_RESET_WA)
6480 + /* keep in soft reset */
6481 + writel(SW_RESET, TMU_CTRL);
6482 +#endif
6483 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
6484 + writel(750, TMU_INQ_WATERMARK);
6485 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
6486 + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
6487 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
6488 + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
6489 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
6490 + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
6491 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
6492 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
6493 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
6494 + TMU_BMU_INQ_ADDR);
6495 +
+ /* enable all 10 schedulers [9:0] of each TDQ */
6496 + writel(0x3FF, TMU_TDQ0_SCH_CTRL);
6500 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
6501 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
6502 +
6503 +#if !defined(LS1012A_PFE_RESET_WA)
6504 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
6505 +#endif
6506 +
6507 +#if !defined(LS1012A_PFE_RESET_WA)
6508 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
6509 + /* Extra packet pointers will be stored from this address onwards */
6510 +
6511 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
6512 + writel(5, TMU_TDQ_IIFG_CFG);
6513 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
6514 +
6515 + writel(0x0, TMU_CTRL);
6516 +
6517 + /* MEM init */
6518 + pr_info("%s: mem init\n", __func__);
6519 + writel(MEM_INIT, TMU_CTRL);
6520 +
6521 + while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
6522 + ;
6523 +
6524 + /* LLM init */
6525 + pr_info("%s: lmem init\n", __func__);
6526 + writel(LLM_INIT, TMU_CTRL);
6527 +
6528 + while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
6529 + ;
6530 +#endif
6531 + /* set up each queue for tail drop */
6532 + for (phyno = 0; phyno < 4; phyno++) {
6533 + if (phyno == 2)
6534 + continue;
6535 + for (q = 0; q < 16; q++) {
6536 + u32 qdepth;
6537 +
6538 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
6539 + writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
6540 +
6541 + if (phyno == 3)
6542 + qdepth = DEFAULT_TMU3_QDEPTH;
6543 + else
6544 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
6545 + DEFAULT_MAX_QDEPTH;
6546 +
6547 + /* LOG: 68855 */
6548 + /*
6549 + * The following is a workaround for the reordered
6550 + * packet and BMU2 buffer leakage issue.
6551 + */
6552 + if (CHIP_REVISION() == 0)
6553 + qdepth = 31;
6554 +
6555 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
6556 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
6557 + }
6558 + }
6559 +
6560 +#ifdef CFG_LRO
6561 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
6562 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
6563 + writel(0, TMU_TEQ_QCFG);
6564 +#endif
6565 +
6566 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
6567 +
6568 + writel(0x0, TMU_CTRL);
6569 +}
6570 +
6571 +/* Enables TMU-PE cores.
6572 + * @param[in] pe_mask TMU PE mask
6573 + */
6574 +void tmu_enable(u32 pe_mask)
6575 +{
6576 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
6577 +}
6578 +
6579 +/* Disables TMU cores.
6580 + * @param[in] pe_mask TMU PE mask
6581 + */
6582 +void tmu_disable(u32 pe_mask)
6583 +{
6584 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
6585 +}
6586 +
6587 +/* Returns the TMU queue status
6588 + * @param[in] if_id gem interface id or TMU index
6589 + * @return bit mask of busy queues; zero means all
6590 + * queues are empty
6591 + */
6592 +u32 tmu_qstatus(u32 if_id)
6593 +{
6594 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6595 + offsetof(struct pe_status, tmu_qstatus), 4));
6596 +}
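+
+/*
+ * Typical use (sketch): busy-wait until the TMU has drained every queue
+ * of an interface before disabling it; each set bit in the returned mask
+ * is a non-empty queue:
+ *
+ * while (tmu_qstatus(if_id) & 0xffff)
+ * cpu_relax();
+ */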
6597 +
6598 +u32 tmu_pkts_processed(u32 if_id)
6599 +{
6600 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6601 + offsetof(struct pe_status, rx), 4));
6602 +}
6603 +
6604 +/**************************** UTIL ***************************/
6605 +
6606 +/* Resets UTIL block.
6607 + */
6608 +void util_reset(void)
6609 +{
6610 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
6611 +}
6612 +
6613 +/* Initializes UTIL block.
6614 + * @param[in] cfg UTIL configuration
6615 + */
6616 +void util_init(struct util_cfg *cfg)
6617 +{
6618 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
6619 +}
6620 +
6621 +/* Enables UTIL-PE core.
6622 + *
6623 + */
6624 +void util_enable(void)
6625 +{
6626 + writel(CORE_ENABLE, UTIL_TX_CTRL);
6627 +}
6628 +
6629 +/* Disables UTIL-PE core.
6630 + *
6631 + */
6632 +void util_disable(void)
6633 +{
6634 + writel(CORE_DISABLE, UTIL_TX_CTRL);
6635 +}
6636 +
6637 +/**************************** HIF ***************************/
6638 +/* Initializes HIF copy block.
6639 + *
6640 + */
6641 +void hif_init(void)
6642 +{
6643 + /*Initialize HIF registers*/
6644 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
6645 + HIF_POLL_CTRL);
6646 +}
6647 +
6648 +/* Enable hif tx DMA and interrupt
6649 + *
6650 + */
6651 +void hif_tx_enable(void)
6652 +{
6653 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
6654 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
6655 + HIF_INT_ENABLE);
6656 +}
6657 +
6658 +/* Disable hif tx DMA and interrupt
6659 + *
6660 + */
6661 +void hif_tx_disable(void)
6662 +{
6663 + u32 hif_int;
6664 +
6665 + writel(0, HIF_TX_CTRL);
6666 +
6667 + hif_int = readl(HIF_INT_ENABLE);
6668 + hif_int &= HIF_TXPKT_INT_EN;
6669 + writel(hif_int, HIF_INT_ENABLE);
6670 +}
6671 +
6672 +/* Enable hif rx DMA and interrupt
6673 + *
6674 + */
6675 +void hif_rx_enable(void)
6676 +{
6677 + hif_rx_dma_start();
6678 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
6679 + HIF_INT_ENABLE);
6680 +}
6681 +
6682 +/* Disable hif rx DMA and interrupt
6683 + *
6684 + */
6685 +void hif_rx_disable(void)
6686 +{
6687 + u32 hif_int;
6688 +
6689 + writel(0, HIF_RX_CTRL);
6690 +
6691 + hif_int = readl(HIF_INT_ENABLE);
6692 + hif_int &= HIF_RXPKT_INT_EN;
6693 + writel(hif_int, HIF_INT_ENABLE);
6694 +}
6695 --- /dev/null
6696 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
6697 @@ -0,0 +1,1072 @@
6698 +/*
6699 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6700 + * Copyright 2017 NXP
6701 + *
6702 + * This program is free software; you can redistribute it and/or modify
6703 + * it under the terms of the GNU General Public License as published by
6704 + * the Free Software Foundation; either version 2 of the License, or
6705 + * (at your option) any later version.
6706 + *
6707 + * This program is distributed in the hope that it will be useful,
6708 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6709 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6710 + * GNU General Public License for more details.
6711 + *
6712 + * You should have received a copy of the GNU General Public License
6713 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
6714 + */
6715 +
6716 +#include <linux/kernel.h>
6717 +#include <linux/interrupt.h>
6718 +#include <linux/dma-mapping.h>
6719 +#include <linux/dmapool.h>
6720 +#include <linux/sched.h>
6721 +#include <linux/module.h>
6722 +#include <linux/list.h>
6723 +#include <linux/kthread.h>
6724 +#include <linux/slab.h>
6725 +
6726 +#include <linux/io.h>
6727 +#include <asm/irq.h>
6728 +
6729 +#include "pfe_mod.h"
6730 +
6731 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
6732 +
6733 +unsigned char napi_first_batch;
6734 +
6735 +static void pfe_tx_do_cleanup(unsigned long data);
6736 +
6737 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
6738 +{
6739 + void *addr;
6740 + dma_addr_t dma_addr;
6741 + int err = 0;
6742 +
6743 + pr_info("%s\n", __func__);
6744 + addr = dma_alloc_coherent(pfe->dev,
6745 + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
6746 + HIF_TX_DESC_NT * sizeof(struct hif_desc),
6747 + &dma_addr, GFP_KERNEL);
6748 +
6749 + if (!addr) {
6750 + pr_err("%s: Could not allocate buffer descriptors!\n"
6751 + , __func__);
6752 + err = -ENOMEM;
6753 + goto err0;
6754 + }
6755 +
6756 + hif->descr_baseaddr_p = dma_addr;
6757 + hif->descr_baseaddr_v = addr;
6758 + hif->rx_ring_size = HIF_RX_DESC_NT;
6759 + hif->tx_ring_size = HIF_TX_DESC_NT;
6760 +
6761 + return 0;
6762 +
6763 +err0:
6764 + return err;
6765 +}
6766 +
6767 +#if defined(LS1012A_PFE_RESET_WA)
6768 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
6769 +{
6770 + int ii;
6771 + struct hif_desc *desc = hif->rx_base;
6772 +
6773 + /*Mark all descriptors as LAST_BD */
6774 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6775 + desc->ctrl |= BD_CTRL_LAST_BD;
6776 + desc++;
6777 + }
6778 +}
6779 +
6780 +struct class_rx_hdr_t {
6781 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
6782 + u16 length; /* total packet length */
6783 + u16 phyno; /* input physical port number */
6784 + u32 status; /* gemac status bits */
6785 + u32 status2; /* reserved for software usage */
6786 +};
6787 +
6788 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
6789 + * except overflow
6790 + */
6791 +#define STATUS_BAD_FRAME_ERR BIT(16)
6792 +#define STATUS_LENGTH_ERR BIT(17)
6793 +#define STATUS_CRC_ERR BIT(18)
6794 +#define STATUS_TOO_SHORT_ERR BIT(19)
6795 +#define STATUS_TOO_LONG_ERR BIT(20)
6796 +#define STATUS_CODE_ERR BIT(21)
6797 +#define STATUS_MC_HASH_MATCH BIT(22)
6798 +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
6799 +#define STATUS_UNICAST_HASH_MATCH BIT(24)
6800 +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
6801 +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
6802 +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
6803 +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
6804 +#define MIN_PKT_SIZE 64
6805 +
6806 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
6807 +{
6808 + int i;
6809 +
6810 + for (i = 0; i < len; i += sizeof(u32)) {
6811 + *dst = htonl(*src);
6812 + dst++; src++;
6813 + }
6814 +}
6815 +
6816 +static void send_dummy_pkt_to_hif(void)
6817 +{
6818 + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
6819 + u32 physaddr;
6820 + struct class_rx_hdr_t local_hdr;
6821 + static u32 dummy_pkt[] = {
6822 + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
6823 + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
6824 + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
6825 + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
6826 +
6827 + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
6828 + if (!ddr_ptr)
6829 + return;
6830 +
6831 + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
6832 + if (!lmem_ptr)
6833 + return;
6834 +
6835 + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
6836 + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
6837 +
6838 + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
6839 +
6840 + local_hdr.phyno = htons(0); /* RX_PHY_0 */
6841 + local_hdr.length = htons(MIN_PKT_SIZE);
6842 +
6843 + local_hdr.next_ptr = htonl((u32)physaddr);
6844 +	/* Mark the checksums as correct */
6845 + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
6846 + STATUS_UDP_CHECKSUM_CORRECT |
6847 + STATUS_TCP_CHECKSUM_CORRECT |
6848 + STATUS_UNICAST_HASH_MATCH |
6849 + STATUS_CUMULATIVE_ARC_HIT));
6850 + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
6851 + sizeof(local_hdr));
6852 +
6853 + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
6854 + 0x40);
6855 +
6856 + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
6857 +}
6858 +
6859 +void pfe_hif_rx_idle(struct pfe_hif *hif)
6860 +{
6861 + int hif_stop_loop = 10;
6862 + u32 rx_status;
6863 +
6864 + pfe_hif_disable_rx_desc(hif);
6865 + pr_info("Bringing hif to idle state...");
6866 + writel(0, HIF_INT_ENABLE);
6867 + /*If HIF Rx BDP is busy send a dummy packet */
6868 + do {
6869 + rx_status = readl(HIF_RX_STATUS);
6870 + if (rx_status & BDP_CSR_RX_DMA_ACTV)
6871 + send_dummy_pkt_to_hif();
6872 +
6873 + usleep_range(100, 150);
6874 + } while (--hif_stop_loop);
6875 +
6876 + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
6877 + pr_info("Failed\n");
6878 + else
6879 + pr_info("Done\n");
6880 +}
6881 +#endif
6882 +
6883 +static void pfe_hif_free_descr(struct pfe_hif *hif)
6884 +{
6885 + pr_info("%s\n", __func__);
6886 +
6887 + dma_free_coherent(pfe->dev,
6888 + hif->rx_ring_size * sizeof(struct hif_desc) +
6889 + hif->tx_ring_size * sizeof(struct hif_desc),
6890 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
6891 +}
6892 +
6893 +void pfe_hif_desc_dump(struct pfe_hif *hif)
6894 +{
6895 + struct hif_desc *desc;
6896 + unsigned long desc_p;
6897 + int ii = 0;
6898 +
6899 + pr_info("%s\n", __func__);
6900 +
6901 + desc = hif->rx_base;
6902 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
6903 + hif->descr_baseaddr_p);
6904 +
6905 + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
6906 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6907 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6908 + readl(&desc->status), readl(&desc->ctrl),
6909 + readl(&desc->data), readl(&desc->next));
6910 + desc++;
6911 + }
6912 +
6913 + desc = hif->tx_base;
6914 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
6915 + hif->descr_baseaddr_p);
6916 +
6917 + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
6918 + for (ii = 0; ii < hif->tx_ring_size; ii++) {
6919 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6920 + readl(&desc->status), readl(&desc->ctrl),
6921 + readl(&desc->data), readl(&desc->next));
6922 + desc++;
6923 + }
6924 +}
6925 +
6926 +/* pfe_hif_release_buffers */
6927 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
6928 +{
6929 + struct hif_desc *desc;
6930 + int i = 0;
6931 +
6932 + hif->rx_base = hif->descr_baseaddr_v;
6933 +
6934 + pr_info("%s\n", __func__);
6935 +
6936 + /*Free Rx buffers */
6937 + desc = hif->rx_base;
6938 + for (i = 0; i < hif->rx_ring_size; i++) {
6939 + if (readl(&desc->data)) {
6940 + if ((i < hif->shm->rx_buf_pool_cnt) &&
6941 + (!hif->shm->rx_buf_pool[i])) {
6942 + /*
6943 + * dma_unmap_single(hif->dev, desc->data,
6944 + * hif->rx_buf_len[i], DMA_FROM_DEVICE);
6945 + */
6946 + dma_unmap_single(hif->dev,
6947 + DDR_PFE_TO_PHYS(
6948 + readl(&desc->data)),
6949 + hif->rx_buf_len[i],
6950 + DMA_FROM_DEVICE);
6951 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
6952 + } else {
6953 + pr_err("%s: buffer pool already full\n"
6954 + , __func__);
6955 + }
6956 + }
6957 +
6958 + writel(0, &desc->data);
6959 + writel(0, &desc->status);
6960 + writel(0, &desc->ctrl);
6961 + desc++;
6962 + }
6963 +}
6964 +
6965 +/*
6966 + * pfe_hif_init_buffers
6967 + * This function initializes the HIF Rx/Tx ring descriptors and
6968 + * fills the Rx queue with buffers.
6969 + */
6970 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
6971 +{
6972 + struct hif_desc *desc, *first_desc_p;
6973 + u32 data;
6974 + int i = 0;
6975 +
6976 + pr_info("%s\n", __func__);
6977 +
6978 +	/* Check that enough Rx buffers are available in the shared memory */
6979 + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
6980 + return -ENOMEM;
6981 +
6982 + hif->rx_base = hif->descr_baseaddr_v;
6983 + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
6984 +
6985 + /*Initialize Rx descriptors */
6986 + desc = hif->rx_base;
6987 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
6988 +
6989 + for (i = 0; i < hif->rx_ring_size; i++) {
6990 + /* Initialize Rx buffers from the shared memory */
6991 +
6992 + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
6993 + pfe_pkt_size, DMA_FROM_DEVICE);
6994 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
6995 + hif->rx_buf_len[i] = pfe_pkt_size;
6996 + hif->shm->rx_buf_pool[i] = NULL;
6997 +
6998 + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
6999 + writel(DDR_PHYS_TO_PFE(data), &desc->data);
7000 + } else {
7001 + pr_err("%s : low on mem\n", __func__);
7002 +
7003 + goto err;
7004 + }
7005 +
7006 + writel(0, &desc->status);
7007 +
7008 + /*
7009 + * Ensure everything else is written to DDR before
7010 + * writing bd->ctrl
7011 + */
7012 + wmb();
7013 +
7014 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
7015 + | BD_CTRL_DIR | BD_CTRL_DESC_EN
7016 + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
7017 +
7018 + /* Chain descriptors */
7019 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7020 + desc++;
7021 + }
7022 +
7023 + /* Overwrite last descriptor to chain it to first one*/
7024 + desc--;
7025 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7026 +
7027 + hif->rxtoclean_index = 0;
7028 +
7029 + /*Initialize Rx buffer descriptor ring base address */
7030 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
7031 +
7032 + hif->tx_base = hif->rx_base + hif->rx_ring_size;
7033 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
7034 + hif->rx_ring_size;
7035 + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
7036 +
7037 + /*Initialize tx descriptors */
7038 + desc = hif->tx_base;
7039 +
7040 + for (i = 0; i < hif->tx_ring_size; i++) {
7041 + /* Chain descriptors */
7042 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7043 + writel(0, &desc->ctrl);
7044 + desc++;
7045 + }
7046 +
7047 + /* Overwrite last descriptor to chain it to first one */
7048 + desc--;
7049 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7050 + hif->txavail = hif->tx_ring_size;
7051 + hif->txtosend = 0;
7052 + hif->txtoclean = 0;
7053 + hif->txtoflush = 0;
7054 +
7055 + /*Initialize Tx buffer descriptor ring base address */
7056 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7057 +
7058 + return 0;
7059 +
7060 +err:
7061 + pfe_hif_release_buffers(hif);
7062 + return -ENOMEM;
7063 +}
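
/*
 * Illustration only -- a hypothetical checker, not used by the driver:
 * after pfe_hif_init_buffers() each ring forms a closed circle in PFE
 * address space, i.e. desc[i].next points at desc[i + 1] and the last
 * descriptor points back at desc[0]. Sketch for the Rx ring, assuming
 * the hif layout set up above:
 */
static void pfe_hif_check_rx_ring_circular(struct pfe_hif *hif)
{
	struct hif_desc *first_p = (struct hif_desc *)hif->descr_baseaddr_p;
	struct hif_desc *desc = hif->rx_base;
	int i;

	for (i = 0; i < hif->rx_ring_size - 1; i++, desc++)
		WARN_ON(readl(&desc->next) !=
			(u32)DDR_PHYS_TO_PFE(first_p + i + 1));

	/* the last descriptor must wrap back to the first one */
	WARN_ON(readl(&desc->next) != (u32)DDR_PHYS_TO_PFE(first_p));
}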
7064 +
7065 +/*
7066 + * pfe_hif_client_register
7067 + *
7068 + * This function is used to register a client driver with the HIF driver.
7069 + *
7070 + * Return value:
7071 + * 0 - on successful registration
7072 + */
7073 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7074 + struct hif_client_shm *client_shm)
7075 +{
7076 + struct hif_client *client = &hif->client[client_id];
7077 + u32 i, cnt;
7078 + struct rx_queue_desc *rx_qbase;
7079 + struct tx_queue_desc *tx_qbase;
7080 + struct hif_rx_queue *rx_queue;
7081 + struct hif_tx_queue *tx_queue;
7082 + int err = 0;
7083 +
7084 + pr_info("%s\n", __func__);
7085 +
7086 + spin_lock_bh(&hif->tx_lock);
7087 +
7088 + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7089 + pr_err("%s: client %d already registered\n",
7090 + __func__, client_id);
7091 + err = -1;
7092 + goto unlock;
7093 + }
7094 +
7095 + memset(client, 0, sizeof(struct hif_client));
7096 +
7097 + /* Initialize client Rx queues baseaddr, size */
7098 +
7099 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7100 +	/* Check if the client is requesting more queues than supported */
7101 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7102 + cnt = HIF_CLIENT_QUEUES_MAX;
7103 +
7104 + client->rx_qn = cnt;
7105 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7106 + for (i = 0; i < cnt; i++) {
7107 + rx_queue = &client->rx_q[i];
7108 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7109 + rx_queue->size = client_shm->rx_qsize;
7110 + rx_queue->write_idx = 0;
7111 + }
7112 +
7113 + /* Initialize client Tx queues baseaddr, size */
7114 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7115 +
7116 +	/* Check if the client is requesting more queues than supported */
7117 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7118 + cnt = HIF_CLIENT_QUEUES_MAX;
7119 +
7120 + client->tx_qn = cnt;
7121 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7122 + for (i = 0; i < cnt; i++) {
7123 + tx_queue = &client->tx_q[i];
7124 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7125 + tx_queue->size = client_shm->tx_qsize;
7126 + tx_queue->ack_idx = 0;
7127 + }
7128 +
7129 + set_bit(client_id, &hif->shm->g_client_status[0]);
7130 +
7131 +unlock:
7132 + spin_unlock_bh(&hif->tx_lock);
7133 +
7134 + return err;
7135 +}
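
/*
 * Illustration only -- a hypothetical round trip, not used by the
 * driver: the queue counts arrive packed in client_shm->ctrl via the
 * CLIENT_CTRL_* macros defined in pfe_hif_lib.h:
 */
static void client_ctrl_pack_example(void)
{
	u32 ctrl = (2 << CLIENT_CTRL_TX_Q_CNT_OFST) |
		   (1 << CLIENT_CTRL_RX_Q_CNT_OFST);

	WARN_ON(CLIENT_CTRL_TX_Q_CNT(ctrl) != 2);	/* tx_qn == 2 */
	WARN_ON(CLIENT_CTRL_RX_Q_CNT(ctrl) != 1);	/* rx_qn == 1 */
}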
7136 +
7137 +/*
7138 + * pfe_hif_client_unregister
7139 + *
7140 + * This function is used to unregister a client from the HIF driver.
7141 + *
7142 + */
7143 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7144 +{
7145 + pr_info("%s\n", __func__);
7146 +
7147 + /*
7148 + * Mark client as no longer available (which prevents further packet
7149 + * receive for this client)
7150 + */
7151 + spin_lock_bh(&hif->tx_lock);
7152 +
7153 + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7154 + pr_err("%s: client %d not registered\n", __func__,
7155 + client_id);
7156 +
7157 + spin_unlock_bh(&hif->tx_lock);
7158 + return;
7159 + }
7160 +
7161 + clear_bit(client_id, &hif->shm->g_client_status[0]);
7162 +
7163 + spin_unlock_bh(&hif->tx_lock);
7164 +}
7165 +
7166 +/*
7167 + * client_put_rxpacket-
7168 + * This function puts the Rx pkt in the given client Rx queue.
7169 + * It actually swaps the Rx pkt with the buffer in the client Rx
7170 + * descriptor and returns the freed buffer.
7171 + *
7172 + * A NULL return means the client Rx queue is full and the packet
7173 + * could not be handed to the client queue.
7174 + */
7175 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7176 + u32 flags, u32 client_ctrl, u32 *rem_len)
7177 +{
7178 + void *free_pkt = NULL;
7179 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
7180 +
7181 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7182 + if (page_mode) {
7183 + int rem_page_size = PAGE_SIZE -
7184 + PRESENT_OFST_IN_PAGE(pkt);
7185 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7186 + pfe_pkt_headroom);
7187 + *rem_len = (rem_page_size - cur_pkt_size);
7188 + if (*rem_len) {
7189 + free_pkt = pkt + cur_pkt_size;
7190 + get_page(virt_to_page(free_pkt));
7191 + } else {
7192 + free_pkt = (void
7193 + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7194 + *rem_len = pfe_pkt_size;
7195 + }
7196 + } else {
7197 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7198 + GFP_DMA_PFE);
7199 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7200 + }
7201 +
7202 + if (free_pkt) {
7203 + desc->data = pkt;
7204 + desc->client_ctrl = client_ctrl;
7205 + /*
7206 + * Ensure everything else is written to DDR before
7207 + * writing bd->ctrl
7208 + */
7209 + smp_wmb();
7210 + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7211 + queue->write_idx = (queue->write_idx + 1)
7212 + & (queue->size - 1);
7213 +
7214 + free_pkt += pfe_pkt_headroom;
7215 + }
7216 + }
7217 +
7218 + return free_pkt;
7219 +}
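
/*
 * Note: the "(idx + 1) & (size - 1)" update above is a cheap modulo
 * that is only correct when size is a power of two; e.g. with
 * size == 8, (7 + 1) & 7 == 0 (wrap) and (3 + 1) & 7 == 4. The ring
 * sizes in this driver (HIF_RX_DESC_NT == 256, HIF_TX_DESC_NT == 2048)
 * satisfy that. As a hypothetical helper (illustration only):
 */
static inline u32 ring_idx_next(u32 idx, u32 size)
{
	/* size must be a power of two */
	return (idx + 1) & (size - 1);
}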
7220 +
7221 +/*
7222 + * pfe_hif_rx_process-
7223 + * This function does pfe hif rx queue processing.
7224 + * Dequeues packets from the Rx ring and sends them to the client queues.
7225 + */
7226 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7227 +{
7228 + struct hif_desc *desc;
7229 + struct hif_hdr *pkt_hdr;
7230 + struct __hif_hdr hif_hdr;
7231 + void *free_buf;
7232 + int rtc, len, rx_processed = 0;
7233 + struct __hif_desc local_desc;
7234 + int flags;
7235 + unsigned int desc_p;
7236 + unsigned int buf_size = 0;
7237 +
7238 + spin_lock_bh(&hif->lock);
7239 +
7240 + rtc = hif->rxtoclean_index;
7241 +
7242 + while (rx_processed < budget) {
7243 + desc = hif->rx_base + rtc;
7244 +
7245 + __memcpy12(&local_desc, desc);
7246 +
7247 + /* ACK pending Rx interrupt */
7248 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7249 + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7250 +
7251 + if (rx_processed == 0) {
7252 + if (napi_first_batch == 1) {
7253 + desc_p = hif->descr_baseaddr_p +
7254 + ((unsigned long int)(desc) -
7255 + (unsigned long
7256 + int)hif->descr_baseaddr_v);
7257 + napi_first_batch = 0;
7258 + }
7259 + }
7260 +
7261 + __memcpy12(&local_desc, desc);
7262 +
7263 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
7264 + break;
7265 + }
7266 +
7267 + napi_first_batch = 0;
7268 +
7269 +#ifdef HIF_NAPI_STATS
7270 + hif->napi_counters[NAPI_DESC_COUNT]++;
7271 +#endif
7272 + len = BD_BUF_LEN(local_desc.ctrl);
7273 + /*
7274 + * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7275 + * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7276 + */
7277 + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7278 + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7279 +
7280 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7281 +
7282 + /* Track last HIF header received */
7283 + if (!hif->started) {
7284 + hif->started = 1;
7285 +
7286 + __memcpy8(&hif_hdr, pkt_hdr);
7287 +
7288 + hif->qno = hif_hdr.hdr.q_num;
7289 + hif->client_id = hif_hdr.hdr.client_id;
7290 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7291 + hif_hdr.hdr.client_ctrl;
7292 + flags = CL_DESC_FIRST;
7293 +
7294 + } else {
7295 + flags = 0;
7296 + }
7297 +
7298 + if (local_desc.ctrl & BD_CTRL_LIFM)
7299 + flags |= CL_DESC_LAST;
7300 +
7301 +		/* Check that the client id is valid and still registered */
7302 + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7303 + !(test_bit(hif->client_id,
7304 + &hif->shm->g_client_status[0]))) {
7305 + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7306 + __func__,
7307 + hif->client_id,
7308 + hif->qno);
7309 +
7310 + free_buf = pkt_hdr;
7311 +
7312 + goto pkt_drop;
7313 + }
7314 +
7315 +		/* Check for a valid queue number */
7316 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7317 + pr_info("%s: packet with invalid queue: %d\n"
7318 + , __func__, hif->qno);
7319 + hif->qno = 0;
7320 + }
7321 +
7322 + free_buf =
7323 + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7324 + (void *)pkt_hdr, len, flags,
7325 + hif->client_ctrl, &buf_size);
7326 +
7327 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7328 + hif->qno);
7329 +
7330 + if (unlikely(!free_buf)) {
7331 +#ifdef HIF_NAPI_STATS
7332 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7333 +#endif
7334 + /*
7335 + * If we want to keep in polling mode to retry later,
7336 + * we need to tell napi that we consumed
7337 + * the full budget or we will hit a livelock scenario.
7338 + * The core code keeps this napi instance
7339 + * at the head of the list and none of the other
7340 + * instances get to run
7341 + */
7342 + rx_processed = budget;
7343 +
7344 + if (flags & CL_DESC_FIRST)
7345 + hif->started = 0;
7346 +
7347 + break;
7348 + }
7349 +
7350 +pkt_drop:
7351 + /*Fill free buffer in the descriptor */
7352 + hif->rx_buf_addr[rtc] = free_buf;
7353 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7354 + writel((DDR_PHYS_TO_PFE
7355 + ((u32)dma_map_single(hif->dev,
7356 + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7357 + &desc->data);
7358 + /*
7359 + * Ensure everything else is written to DDR before
7360 + * writing bd->ctrl
7361 + */
7362 + wmb();
7363 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7364 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7365 + &desc->ctrl);
7366 +
7367 + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7368 +
7369 + if (local_desc.ctrl & BD_CTRL_LIFM) {
7370 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7371 + rx_processed++;
7372 +
7373 +#ifdef HIF_NAPI_STATS
7374 + hif->napi_counters[NAPI_PACKET_COUNT]++;
7375 +#endif
7376 + }
7377 + hif->started = 0;
7378 + }
7379 + }
7380 +
7381 + hif->rxtoclean_index = rtc;
7382 + spin_unlock_bh(&hif->lock);
7383 +
7384 + /* we made some progress, re-start rx dma in case it stopped */
7385 + hif_rx_dma_start();
7386 +
7387 + return rx_processed;
7388 +}
7389 +
7390 +/*
7391 + * client_ack_txpacket-
7392 + * This function acks the Tx packet in the given client Tx queue by
7393 + * resetting the ownership bit in the descriptor.
7394 + */
7395 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7396 + unsigned int q_no)
7397 +{
7398 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7399 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7400 +
7401 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7402 + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7403 + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7404 +
7405 + return 0;
7406 +
7407 + } else {
7408 + /*This should not happen */
7409 + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7410 + hif->txtosend, hif->txtoclean, hif->txavail,
7411 + client_id, q_no, queue, queue->ack_idx);
7412 + WARN(1, "%s: doesn't own this descriptor", __func__);
7413 + return 1;
7414 + }
7415 +}
7416 +
7417 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7418 +{
7419 + struct hif_desc *desc;
7420 + struct hif_desc_sw *desc_sw;
7421 + int ttc, tx_avl;
7422 + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7423 +
7424 + ttc = hif->txtoclean;
7425 + tx_avl = hif->txavail;
7426 +
7427 + while ((tx_avl < hif->tx_ring_size) && count--) {
7428 + desc = hif->tx_base + ttc;
7429 +
7430 + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7431 + break;
7432 +
7433 + desc_sw = &hif->tx_sw_queue[ttc];
7434 +
7435 + if (desc_sw->data) {
7436 + /*
7437 + * dmap_unmap_single(hif->dev, desc_sw->data,
7438 + * desc_sw->len, DMA_TO_DEVICE);
7439 + */
7440 + dma_unmap_single(hif->dev, desc_sw->data,
7441 + desc_sw->len, DMA_TO_DEVICE);
7442 + }
7443 +
7444 +		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7445 + pr_err("Invalid cl id %d\n", desc_sw->client_id);
7446 +
7447 + pkts_done[desc_sw->client_id]++;
7448 +
7449 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7450 +
7451 + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7452 + tx_avl++;
7453 + }
7454 +
7455 + if (pkts_done[0])
7456 + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7457 + if (pkts_done[1])
7458 + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7459 +
7460 + hif->txtoclean = ttc;
7461 + hif->txavail = tx_avl;
7462 +
7463 + if (!count) {
7464 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7465 + } else {
7466 + /*Enable Tx done interrupt */
7467 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7468 + HIF_INT_ENABLE);
7469 + }
7470 +}
7471 +
7472 +static void pfe_tx_do_cleanup(unsigned long data)
7473 +{
7474 + struct pfe_hif *hif = (struct pfe_hif *)data;
7475 +
7476 + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7477 +
7478 + hif_tx_done_process(hif, 64);
7479 +}
7480 +
7481 +/*
7482 + * __hif_xmit_pkt -
7483 + * This function puts one packet in the HIF Tx queue
7484 + */
7485 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7486 + q_no, void *data, u32 len, unsigned int flags)
7487 +{
7488 + struct hif_desc *desc;
7489 + struct hif_desc_sw *desc_sw;
7490 +
7491 + desc = hif->tx_base + hif->txtosend;
7492 + desc_sw = &hif->tx_sw_queue[hif->txtosend];
7493 +
7494 + desc_sw->len = len;
7495 + desc_sw->client_id = client_id;
7496 + desc_sw->q_no = q_no;
7497 + desc_sw->flags = flags;
7498 +
7499 + if (flags & HIF_DONT_DMA_MAP) {
7500 + desc_sw->data = 0;
7501 + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
7502 + } else {
7503 + desc_sw->data = dma_map_single(hif->dev, data, len,
7504 + DMA_TO_DEVICE);
7505 + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
7506 + }
7507 +
7508 + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
7509 + hif->txavail--;
7510 +
7511 + if ((!((flags & HIF_DATA_VALID) && (flags &
7512 + HIF_LAST_BUFFER))))
7513 + goto skip_tx;
7514 +
7515 + /*
7516 + * Ensure everything else is written to DDR before
7517 + * writing bd->ctrl
7518 + */
7519 + wmb();
7520 +
7521 + do {
7522 + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
7523 + desc = hif->tx_base + hif->txtoflush;
7524 +
7525 + if (desc_sw->flags & HIF_LAST_BUFFER) {
7526 + writel((BD_CTRL_LIFM |
7527 + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
7528 + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
7529 + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
7530 + &desc->ctrl);
7531 + } else {
7532 + writel((BD_CTRL_DESC_EN |
7533 + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
7534 + }
7535 + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
7536 +	} while (hif->txtoflush != hif->txtosend);
7539 +
7540 +skip_tx:
7541 + return;
7542 +}
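
/*
 * Illustration only -- a hypothetical caller, not part of the driver:
 * handing a two-fragment packet to __hif_xmit_pkt(). Only the call
 * that carries HIF_DATA_VALID | HIF_LAST_BUFFER triggers the flush
 * loop above, which enables the whole descriptor chain at once:
 */
static void hif_xmit_two_frags_example(struct pfe_hif *hif,
				       unsigned int client_id,
				       unsigned int q_no,
				       void *frag0, u32 len0,
				       void *frag1, u32 len1)
{
	hif_tx_lock(hif);
	if (__hif_tx_avail(hif) >= 2) {
		__hif_xmit_pkt(hif, client_id, q_no, frag0, len0,
			       HIF_FIRST_BUFFER);
		__hif_xmit_pkt(hif, client_id, q_no, frag1, len1,
			       HIF_LAST_BUFFER | HIF_DATA_VALID);
	}
	hif_tx_unlock(hif);
}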
7543 +
7544 +static irqreturn_t wol_isr(int irq, void *dev_id)
7545 +{
7546 + pr_info("WoL\n");
7547 + gemac_set_wol(EMAC1_BASE_ADDR, 0);
7548 + gemac_set_wol(EMAC2_BASE_ADDR, 0);
7549 + return IRQ_HANDLED;
7550 +}
7551 +
7552 +/*
7553 + * hif_isr-
7554 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
7555 + */
7556 +static irqreturn_t hif_isr(int irq, void *dev_id)
7557 +{
7558 + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
7559 + int int_status;
7560 + int int_enable_mask;
7561 +
7562 + /*Read hif interrupt source register */
7563 + int_status = readl_relaxed(HIF_INT_SRC);
7564 + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
7565 +
7566 + if ((int_status & HIF_INT) == 0)
7567 + return IRQ_NONE;
7568 +
7569 + int_status &= ~(HIF_INT);
7570 +
7571 + if (int_status & HIF_RXPKT_INT) {
7572 + int_status &= ~(HIF_RXPKT_INT);
7573 + int_enable_mask &= ~(HIF_RXPKT_INT);
7574 +
7575 + napi_first_batch = 1;
7576 +
7577 + if (napi_schedule_prep(&hif->napi)) {
7578 +#ifdef HIF_NAPI_STATS
7579 + hif->napi_counters[NAPI_SCHED_COUNT]++;
7580 +#endif
7581 + __napi_schedule(&hif->napi);
7582 + }
7583 + }
7584 +
7585 + if (int_status & HIF_TXPKT_INT) {
7586 + int_status &= ~(HIF_TXPKT_INT);
7587 + int_enable_mask &= ~(HIF_TXPKT_INT);
7588 +		/* Schedule the tx cleanup tasklet */
7589 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7590 + }
7591 +
7592 + /*Disable interrupts, they will be enabled after they are serviced */
7593 + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
7594 +
7595 + if (int_status) {
7596 + pr_info("%s : Invalid interrupt : %d\n", __func__,
7597 + int_status);
7598 + writel(int_status, HIF_INT_SRC);
7599 + }
7600 +
7601 + return IRQ_HANDLED;
7602 +}
7603 +
7604 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
7605 +{
7606 + unsigned int client_id = data1;
7607 +
7608 + if (client_id >= HIF_CLIENTS_MAX) {
7609 + pr_err("%s: client id %d out of bounds\n", __func__,
7610 + client_id);
7611 + return;
7612 + }
7613 +
7614 + switch (req) {
7615 + case REQUEST_CL_REGISTER:
7616 +		/* Request to register a client */
7617 + pr_info("%s: register client_id %d\n",
7618 + __func__, client_id);
7619 + pfe_hif_client_register(hif, client_id, (struct
7620 + hif_client_shm *)&hif->shm->client[client_id]);
7621 + break;
7622 +
7623 + case REQUEST_CL_UNREGISTER:
7624 + pr_info("%s: unregister client_id %d\n",
7625 + __func__, client_id);
7626 +
7627 +		/* Request to unregister a client */
7628 + pfe_hif_client_unregister(hif, client_id);
7629 +
7630 + break;
7631 +
7632 + default:
7633 + pr_err("%s: unsupported request %d\n",
7634 + __func__, req);
7635 + break;
7636 + }
7637 +
7638 + /*
7639 + * Process client Tx queues
7640 +	 * Currently we do not check for pending tx
7641 + */
7642 +}
7643 +
7644 +/*
7645 + * pfe_hif_rx_poll
7646 + * This is the NAPI poll function that processes the HIF Rx queue.
7647 + */
7648 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
7649 +{
7650 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
7651 + int work_done;
7652 +
7653 +#ifdef HIF_NAPI_STATS
7654 + hif->napi_counters[NAPI_POLL_COUNT]++;
7655 +#endif
7656 +
7657 + work_done = pfe_hif_rx_process(hif, budget);
7658 +
7659 + if (work_done < budget) {
7660 + napi_complete(napi);
7661 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
7662 + HIF_INT_ENABLE);
7663 + }
7664 +#ifdef HIF_NAPI_STATS
7665 + else
7666 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
7667 +#endif
7668 +
7669 + return work_done;
7670 +}
7671 +
7672 +/*
7673 + * pfe_hif_init
7674 + * This function initializes the base addresses, the irq, etc.
7675 + */
7676 +int pfe_hif_init(struct pfe *pfe)
7677 +{
7678 + struct pfe_hif *hif = &pfe->hif;
7679 + int err;
7680 +
7681 + pr_info("%s\n", __func__);
7682 +
7683 + hif->dev = pfe->dev;
7684 + hif->irq = pfe->hif_irq;
7685 +
7686 + err = pfe_hif_alloc_descr(hif);
7687 + if (err)
7688 + goto err0;
7689 +
7690 + if (pfe_hif_init_buffers(hif)) {
7691 + pr_err("%s: Could not initialize buffer descriptors\n"
7692 + , __func__);
7693 + err = -ENOMEM;
7694 + goto err1;
7695 + }
7696 +
7697 + /* Initialize NAPI for Rx processing */
7698 + init_dummy_netdev(&hif->dummy_dev);
7699 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
7700 + HIF_RX_POLL_WEIGHT);
7701 + napi_enable(&hif->napi);
7702 +
7703 + spin_lock_init(&hif->tx_lock);
7704 + spin_lock_init(&hif->lock);
7705 +
7706 + hif_init();
7707 + hif_rx_enable();
7708 + hif_tx_enable();
7709 +
7710 +	/* Enable HIF interrupts */
7711 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
7712 +
7713 + gpi_enable(HGPI_BASE_ADDR);
7714 +
7715 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
7716 + if (err) {
7717 + pr_err("%s: failed to get the hif IRQ = %d\n",
7718 + __func__, hif->irq);
7719 + goto err1;
7720 + }
7721 +
7722 + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
7723 + if (err) {
7724 + pr_err("%s: failed to get the wol IRQ = %d\n",
7725 + __func__, pfe->wol_irq);
7726 + goto err1;
7727 + }
7728 +
7729 + tasklet_init(&hif->tx_cleanup_tasklet,
7730 + (void(*)(unsigned long))pfe_tx_do_cleanup,
7731 + (unsigned long)hif);
7732 +
7733 + return 0;
7734 +err1:
7735 + pfe_hif_free_descr(hif);
7736 +err0:
7737 + return err;
7738 +}
7739 +
7740 +/* pfe_hif_exit- */
7741 +void pfe_hif_exit(struct pfe *pfe)
7742 +{
7743 + struct pfe_hif *hif = &pfe->hif;
7744 +
7745 + pr_info("%s\n", __func__);
7746 +
7747 + tasklet_kill(&hif->tx_cleanup_tasklet);
7748 +
7749 + spin_lock_bh(&hif->lock);
7750 + hif->shm->g_client_status[0] = 0;
7751 + /* Make sure all clients are disabled*/
7752 + hif->shm->g_client_status[1] = 0;
7753 +
7754 + spin_unlock_bh(&hif->lock);
7755 +
7756 + /*Disable Rx/Tx */
7757 + gpi_disable(HGPI_BASE_ADDR);
7758 + hif_rx_disable();
7759 + hif_tx_disable();
7760 +
7761 + napi_disable(&hif->napi);
7762 + netif_napi_del(&hif->napi);
7763 +
7764 + free_irq(pfe->wol_irq, pfe);
7765 + free_irq(hif->irq, hif);
7766 +
7767 + pfe_hif_release_buffers(hif);
7768 + pfe_hif_free_descr(hif);
7769 +}
7770 --- /dev/null
7771 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
7772 @@ -0,0 +1,211 @@
7773 +/*
7774 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7775 + * Copyright 2017 NXP
7776 + *
7777 + * This program is free software; you can redistribute it and/or modify
7778 + * it under the terms of the GNU General Public License as published by
7779 + * the Free Software Foundation; either version 2 of the License, or
7780 + * (at your option) any later version.
7781 + *
7782 + * This program is distributed in the hope that it will be useful,
7783 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7784 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7785 + * GNU General Public License for more details.
7786 + *
7787 + * You should have received a copy of the GNU General Public License
7788 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
7789 + */
7790 +
7791 +#ifndef _PFE_HIF_H_
7792 +#define _PFE_HIF_H_
7793 +
7794 +#include <linux/netdevice.h>
7795 +
7796 +#define HIF_NAPI_STATS
7797 +
7798 +#define HIF_CLIENT_QUEUES_MAX 16
7799 +#define HIF_RX_POLL_WEIGHT 64
7800 +
7801 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
7802 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
7803 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
7804 + & HIF_RX_PKT_MIN_SIZE_MASK)
7805 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
7806 + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
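
/*
 * Worked example (illustration only): with HIF_RX_PKT_MIN_SIZE ==
 * 0x800, ROUND_MIN_RX_SIZE() rounds a length up to the next 2KiB
 * multiple, and PRESENT_OFST_IN_PAGE() yields the 2KiB-aligned offset
 * of a buffer within its page (so a buffer at page offset 0x900 maps
 * to 0x800 with 4KiB pages). A hypothetical compile-time self-check:
 */
static inline void hif_rx_size_macro_examples(void)
{
	BUILD_BUG_ON(ROUND_MIN_RX_SIZE(1500) != 0x800);
	BUILD_BUG_ON(ROUND_MIN_RX_SIZE(0x800) != 0x800);
	BUILD_BUG_ON(ROUND_MIN_RX_SIZE(0x801) != 0x1000);
}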
7807 +
7808 +enum {
7809 + NAPI_SCHED_COUNT = 0,
7810 + NAPI_POLL_COUNT,
7811 + NAPI_PACKET_COUNT,
7812 + NAPI_DESC_COUNT,
7813 + NAPI_FULL_BUDGET_COUNT,
7814 + NAPI_CLIENT_FULL_COUNT,
7815 + NAPI_MAX_COUNT
7816 +};
7817 +
7818 +/*
7819 + * The HIF_TX_DESC_NT value should always be greater than 4,
7820 + * otherwise HIF_TX_POLL_MARK will become zero.
7821 + */
7822 +#define HIF_RX_DESC_NT 256
7823 +#define HIF_TX_DESC_NT 2048
7824 +
7825 +#define HIF_FIRST_BUFFER BIT(0)
7826 +#define HIF_LAST_BUFFER BIT(1)
7827 +#define HIF_DONT_DMA_MAP BIT(2)
7828 +#define HIF_DATA_VALID BIT(3)
7829 +#define HIF_TSO BIT(4)
7830 +
7831 +enum {
7832 + PFE_CL_GEM0 = 0,
7833 + PFE_CL_GEM1,
7834 + HIF_CLIENTS_MAX
7835 +};
7836 +
7837 +/*structure to store client queue info */
7838 +struct hif_rx_queue {
7839 + struct rx_queue_desc *base;
7840 + u32 size;
7841 + u32 write_idx;
7842 +};
7843 +
7844 +struct hif_tx_queue {
7845 + struct tx_queue_desc *base;
7846 + u32 size;
7847 + u32 ack_idx;
7848 +};
7849 +
7850 +/*Structure to store the client info */
7851 +struct hif_client {
7852 + int rx_qn;
7853 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
7854 + int tx_qn;
7855 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
7856 +};
7857 +
7858 +/*HIF hardware buffer descriptor */
7859 +struct hif_desc {
7860 + u32 ctrl;
7861 + u32 status;
7862 + u32 data;
7863 + u32 next;
7864 +};
7865 +
7866 +struct __hif_desc {
7867 + u32 ctrl;
7868 + u32 status;
7869 + u32 data;
7870 +};
7871 +
7872 +struct hif_desc_sw {
7873 + dma_addr_t data;
7874 + u16 len;
7875 + u8 client_id;
7876 + u8 q_no;
7877 + u16 flags;
7878 +};
7879 +
7880 +struct hif_hdr {
7881 + u8 client_id;
7882 + u8 q_num;
7883 + u16 client_ctrl;
7884 + u16 client_ctrl1;
7885 +};
7886 +
7887 +struct __hif_hdr {
7888 + union {
7889 + struct hif_hdr hdr;
7890 + u32 word[2];
7891 + };
7892 +};
7893 +
7894 +struct hif_ipsec_hdr {
7895 + u16 sa_handle[2];
7896 +} __packed;
7897 +
7898 +/* HIF_CTRL_TX... defines */
7899 +#define HIF_CTRL_TX_CHECKSUM BIT(2)
7900 +
7901 +/* HIF_CTRL_RX... defines */
7902 +#define HIF_CTRL_RX_OFFSET_OFST (24)
7903 +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
7904 +#define HIF_CTRL_RX_CONTINUED BIT(1)
7905 +
7906 +struct pfe_hif {
7907 + /* To store registered clients in hif layer */
7908 + struct hif_client client[HIF_CLIENTS_MAX];
7909 + struct hif_shm *shm;
7910 + int irq;
7911 +
7912 + void *descr_baseaddr_v;
7913 + unsigned long descr_baseaddr_p;
7914 +
7915 + struct hif_desc *rx_base;
7916 + u32 rx_ring_size;
7917 + u32 rxtoclean_index;
7918 + void *rx_buf_addr[HIF_RX_DESC_NT];
7919 + int rx_buf_len[HIF_RX_DESC_NT];
7920 + unsigned int qno;
7921 + unsigned int client_id;
7922 + unsigned int client_ctrl;
7923 + unsigned int started;
7924 +
7925 + struct hif_desc *tx_base;
7926 + u32 tx_ring_size;
7927 + u32 txtosend;
7928 + u32 txtoclean;
7929 + u32 txavail;
7930 + u32 txtoflush;
7931 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
7932 +
7933 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
7934 + spinlock_t tx_lock;
7935 +/* lock synchronizes hif rx queue processing */
7936 + spinlock_t lock;
7937 + struct net_device dummy_dev;
7938 + struct napi_struct napi;
7939 + struct device *dev;
7940 +
7941 +#ifdef HIF_NAPI_STATS
7942 + unsigned int napi_counters[NAPI_MAX_COUNT];
7943 +#endif
7944 + struct tasklet_struct tx_cleanup_tasklet;
7945 +};
7946 +
7947 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7948 + q_no, void *data, u32 len, unsigned int flags);
7949 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
7950 + void *data, unsigned int len);
7951 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
7952 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
7953 + data2);
7954 +int pfe_hif_init(struct pfe *pfe);
7955 +void pfe_hif_exit(struct pfe *pfe);
7956 +void pfe_hif_rx_idle(struct pfe_hif *hif);
7957 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
7958 +{
7959 + spin_lock_bh(&hif->tx_lock);
7960 + __hif_tx_done_process(hif, count);
7961 + spin_unlock_bh(&hif->tx_lock);
7962 +}
7963 +
7964 +static inline void hif_tx_lock(struct pfe_hif *hif)
7965 +{
7966 + spin_lock_bh(&hif->tx_lock);
7967 +}
7968 +
7969 +static inline void hif_tx_unlock(struct pfe_hif *hif)
7970 +{
7971 + spin_unlock_bh(&hif->tx_lock);
7972 +}
7973 +
7974 +static inline int __hif_tx_avail(struct pfe_hif *hif)
7975 +{
7976 + return hif->txavail;
7977 +}
7978 +
7979 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
7980 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
7981 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
7982 +
7983 +#endif /* _PFE_HIF_H_ */
7984 --- /dev/null
7985 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
7986 @@ -0,0 +1,637 @@
7987 +/*
7988 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7989 + * Copyright 2017 NXP
7990 + *
7991 + * This program is free software; you can redistribute it and/or modify
7992 + * it under the terms of the GNU General Public License as published by
7993 + * the Free Software Foundation; either version 2 of the License, or
7994 + * (at your option) any later version.
7995 + *
7996 + * This program is distributed in the hope that it will be useful,
7997 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7998 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7999 + * GNU General Public License for more details.
8000 + *
8001 + * You should have received a copy of the GNU General Public License
8002 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8003 + */
8004 +
8005 +#include <linux/version.h>
8006 +#include <linux/kernel.h>
8007 +#include <linux/slab.h>
8008 +#include <linux/interrupt.h>
8009 +#include <linux/workqueue.h>
8010 +#include <linux/dma-mapping.h>
8011 +#include <linux/dmapool.h>
8012 +#include <linux/sched.h>
8013 +#include <linux/skbuff.h>
8014 +#include <linux/moduleparam.h>
8015 +#include <linux/cpu.h>
8016 +
8017 +#include "pfe_mod.h"
8018 +#include "pfe_hif.h"
8019 +#include "pfe_hif_lib.h"
8020 +
8021 +unsigned int lro_mode;
8022 +unsigned int page_mode;
8023 +unsigned int tx_qos = 1;
8024 +module_param(tx_qos, uint, 0444);
8025 +MODULE_PARM_DESC(tx_qos, "0: disable,\n"
8026 +		 "1: enable (default), guarantees no packet drop at TMU level\n");
8027 +unsigned int pfe_pkt_size;
8028 +unsigned int pfe_pkt_headroom;
8029 +unsigned int emac_txq_cnt;
8030 +
8031 +/*
8032 + * @pfe_hif_lib.c
8033 + * Common functions used by HIF client drivers
8034 + */
8035 +
8036 +/*HIF shared memory Global variable */
8037 +struct hif_shm ghif_shm;
8038 +
8039 +/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
8040 + * This function should be called after pfe_hif_exit
8041 + *
8042 + * @param[in] hif_shm Shared memory address location in DDR
8043 + */
8044 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8045 +{
8046 + int i;
8047 + void *pkt;
8048 +
8049 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8050 + pkt = hif_shm->rx_buf_pool[i];
8051 + if (pkt) {
8052 + hif_shm->rx_buf_pool[i] = NULL;
8053 + pkt -= pfe_pkt_headroom;
8054 +
8055 + if (page_mode)
8056 + put_page(virt_to_page(pkt));
8057 + else
8058 + kfree(pkt);
8059 + }
8060 + }
8061 +}
8062 +
8063 +/* Initialize shared memory used between HIF driver and clients,
8064 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8065 + * This function should be called before initializing HIF driver.
8066 + *
8067 + * @param[in] hif_shm Shared memory address location in DDR
8068 + * @return 0 - on success, <0 on failure to initialize
8069 + */
8070 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8071 +{
8072 + int i;
8073 + void *pkt;
8074 +
8075 + memset(hif_shm, 0, sizeof(struct hif_shm));
8076 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8077 +
8078 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8079 + if (page_mode) {
8080 + pkt = (void *)__get_free_page(GFP_KERNEL |
8081 + GFP_DMA_PFE);
8082 + } else {
8083 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8084 + }
8085 +
8086 + if (pkt)
8087 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8088 + else
8089 + goto err0;
8090 + }
8091 +
8092 + return 0;
8093 +
8094 +err0:
8095 + pr_err("%s Low memory\n", __func__);
8096 + pfe_hif_shm_clean(hif_shm);
8097 + return -ENOMEM;
8098 +}
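
/*
 * Illustration only -- a hypothetical helper showing the headroom
 * invariant: pool entries point pfe_pkt_headroom bytes past the start
 * of the underlying allocation, so the cleanup path above steps back
 * by the same amount before kfree()/put_page(). Non-page-mode case:
 */
static void rx_pool_headroom_example(struct hif_shm *hif_shm, int i)
{
	void *raw = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);

	if (!raw)
		return;

	/* the pool stores the packet start, past the reserved headroom */
	hif_shm->rx_buf_pool[i] = raw + pfe_pkt_headroom;

	/* teardown must recover the allocation start before freeing */
	kfree(hif_shm->rx_buf_pool[i] - pfe_pkt_headroom);
	hif_shm->rx_buf_pool[i] = NULL;
}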
8099 +
8100 +/* This function sends an indication to the HIF driver
8101 + *
8102 + * @param[in] hif hif context
8103 + */
8104 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8105 + data2)
8106 +{
8107 + hif_process_client_req(hif, req, data1, data2);
8108 +}
8109 +
8110 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8111 +{
8112 + struct hif_client_s *client = pfe->hif_client[client_id];
8113 +
8114 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8115 + HIF_CLIENT_QUEUES_MAX))
8116 + return;
8117 +
8118 + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8119 + client->event_handler(client->priv, event_type, qno);
8120 +}
8121 +
8122 +/* This function releases the Rx queue descriptor memory and pre-filled buffers
8123 + *
8124 + * @param[in] client hif_client context
8125 + */
8126 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8127 +{
8128 + struct rx_queue_desc *desc;
8129 + int qno, ii;
8130 + void *buf;
8131 +
8132 + for (qno = 0; qno < client->rx_qn; qno++) {
8133 + desc = client->rx_q[qno].base;
8134 +
8135 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8136 + buf = (void *)desc->data;
8137 + if (buf) {
8138 + buf -= pfe_pkt_headroom;
8139 +
8140 + if (page_mode)
8141 + free_page((unsigned long)buf);
8142 + else
8143 + kfree(buf);
8144 +
8145 + desc->ctrl = 0;
8146 + }
8147 +
8148 + desc++;
8149 + }
8150 + }
8151 +
8152 + kfree(client->rx_qbase);
8153 +}
8154 +
8155 +/* This function allocates memory for the rxq descriptors and pre-fills the
8156 + * rx queues with buffers.
8157 + * @param[in] client client context
8158 + * @param[in] q_size size of the rxQ, all queues are of the same size
8159 + */
8160 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8161 + q_size)
8162 +{
8163 + struct rx_queue_desc *desc;
8164 + struct hif_client_rx_queue *queue;
8165 + int ii, qno;
8166 +
8167 + /*Allocate memory for the client queues */
8168 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8169 + rx_queue_desc), GFP_KERNEL);
8170 + if (!client->rx_qbase)
8171 + goto err;
8172 +
8173 + for (qno = 0; qno < client->rx_qn; qno++) {
8174 + queue = &client->rx_q[qno];
8175 +
8176 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8177 + rx_queue_desc);
8178 + queue->size = q_size;
8179 + queue->read_idx = 0;
8180 + queue->write_idx = 0;
8181 +
8182 + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8183 + queue->base, queue->size);
8184 + }
8185 +
8186 + for (qno = 0; qno < client->rx_qn; qno++) {
8187 + queue = &client->rx_q[qno];
8188 + desc = queue->base;
8189 +
8190 + for (ii = 0; ii < queue->size; ii++) {
8191 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8192 + CL_DESC_OWN;
8193 + desc++;
8194 + }
8195 + }
8196 +
8197 + return 0;
8198 +
8199 +err:
8200 + return 1;
8201 +}
8202 +
8204 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8205 +{
8206 + pr_debug("%s\n", __func__);
8207 +
8208 + /*
8209 +	 * Check if there are any pending packets. The client must flush the
8210 +	 * tx queues before unregistering, by calling
8211 +	 * hif_lib_tx_get_next_complete().
8212 +	 *
8213 +	 * HIF no longer calls our event handler since we are no longer registered.
8214 + */
8215 + if (queue->tx_pending)
8216 + pr_err("%s: pending transmit packets\n", __func__);
8217 +}
8218 +
8219 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8220 +{
8221 + int qno;
8222 +
8223 + pr_debug("%s\n", __func__);
8224 +
8225 + for (qno = 0; qno < client->tx_qn; qno++)
8226 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8227 +
8228 + kfree(client->tx_qbase);
8229 +}
8230 +
8231 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8232 + q_size)
8233 +{
8234 + struct hif_client_tx_queue *queue;
8235 + int qno;
8236 +
8237 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8238 + tx_queue_desc), GFP_KERNEL);
8239 + if (!client->tx_qbase)
8240 + return 1;
8241 +
8242 + for (qno = 0; qno < client->tx_qn; qno++) {
8243 + queue = &client->tx_q[qno];
8244 +
8245 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8246 + tx_queue_desc);
8247 + queue->size = q_size;
8248 + queue->read_idx = 0;
8249 + queue->write_idx = 0;
8250 + queue->tx_pending = 0;
8251 + queue->nocpy_flag = 0;
8252 + queue->prev_tmu_tx_pkts = 0;
8253 + queue->done_tmu_tx_pkts = 0;
8254 +
8255 + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8256 + queue->base, queue->size);
8257 + }
8258 +
8259 + return 0;
8260 +}
8261 +
8262 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8263 +{
8264 + return 0;
8265 +}
8266 +
8267 +int hif_lib_client_register(struct hif_client_s *client)
8268 +{
8269 + struct hif_shm *hif_shm;
8270 + struct hif_client_shm *client_shm;
8271 + int err, i;
8272 + /* int loop_cnt = 0; */
8273 +
8274 + pr_debug("%s\n", __func__);
8275 +
8276 + /*Allocate memory before spin_lock*/
8277 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8278 + err = -ENOMEM;
8279 + goto err_rx;
8280 + }
8281 +
8282 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8283 + err = -ENOMEM;
8284 + goto err_tx;
8285 + }
8286 +
8287 + spin_lock_bh(&pfe->hif.lock);
8288 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8289 + (pfe->hif_client[client->id])) {
8290 + err = -EINVAL;
8291 + goto err;
8292 + }
8293 +
8294 + hif_shm = client->pfe->hif.shm;
8295 +
8296 + if (!client->event_handler)
8297 + client->event_handler = hif_lib_event_dummy;
8298 +
8299 + /*Initialize client specific shared memory */
8300 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8301 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8302 + client_shm->rx_qsize = client->rx_qsize;
8303 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8304 + client_shm->tx_qsize = client->tx_qsize;
8305 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8306 + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8307 + /* spin_lock_init(&client->rx_lock); */
8308 +
8309 + for (i = 0; i < HIF_EVENT_MAX; i++) {
8310 + client->queue_mask[i] = 0; /*
8311 + * By default all events are
8312 + * unmasked
8313 + */
8314 + }
8315 +
8316 + /*Indicate to HIF driver*/
8317 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8318 +
8319 + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8320 + __func__, client, client->id, client->tx_qsize,
8321 + client->rx_qsize);
8322 +
8323 + client->cpu_id = -1;
8324 +
8325 + pfe->hif_client[client->id] = client;
8326 + spin_unlock_bh(&pfe->hif.lock);
8327 +
8328 + return 0;
8329 +
8330 +err:
8331 + spin_unlock_bh(&pfe->hif.lock);
8332 + hif_lib_client_release_tx_buffers(client);
8333 +
8334 +err_tx:
8335 + hif_lib_client_release_rx_buffers(client);
8336 +
8337 +err_rx:
8338 + return err;
8339 +}
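
/*
 * Illustration only -- a hypothetical caller, modelled on what an
 * ethernet client such as pfe_eth is expected to do; the handler and
 * priv arguments are placeholders:
 */
static int hif_lib_register_example(struct pfe *pfe,
				    int (*handler)(void *priv, int event,
						   int qno),
				    void *priv)
{
	static struct hif_client_s cl;

	cl.id = PFE_CL_GEM0;
	cl.pfe = pfe;
	cl.tx_qn = emac_txq_cnt;
	cl.rx_qn = 1;
	cl.tx_qsize = 128;	/* queue sizes must be powers of two */
	cl.rx_qsize = 128;
	cl.event_handler = handler;
	cl.priv = priv;

	return hif_lib_client_register(&cl);
}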
8340 +
8341 +int hif_lib_client_unregister(struct hif_client_s *client)
8342 +{
8343 + struct pfe *pfe = client->pfe;
8344 + u32 client_id = client->id;
8345 +
8346 + pr_info(
8347 + "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
8348 + , __func__, client, client->id, client->tx_qsize,
8349 + client->rx_qsize);
8350 +
8351 + spin_lock_bh(&pfe->hif.lock);
8352 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8353 +
8354 + hif_lib_client_release_tx_buffers(client);
8355 + hif_lib_client_release_rx_buffers(client);
8356 + pfe->hif_client[client_id] = NULL;
8357 + spin_unlock_bh(&pfe->hif.lock);
8358 +
8359 + return 0;
8360 +}
8361 +
8362 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8363 + int qno)
8364 +{
8365 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8366 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
8367 +
8368 + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8369 + pr_debug("%s: Unsupported event : %d queue number : %d\n",
8370 + __func__, event, qno);
8371 + return -1;
8372 + }
8373 +
8374 + test_and_clear_bit(qno, &client->queue_mask[event]);
8375 +
8376 + switch (event) {
8377 + case EVENT_RX_PKT_IND:
8378 + if (!(desc->ctrl & CL_DESC_OWN))
8379 + hif_lib_indicate_client(client->id,
8380 + EVENT_RX_PKT_IND, qno);
8381 + break;
8382 +
8383 + case EVENT_HIGH_RX_WM:
8384 + case EVENT_TXDONE_IND:
8385 + default:
8386 + break;
8387 + }
8388 +
8389 + return 0;
8390 +}
8391 +
8392 +/*
8393 + * This function gets one packet from the specified client queue.
8394 + * It also refills the rx buffer descriptor.
8395 + */
8396 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8397 + *ofst, unsigned int *rx_ctrl,
8398 + unsigned int *desc_ctrl, void **priv_data)
8399 +{
8400 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8401 + struct rx_queue_desc *desc;
8402 + void *pkt = NULL;
8403 +
8404 + /*
8405 +	 * The following lock protects rx queue access from
8406 +	 * hif_lib_event_handler_start.
8407 +	 * In general the lock is not required, because hif_lib_xmit_pkt and
8408 +	 * hif_lib_event_handler_start are called from the napi poll loop,
8409 +	 * which is not re-entrant. But if some client uses them differently,
8410 +	 * the lock is required.
8411 + */
8412 + /*spin_lock_irqsave(&client->rx_lock, flags); */
8413 + desc = queue->base + queue->read_idx;
8414 + if (!(desc->ctrl & CL_DESC_OWN)) {
8415 + pkt = desc->data - pfe_pkt_headroom;
8416 +
8417 + *rx_ctrl = desc->client_ctrl;
8418 + *desc_ctrl = desc->ctrl;
8419 +
8420 + if (desc->ctrl & CL_DESC_FIRST) {
8421 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8422 +
8423 + if (size) {
8424 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8425 + PFE_PKT_HEADER_SZ - size;
8426 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8427 + + size;
8428 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8429 + } else {
8430 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8431 + PFE_PKT_HEADER_SZ;
8432 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
8433 + *priv_data = NULL;
8434 + }
8435 +
8436 + } else {
8437 + *len = CL_DESC_BUF_LEN(desc->ctrl);
8438 + *ofst = pfe_pkt_headroom;
8439 + }
8440 +
8441 + /*
8442 + * Needed so we don't free a buffer/page
8443 + * twice on module_exit
8444 + */
8445 + desc->data = NULL;
8446 +
8447 + /*
8448 + * Ensure everything else is written to DDR before
8449 + * writing bd->ctrl
8450 + */
8451 + smp_wmb();
8452 +
8453 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8454 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8455 + }
8456 +
8457 + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8458 + return pkt;
8459 +}
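
/*
 * Illustration only -- a hypothetical drain loop, as a client's
 * EVENT_RX_PKT_IND handler might use it. All output parameters must
 * be supplied; a NULL return means the queue is empty:
 */
static void hif_lib_rx_drain_example(struct hif_client_s *client, int qno)
{
	unsigned int rx_ctrl, desc_ctrl;
	void *pkt, *priv_data;
	int len, ofst;

	while ((pkt = hif_lib_receive_pkt(client, qno, &len, &ofst,
					  &rx_ctrl, &desc_ctrl,
					  &priv_data))) {
		/* the payload starts at pkt + ofst and is len bytes long */
	}
}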
8460 +
8461 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8462 + client_id, unsigned int qno,
8463 + u32 client_ctrl)
8464 +{
8465 +	/* Optimize the write since the destination may be non-cacheable */
8466 + if (!((unsigned long)pkt_hdr & 0x3)) {
8467 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8468 + client_id;
8469 + } else {
8470 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8471 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8472 + }
8473 +}
8474 +
8475 +/*This function puts the given packet in the specific client queue */
8476 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8477 + *data, unsigned int len, u32 client_ctrl,
8478 + unsigned int flags, void *client_data)
8479 +{
8480 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8481 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
8482 +
8483 + /* First buffer */
8484 + if (flags & HIF_FIRST_BUFFER) {
8485 + data -= sizeof(struct hif_hdr);
8486 + len += sizeof(struct hif_hdr);
8487 +
8488 + hif_hdr_write(data, client->id, qno, client_ctrl);
8489 + }
8490 +
8491 + desc->data = client_data;
8492 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8493 +
8494 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8495 +
8496 + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8497 + queue->tx_pending++;
8498 + queue->jiffies_last_packet = jiffies;
8499 +}
8500 +
8501 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8502 + unsigned int *flags, int count)
8503 +{
8504 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8505 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
8506 +
8507 + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
8508 + queue->read_idx, queue->tx_pending);
8509 +
8510 + if (!queue->tx_pending)
8511 + return NULL;
8512 +
8513 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
8514 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
8515 + client->id, TMU_DM_TX_TRANS, 4));
8516 +
8517 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
8518 + queue->done_tmu_tx_pkts = UINT_MAX -
8519 + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
8520 + else
8521 + queue->done_tmu_tx_pkts = tmu_tx_pkts -
8522 + queue->prev_tmu_tx_pkts;
8523 +
8524 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
8525 +
8526 + if (!queue->done_tmu_tx_pkts)
8527 + return NULL;
8528 + }
8529 +
8530 + if (desc->ctrl & CL_DESC_OWN)
8531 + return NULL;
8532 +
8533 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8534 + queue->tx_pending--;
8535 +
8536 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
8537 +
8538 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
8539 + queue->done_tmu_tx_pkts--;
8540 +
8541 + return desc->data;
8542 +}
8543 +
8544 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
8545 +{
8546 + int i, q;
8547 +
8548 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
8549 + for (q = 0; q < emac_txq_cnt; q++) {
8550 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
8551 + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
8552 + pfe->tmu_credit.tx_credit[i][q] =
8553 + pfe->tmu_credit.tx_credit_max[i][q];
8554 + }
8555 +}
8556 +
8557 +/* __hif_lib_update_credit
8558 + *
8559 + * @param[in] client hif client context
8560 + * @param[in] queue queue number matching the TMU queue
8561 + */
8562 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
8563 +{
8564 + unsigned int tmu_tx_packets, tmp;
8565 +
8566 + if (tx_qos) {
8567 + tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
8568 + client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
8569 +
8570 + /* tx_packets counter overflowed */
8571 + if (tmu_tx_packets >
8572 + pfe->tmu_credit.tx_packets[client->id][queue]) {
8573 + tmp = UINT_MAX - tmu_tx_packets +
8574 + pfe->tmu_credit.tx_packets[client->id][queue];
8575 +
8576 + pfe->tmu_credit.tx_credit[client->id][queue] =
8577 + pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
8578 + } else {
8579 +			/* TMU tx <= pfe_eth tx: the normal case, or both have
8580 +			 * overflowed since last time
8581 + */
8582 + pfe->tmu_credit.tx_credit[client->id][queue] =
8583 + pfe->tmu_credit.tx_credit_max[client->id][queue] -
8584 + (pfe->tmu_credit.tx_packets[client->id][queue] -
8585 + tmu_tx_packets);
8586 + }
8587 + }
8588 +}
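
/*
 * Note (illustration only): the two branches above compute a
 * wrap-safe "packets in flight" delta between free-running u32
 * counters. Plain unsigned subtraction gives nearly the same result:
 */
static inline u32 counter_delta(u32 newer, u32 older)
{
	return newer - older;	/* well defined across a u32 wrap */
}
/*
 * The hand-rolled branch above uses UINT_MAX rather than 2^32 as the
 * modulus, so after a wrap it comes out one less than this helper.
 */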
8589 +
8590 +int pfe_hif_lib_init(struct pfe *pfe)
8591 +{
8592 + int rc;
8593 +
8594 + pr_info("%s\n", __func__);
8595 +
8596 + if (lro_mode) {
8597 + page_mode = 1;
8598 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
8599 + pfe_pkt_headroom = 0;
8600 + } else {
8601 + page_mode = 0;
8602 + pfe_pkt_size = PFE_PKT_SIZE;
8603 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
8604 + }
8605 +
8606 + if (tx_qos)
8607 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
8608 + else
8609 + emac_txq_cnt = EMAC_TXQ_CNT;
8610 +
8611 + hif_lib_tmu_credit_init(pfe);
8612 + pfe->hif.shm = &ghif_shm;
8613 + rc = pfe_hif_shm_init(pfe->hif.shm);
8614 +
8615 + return rc;
8616 +}
8617 +
8618 +void pfe_hif_lib_exit(struct pfe *pfe)
8619 +{
8620 + pr_info("%s\n", __func__);
8621 +
8622 + pfe_hif_shm_clean(pfe->hif.shm);
8623 +}
8624 --- /dev/null
8625 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8626 @@ -0,0 +1,240 @@
8627 +/*
8628 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8629 + * Copyright 2017 NXP
8630 + *
8631 + * This program is free software; you can redistribute it and/or modify
8632 + * it under the terms of the GNU General Public License as published by
8633 + * the Free Software Foundation; either version 2 of the License, or
8634 + * (at your option) any later version.
8635 + *
8636 + * This program is distributed in the hope that it will be useful,
8637 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8638 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8639 + * GNU General Public License for more details.
8640 + *
8641 + * You should have received a copy of the GNU General Public License
8642 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8643 + */
8644 +
8645 +#ifndef _PFE_HIF_LIB_H_
8646 +#define _PFE_HIF_LIB_H_
8647 +
8648 +#include "pfe_hif.h"
8649 +
8650 +#define HIF_CL_REQ_TIMEOUT 10
8651 +#define GFP_DMA_PFE 0
8652 +
8653 +enum {
8654 + REQUEST_CL_REGISTER = 0,
8655 + REQUEST_CL_UNREGISTER,
8656 + HIF_REQUEST_MAX
8657 +};
8658 +
8659 +enum {
8660 +	/* Event to indicate that the client rx queue has reached the watermark */
8661 +	EVENT_HIGH_RX_WM = 0,
8662 +	/* Event to indicate that a packet was received for the client */
8663 +	EVENT_RX_PKT_IND,
8664 +	/* Event to indicate that packet tx is done for the client */
8665 + EVENT_TXDONE_IND,
8666 + HIF_EVENT_MAX
8667 +};
8668 +
8669 +/* structure to store client queue info */
8672 +struct hif_client_rx_queue {
8673 + struct rx_queue_desc *base;
8674 + u32 size;
8675 + u32 read_idx;
8676 + u32 write_idx;
8677 +};
8678 +
8679 +struct hif_client_tx_queue {
8680 + struct tx_queue_desc *base;
8681 + u32 size;
8682 + u32 read_idx;
8683 + u32 write_idx;
8684 + u32 tx_pending;
8685 + unsigned long jiffies_last_packet;
8686 + u32 nocpy_flag;
8687 + u32 prev_tmu_tx_pkts;
8688 + u32 done_tmu_tx_pkts;
8689 +};
8690 +
8691 +struct hif_client_s {
8692 + int id;
8693 + int tx_qn;
8694 + int rx_qn;
8695 + void *rx_qbase;
8696 + void *tx_qbase;
8697 + int tx_qsize;
8698 + int rx_qsize;
8699 + int cpu_id;
8700 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8701 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8702 + int (*event_handler)(void *priv, int event, int data);
8703 + unsigned long queue_mask[HIF_EVENT_MAX];
8704 + struct pfe *pfe;
8705 + void *priv;
8706 +};
8707 +
8708 +/*
8709 + * Client specific shared memory
8710 + * It contains number of Rx/Tx queues, base addresses and queue sizes
8711 + */
8712 +struct hif_client_shm {
8713 + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
8714 + unsigned long rx_qbase; /*Rx queue base address */
8715 + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
8716 + unsigned long tx_qbase; /* Tx queue base address */
8717 + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
8718 +};
8719 +
8720 +/*Client shared memory ctrl bit description */
8721 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
8722 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
8723 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
8724 + & 0xFF)
8725 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
8726 + & 0xFF)
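+
+/*
+ * Example (illustrative): a client advertising 2 Rx and 4 Tx queues would
+ * publish ctrl = (4 << CLIENT_CTRL_TX_Q_CNT_OFST) | 2, so that
+ * CLIENT_CTRL_RX_Q_CNT(ctrl) == 2 and CLIENT_CTRL_TX_Q_CNT(ctrl) == 4.
+ */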
8727 +
8728 +/*
8729 + * Shared memory used to communicate between the HIF driver and
8730 + * host/client drivers. Before starting the HIF driver, rx_buf_pool and
8731 + * rx_buf_pool_cnt must be initialized with host buffers and the buffer
8732 + * count in the pool. rx_buf_pool_cnt must be >= HIF_RX_DESC_NT.
8733 + */
8735 +struct hif_shm {
8736 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
8737 + /*Rx buffers required to initialize HIF rx descriptors */
8738 + void *rx_buf_pool[HIF_RX_DESC_NT];
8739 + unsigned long g_client_status[2]; /*Global client status bit mask */
8740 + /* Client specific shared memory */
8741 + struct hif_client_shm client[HIF_CLIENTS_MAX];
8742 +};
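+
+/*
+ * Minimal sketch of the initialization contract described above (the
+ * allocation scheme is an assumption, not the driver's actual code):
+ *
+ *	struct hif_shm *shm = pfe->hif.shm;
+ *	int i;
+ *
+ *	for (i = 0; i < HIF_RX_DESC_NT; i++)
+ *		shm->rx_buf_pool[i] = kmalloc(PFE_BUF_SIZE, GFP_KERNEL);
+ *	shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;	// must be >= HIF_RX_DESC_NT
+ */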
8743 +
8744 +#define CL_DESC_OWN BIT(31)
8745 +/* This sets ownership to the HIF driver */
8746 +#define CL_DESC_LAST BIT(30)
8747 +/* This marks the last buffer of a multi-buffer packet */
8748 +#define CL_DESC_FIRST BIT(29)
8749 +/* This marks the first buffer of a multi-buffer packet */
8750 +
8751 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
8752 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
8753 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
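+
+/*
+ * Example (illustrative): a packet that fits in a single buffer hands the
+ * descriptor back to the HIF driver with both boundary bits set:
+ *
+ *	desc->ctrl = CL_DESC_OWN | CL_DESC_FIRST | CL_DESC_LAST |
+ *		     CL_DESC_BUF_LEN(len);
+ */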
8754 +
8755 +struct rx_queue_desc {
8756 + void *data;
8757 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8758 + u32 client_ctrl;
8759 +};
8760 +
8761 +struct tx_queue_desc {
8762 + void *data;
8763 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8764 +};
8765 +
8766 +/* HIF Rx does not work properly with 2-byte aligned buffers, and the
8767 + * ip_header should be 4-byte aligned for better performance.
8768 + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned.
8769 + */
8770 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
8771 +/* must be big enough for headroom, pkt size and skb shared info */
8772 +#define PFE_BUF_SIZE 2048
8773 +#define PFE_PKT_HEADROOM 128
8774 +
8775 +#define SKB_SHARED_INFO_SIZE (sizeof(struct skb_shared_info))
8776 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
8777 + - SKB_SHARED_INFO_SIZE)
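+/*
+ * Worked example (sizeof(struct skb_shared_info) is platform-dependent;
+ * 320 bytes is only an assumption): PFE_PKT_SIZE would then be
+ * 2048 - 128 - 320 = 1600 bytes of packet data per buffer.
+ */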
8778 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
8779 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
8780 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
8781 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
8782 + + MAX_L4_HDR_SIZE)
8783 +/* Used in page mode to clamp packet size to the maximum supported by the hif
8784 + * hw interface (<16KiB)
8785 + */
8786 +#define MAX_PFE_PKT_SIZE 16380UL
8787 +
8788 +extern unsigned int pfe_pkt_size;
8789 +extern unsigned int pfe_pkt_headroom;
8790 +extern unsigned int page_mode;
8791 +extern unsigned int lro_mode;
8792 +extern unsigned int tx_qos;
8793 +extern unsigned int emac_txq_cnt;
8794 +
8795 +int pfe_hif_lib_init(struct pfe *pfe);
8796 +void pfe_hif_lib_exit(struct pfe *pfe);
8797 +int hif_lib_client_register(struct hif_client_s *client);
8798 +int hif_lib_client_unregister(struct hif_client_s *client);
8799 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8800 + *data, unsigned int len, u32 client_ctrl,
8801 + unsigned int flags, void *client_data);
8802 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
8803 + unsigned int len, u32 client_ctrl, void *client_data);
8804 +void hif_lib_indicate_client(int cl_id, int event, int data);
8805 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
8806 + data);
8807 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
8808 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
8809 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8810 + unsigned int *flags, int count);
8811 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8812 + *ofst, unsigned int *rx_ctrl,
8813 + unsigned int *desc_ctrl, void **priv_data);
8814 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
8815 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
8816 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
8817 + enable);
8818 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
8819 + qno)
8820 +{
8821 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8822 +
8823 + return (queue->size - queue->tx_pending);
8824 +}
8825 +
8826 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
8827 + int qno)
8828 +{
8829 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8830 +
8831 + return queue->write_idx;
8832 +}
8833 +
8834 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
8835 + qno)
8836 +{
8837 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8838 +
8839 + return queue->tx_pending;
8840 +}
8841 +
8842 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
8843 + ((pfe)->tmu_credit.tx_credit[id][qno])
8844 +
8845 +#define hif_lib_tx_credit_max(pfe, id, qno) \
8846 + ((pfe)->tmu_credit.tx_credit_max[id][qno])
8847 +
8848 +/*
8849 + * Consume tx credit and account tx packets for a queue when tx_qos is on
8850 + */
8851 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
8852 + ({ typeof(pfe) pfe_ = pfe; \
8853 + typeof(id) id_ = id; \
8854 +	typeof(qno) qno_ = qno;	\
8855 + typeof(credit) credit_ = credit; \
8856 + do { \
8857 + if (tx_qos) { \
8858 + (pfe_)->tmu_credit.tx_credit[id_][qno_]\
8859 + -= credit_; \
8860 + (pfe_)->tmu_credit.tx_packets[id_][qno_]\
8861 + += credit_; \
8862 + } \
8863 + } while (0); \
8864 + })
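+
+/*
+ * Illustrative use of the credit macros above (id/qno values come from the
+ * caller; this is not the driver's transmit path): check availability
+ * before consuming one credit per transmitted packet:
+ *
+ *	if (tx_qos && !hif_lib_tx_credit_avail(pfe, id, qno))
+ *		return NETDEV_TX_BUSY;
+ *	hif_lib_tx_credit_use(pfe, id, qno, 1);
+ */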
8865 +
8866 +#endif /* _PFE_HIF_LIB_H_ */
8867 --- /dev/null
8868 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
8869 @@ -0,0 +1,176 @@
8870 +/*
8871 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8872 + * Copyright 2017 NXP
8873 + *
8874 + * This program is free software; you can redistribute it and/or modify
8875 + * it under the terms of the GNU General Public License as published by
8876 + * the Free Software Foundation; either version 2 of the License, or
8877 + * (at your option) any later version.
8878 + *
8879 + * This program is distributed in the hope that it will be useful,
8880 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8881 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8882 + * GNU General Public License for more details.
8883 + *
8884 + * You should have received a copy of the GNU General Public License
8885 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8886 + */
8887 +
8888 +#include "pfe_mod.h"
8889 +#include "pfe_hw.h"
8890 +
8891 +/* Functions to handle most of pfe hw register initialization */
8892 +int pfe_hw_init(struct pfe *pfe, int resume)
8893 +{
8894 + struct class_cfg class_cfg = {
8895 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8896 + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
8897 + ROUTE_TABLE_BASEADDR,
8898 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
8899 + };
8900 +
8901 + struct tmu_cfg tmu_cfg = {
8902 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8903 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
8904 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
8905 + };
8906 +
8907 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8908 + struct util_cfg util_cfg = {
8909 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8910 + };
8911 +#endif
8912 +
8913 + struct BMU_CFG bmu1_cfg = {
8914 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
8915 + BMU1_LMEM_BASEADDR),
8916 + .count = BMU1_BUF_COUNT,
8917 + .size = BMU1_BUF_SIZE,
8918 + .low_watermark = 10,
8919 + .high_watermark = 15,
8920 + };
8921 +
8922 + struct BMU_CFG bmu2_cfg = {
8923 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
8924 + BMU2_DDR_BASEADDR),
8925 + .count = BMU2_BUF_COUNT,
8926 + .size = BMU2_BUF_SIZE,
8927 + .low_watermark = 250,
8928 + .high_watermark = 253,
8929 + };
8930 +
8931 + struct gpi_cfg egpi1_cfg = {
8932 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
8933 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
8934 + .aseq_len = EGPI1_ASEQ_LEN,
8935 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
8936 + EMAC_TCNTRL_REG),
8937 + };
8938 +
8939 + struct gpi_cfg egpi2_cfg = {
8940 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
8941 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
8942 + .aseq_len = EGPI2_ASEQ_LEN,
8943 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
8944 + EMAC_TCNTRL_REG),
8945 + };
8946 +
8947 + struct gpi_cfg hgpi_cfg = {
8948 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
8949 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
8950 + .aseq_len = HGPI_ASEQ_LEN,
8951 + .mtip_pause_reg = 0,
8952 + };
8953 +
8954 + pr_info("%s\n", __func__);
8955 +
8956 +#if !defined(LS1012A_PFE_RESET_WA)
8957 + /* LS1012A needs this to make PE work correctly */
8958 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
8959 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
8960 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
8961 + usleep_range(10, 20);
8962 +#endif
8963 +
8964 + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
8965 + pr_info("TMU version: %x\n", readl(TMU_VERSION));
8966 +
8967 + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
8968 + BMU_VERSION));
8969 + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
8970 + BMU_VERSION));
8971 +
8972 + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
8973 + GPI_VERSION));
8974 + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
8975 + GPI_VERSION));
8976 + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
8977 + GPI_VERSION));
8978 +
8979 + pr_info("HIF version: %x\n", readl(HIF_VERSION));
8980 + pr_info("HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
8981 +
8982 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8983 + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
8984 +#endif
8985 + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
8986 + ;
8987 +
8988 + hif_rx_disable();
8989 + hif_tx_disable();
8990 +
8991 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
8992 +
8993 + pr_info("bmu_init(1) done\n");
8994 +
8995 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
8996 +
8997 + pr_info("bmu_init(2) done\n");
8998 +
8999 + class_cfg.resume = resume ? 1 : 0;
9000 +
9001 + class_init(&class_cfg);
9002 +
9003 + pr_info("class_init() done\n");
9004 +
9005 + tmu_init(&tmu_cfg);
9006 +
9007 + pr_info("tmu_init() done\n");
9008 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9009 + util_init(&util_cfg);
9010 +
9011 + pr_info("util_init() done\n");
9012 +#endif
9013 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
9014 +
9015 + pr_info("gpi_init(1) done\n");
9016 +
9017 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
9018 +
9019 + pr_info("gpi_init(2) done\n");
9020 +
9021 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
9022 +
9023 + pr_info("gpi_init(hif) done\n");
9024 +
9025 + bmu_enable(BMU1_BASE_ADDR);
9026 +
9027 + pr_info("bmu_enable(1) done\n");
9028 +
9029 + bmu_enable(BMU2_BASE_ADDR);
9030 +
9031 + pr_info("bmu_enable(2) done\n");
9032 +
9033 + return 0;
9034 +}
9035 +
9036 +void pfe_hw_exit(struct pfe *pfe)
9037 +{
9038 + pr_info("%s\n", __func__);
9039 +
9040 + bmu_disable(BMU1_BASE_ADDR);
9041 + bmu_reset(BMU1_BASE_ADDR);
9042 +
9043 + bmu_disable(BMU2_BASE_ADDR);
9044 + bmu_reset(BMU2_BASE_ADDR);
9045 +}
9046 --- /dev/null
9047 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
9048 @@ -0,0 +1,27 @@
9049 +/*
9050 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9051 + * Copyright 2017 NXP
9052 + *
9053 + * This program is free software; you can redistribute it and/or modify
9054 + * it under the terms of the GNU General Public License as published by
9055 + * the Free Software Foundation; either version 2 of the License, or
9056 + * (at your option) any later version.
9057 + *
9058 + * This program is distributed in the hope that it will be useful,
9059 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9060 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9061 + * GNU General Public License for more details.
9062 + *
9063 + * You should have received a copy of the GNU General Public License
9064 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9065 + */
9066 +
9067 +#ifndef _PFE_HW_H_
9068 +#define _PFE_HW_H_
9069 +
9070 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
9071 +
9072 +int pfe_hw_init(struct pfe *pfe, int resume);
9073 +void pfe_hw_exit(struct pfe *pfe);
9074 +
9075 +#endif /* _PFE_HW_H_ */
9076 --- /dev/null
9077 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9078 @@ -0,0 +1,394 @@
9079 +/*
9080 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9081 + * Copyright 2017 NXP
9082 + *
9083 + * This program is free software; you can redistribute it and/or modify
9084 + * it under the terms of the GNU General Public License as published by
9085 + * the Free Software Foundation; either version 2 of the License, or
9086 + * (at your option) any later version.
9087 + *
9088 + * This program is distributed in the hope that it will be useful,
9089 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9090 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9091 + * GNU General Public License for more details.
9092 + *
9093 + * You should have received a copy of the GNU General Public License
9094 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9095 + */
9096 +
9097 +#include <linux/module.h>
9098 +#include <linux/device.h>
9099 +#include <linux/of_net.h>
9100 +#include <linux/of_address.h>
9101 +#include <linux/platform_device.h>
9102 +#include <linux/slab.h>
9103 +#include <linux/clk.h>
9104 +#include <linux/mfd/syscon.h>
9105 +#include <linux/regmap.h>
9106 +
9107 +#include "pfe_mod.h"
9108 +
9109 +struct ls1012a_pfe_platform_data pfe_platform_data;
9110 +
9111 +static int pfe_get_gemac_if_properties(struct device_node *parent, int port,
9112 +				       int if_cnt,
9113 +				       struct ls1012a_pfe_platform_data *pdata)
9115 +{
9116 + struct device_node *gem = NULL, *phy = NULL;
9117 + int size;
9118 + int ii = 0, phy_id = 0;
9119 + const u32 *addr;
9120 + const void *mac_addr;
9121 +
9122 + for (ii = 0; ii < if_cnt; ii++) {
9123 + gem = of_get_next_child(parent, gem);
9124 + if (!gem)
9125 + goto err;
9126 + addr = of_get_property(gem, "reg", &size);
9127 + if (addr && (be32_to_cpup(addr) == port))
9128 + break;
9129 + }
9130 +
9131 + if (ii >= if_cnt) {
9132 +		pr_err("%s:%d Failed to find interface %d\n",
9133 +		       __func__, __LINE__, port);
9134 + goto err;
9135 + }
9136 +
9137 + pdata->ls1012a_eth_pdata[port].gem_id = port;
9138 +
9139 + mac_addr = of_get_mac_address(gem);
9140 +
9141 + if (mac_addr) {
9142 + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9143 + ETH_ALEN);
9144 + }
9145 +
9146 + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9147 +
9148 + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9149 + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9150 + __LINE__);
9151 +
9152 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
9153 + if (!addr)
9154 + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
9155 + __LINE__);
9156 + else
9157 + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
9158 +
9159 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9160 + if (!addr) {
9161 + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9162 + __LINE__);
9163 + } else {
9164 + phy_id = be32_to_cpup(addr);
9165 + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9166 + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9167 + }
9168 +
9169 + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
9170 + if (!addr)
9171 + pr_err("%s: Invalid mdio-mux-val....\n", __func__);
9172 + else
9173 + phy_id = be32_to_cpup(addr);
9174 + pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
9175 +
9176 + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9177 + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9178 + pdata->ls1012a_eth_pdata[port].mdio_muxval;
9179 +
9180 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9181 + if (!addr)
9182 + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
9183 + __func__, __LINE__);
9184 + else
9185 + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9186 +
9187 +	/* If the interface has no PHY attached, skip the mdio properties */
9188 + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9189 + goto done;
9190 +
9191 + phy = of_get_next_child(gem, NULL);
9192 +
9193 + addr = of_get_property(phy, "reg", &size);
9194 +
9195 + if (!addr)
9196 + pr_err("%s:%d Invalid phy enable flag....\n",
9197 + __func__, __LINE__);
9198 + else
9199 + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
9200 +
9201 + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9202 +
9203 +done:
9204 +
9205 + return 0;
9206 +
9207 +err:
9208 + return -1;
9209 +}
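+
+/*
+ * Illustrative device-tree fragment matched by the lookups above (all
+ * property values are placeholders; only the property names come from
+ * this function):
+ *
+ *	ethernet@0 {
+ *		reg = <0>;			// gemac port id
+ *		fsl,gemac-bus-id = <0>;
+ *		fsl,gemac-phy-id = <2>;
+ *		fsl,mdio-mux-val = <0>;
+ *		fsl,pfe-phy-if-flags = <0>;
+ *		phy-mode = "sgmii";		// read via of_get_phy_mode()
+ *	};
+ */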
9210 +
9211 +/*
9212 + * pfe_platform_probe - map PFE resources, parse DT properties and probe
9213 + */
9217 +static int pfe_platform_probe(struct platform_device *pdev)
9218 +{
9219 + struct resource res;
9220 + int ii, rc, interface_count = 0, size = 0;
9221 + const u32 *prop;
9222 + struct device_node *np;
9223 + struct clk *pfe_clk;
9224 +
9225 + np = pdev->dev.of_node;
9226 +
9227 + if (!np) {
9228 + pr_err("Invalid device node\n");
9229 + return -EINVAL;
9230 + }
9231 +
9232 + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9233 + if (!pfe) {
9234 + rc = -ENOMEM;
9235 + goto err_alloc;
9236 + }
9237 +
9238 + platform_set_drvdata(pdev, pfe);
9239 +
9240 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9241 +
9242 + if (of_address_to_resource(np, 1, &res)) {
9243 + rc = -ENOMEM;
9244 + pr_err("failed to get ddr resource\n");
9245 + goto err_ddr;
9246 + }
9247 +
9248 + pfe->ddr_phys_baseaddr = res.start;
9249 + pfe->ddr_size = resource_size(&res);
9250 +
9251 + pfe->ddr_baseaddr = phys_to_virt(res.start);
9252 + if (!pfe->ddr_baseaddr) {
9253 + pr_err("ioremap() ddr failed\n");
9254 + rc = -ENOMEM;
9255 + goto err_ddr;
9256 + }
9257 +
9258 + pfe->scfg =
9259 + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9260 + "fsl,pfe-scfg");
9261 + if (IS_ERR(pfe->scfg)) {
9262 + dev_err(&pdev->dev, "No syscfg phandle specified\n");
9263 + return PTR_ERR(pfe->scfg);
9264 + }
9265 +
9266 + pfe->cbus_baseaddr = of_iomap(np, 0);
9267 + if (!pfe->cbus_baseaddr) {
9268 + rc = -ENOMEM;
9269 + pr_err("failed to get axi resource\n");
9270 + goto err_axi;
9271 + }
9272 +
9273 + pfe->hif_irq = platform_get_irq(pdev, 0);
9274 + if (pfe->hif_irq < 0) {
9275 + pr_err("platform_get_irq for hif failed\n");
9276 + rc = pfe->hif_irq;
9277 + goto err_hif_irq;
9278 + }
9279 +
9280 + pfe->wol_irq = platform_get_irq(pdev, 2);
9281 + if (pfe->wol_irq < 0) {
9282 + pr_err("platform_get_irq for WoL failed\n");
9283 + rc = pfe->wol_irq;
9284 + goto err_hif_irq;
9285 + }
9286 +
9287 + /* Read interface count */
9288 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9289 + if (!prop) {
9290 + pr_err("Failed to read number of interfaces\n");
9291 + rc = -ENXIO;
9292 + goto err_prop;
9293 + }
9294 +
9295 + interface_count = be32_to_cpup(prop);
9296 + if (interface_count <= 0) {
9297 + pr_err("No ethernet interface count : %d\n",
9298 + interface_count);
9299 + rc = -ENXIO;
9300 + goto err_prop;
9301 + }
9302 +
9303 + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9304 +
9305 + for (ii = 0; ii < interface_count; ii++) {
9306 +		pfe_get_gemac_if_properties(np, ii, interface_count,
9307 + &pfe_platform_data);
9308 + }
9309 +
9310 + pfe->dev = &pdev->dev;
9311 +
9312 + pfe->dev->platform_data = &pfe_platform_data;
9313 +
9314 + /* declare WoL capabilities */
9315 + device_init_wakeup(&pdev->dev, true);
9316 +
9317 + /* find the clocks */
9318 + pfe_clk = devm_clk_get(pfe->dev, "pfe");
9319 + if (IS_ERR(pfe_clk))
9320 + return PTR_ERR(pfe_clk);
9321 +
9322 + /* PFE clock is (platform clock / 2) */
9323 + /* save sys_clk value as KHz */
9324 + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9325 +
9326 + rc = pfe_probe(pfe);
9327 + if (rc < 0)
9328 + goto err_probe;
9329 +
9330 + return 0;
9331 +
9332 +err_probe:
9333 +err_prop:
9334 +err_hif_irq:
9335 + iounmap(pfe->cbus_baseaddr);
9336 +
9337 +err_axi:
9338 + iounmap(pfe->ddr_baseaddr);
9339 +
9340 +err_ddr:
9341 + platform_set_drvdata(pdev, NULL);
9342 +
9343 + kfree(pfe);
9344 +
9345 +err_alloc:
9346 + return rc;
9347 +}
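+
+/*
+ * Illustrative platform node for this probe (addresses and sizes are
+ * placeholders; the index usage - reg 0 = cbus via of_iomap(), reg 1 = ddr
+ * via of_address_to_resource(), irq 0 = hif, irq 2 = wol - follows the
+ * code above):
+ *
+ *	pfe@4000000 {
+ *		compatible = "fsl,pfe";
+ *		reg = <0x0 0x04000000 0x0 0xc00000>,
+ *		      <0x0 0x83400000 0x0 0xc00000>;
+ *		fsl,pfe-num-interfaces = <2>;
+ *		fsl,pfe-scfg = <&scfg 0>;
+ *		clocks = <&clockgen 4 0>;
+ *		clock-names = "pfe";
+ *	};
+ */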
9348 +
9349 +/*
9350 + * pfe_platform_remove - tear down the PFE and release its resources
9351 + */
9352 +static int pfe_platform_remove(struct platform_device *pdev)
9353 +{
9354 + struct pfe *pfe = platform_get_drvdata(pdev);
9355 + int rc;
9356 +
9357 + pr_info("%s\n", __func__);
9358 +
9359 + rc = pfe_remove(pfe);
9360 +
9361 + iounmap(pfe->cbus_baseaddr);
9362 + iounmap(pfe->ddr_baseaddr);
9363 +
9364 + platform_set_drvdata(pdev, NULL);
9365 +
9366 + kfree(pfe);
9367 +
9368 + return rc;
9369 +}
9370 +
9371 +#ifdef CONFIG_PM
9372 +#ifdef CONFIG_PM_SLEEP
9373 +int pfe_platform_suspend(struct device *dev)
9374 +{
9375 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9376 + struct net_device *netdev;
9377 + int i;
9378 +
9379 + pfe->wake = 0;
9380 +
9381 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9382 + netdev = pfe->eth.eth_priv[i]->ndev;
9383 +
9384 + netif_device_detach(netdev);
9385 +
9386 + if (netif_running(netdev))
9387 + if (pfe_eth_suspend(netdev))
9388 + pfe->wake = 1;
9389 + }
9390 +
9391 + /* Shutdown PFE only if we're not waking up the system */
9392 + if (!pfe->wake) {
9393 +#if defined(LS1012A_PFE_RESET_WA)
9394 + pfe_hif_rx_idle(&pfe->hif);
9395 +#endif
9396 + pfe_ctrl_suspend(&pfe->ctrl);
9397 + pfe_firmware_exit(pfe);
9398 +
9399 + pfe_hif_exit(pfe);
9400 + pfe_hif_lib_exit(pfe);
9401 +
9402 + pfe_hw_exit(pfe);
9403 + }
9404 +
9405 + return 0;
9406 +}
9407 +
9408 +static int pfe_platform_resume(struct device *dev)
9409 +{
9410 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9411 + struct net_device *netdev;
9412 + int i;
9413 +
9414 + if (!pfe->wake) {
9415 + pfe_hw_init(pfe, 1);
9416 + pfe_hif_lib_init(pfe);
9417 + pfe_hif_init(pfe);
9418 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9419 + util_enable();
9420 +#endif
9421 + tmu_enable(0xf);
9422 + class_enable();
9423 + pfe_ctrl_resume(&pfe->ctrl);
9424 + }
9425 +
9426 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9427 + netdev = pfe->eth.eth_priv[i]->ndev;
9428 +
9429 + if (pfe->eth.eth_priv[i]->mii_bus)
9430 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
9431 +
9432 + if (netif_running(netdev))
9433 + pfe_eth_resume(netdev);
9434 +
9435 + netif_device_attach(netdev);
9436 + }
9437 + return 0;
9438 +}
9439 +#else
9440 +#define pfe_platform_suspend NULL
9441 +#define pfe_platform_resume NULL
9442 +#endif
9443 +
9444 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9445 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9446 +};
9447 +#endif
9448 +
9449 +static const struct of_device_id pfe_match[] = {
9450 + {
9451 + .compatible = "fsl,pfe",
9452 + },
9453 + {},
9454 +};
9455 +MODULE_DEVICE_TABLE(of, pfe_match);
9456 +
9457 +static struct platform_driver pfe_platform_driver = {
9458 + .probe = pfe_platform_probe,
9459 + .remove = pfe_platform_remove,
9460 + .driver = {
9461 + .name = "pfe",
9462 + .of_match_table = pfe_match,
9463 +#ifdef CONFIG_PM
9464 + .pm = &pfe_platform_pm_ops,
9465 +#endif
9466 + },
9467 +};
9468 +
9469 +module_platform_driver(pfe_platform_driver);
9470 +MODULE_LICENSE("GPL");
9471 +MODULE_DESCRIPTION("PFE Ethernet driver");
9472 +MODULE_AUTHOR("NXP DNCPE");
9473 --- /dev/null
9474 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9475 @@ -0,0 +1,141 @@
9476 +/*
9477 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9478 + * Copyright 2017 NXP
9479 + *
9480 + * This program is free software; you can redistribute it and/or modify
9481 + * it under the terms of the GNU General Public License as published by
9482 + * the Free Software Foundation; either version 2 of the License, or
9483 + * (at your option) any later version.
9484 + *
9485 + * This program is distributed in the hope that it will be useful,
9486 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9487 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9488 + * GNU General Public License for more details.
9489 + *
9490 + * You should have received a copy of the GNU General Public License
9491 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9492 + */
9493 +
9494 +#include <linux/dma-mapping.h>
9495 +#include "pfe_mod.h"
9496 +
9497 +struct pfe *pfe;
9498 +
9499 +/*
9500 + * pfe_probe - initialize PFE hardware and bring up all driver subsystems
9501 + */
9502 +int pfe_probe(struct pfe *pfe)
9503 +{
9504 + int rc;
9505 +
9506 + if (pfe->ddr_size < DDR_MAX_SIZE) {
9507 + pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
9508 + __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9509 + rc = -ENOMEM;
9510 + goto err_hw;
9511 + }
9512 +
9513 + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9514 + (8 * SZ_1M - 1)) != 0) {
9515 + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9516 + __func__, (int)pfe->ddr_phys_baseaddr +
9517 + BMU2_DDR_BASEADDR);
9518 + rc = -ENOMEM;
9519 + goto err_hw;
9520 + }
9521 +
9522 + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9523 + (unsigned long)pfe->cbus_baseaddr,
9524 + (unsigned long)pfe->ddr_baseaddr,
9525 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9526 +
9527 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9528 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9529 +
9530 + rc = pfe_hw_init(pfe, 0);
9531 + if (rc < 0)
9532 + goto err_hw;
9533 +
9534 + rc = pfe_hif_lib_init(pfe);
9535 + if (rc < 0)
9536 + goto err_hif_lib;
9537 +
9538 + rc = pfe_hif_init(pfe);
9539 + if (rc < 0)
9540 + goto err_hif;
9541 +
9542 + rc = pfe_firmware_init(pfe);
9543 + if (rc < 0)
9544 + goto err_firmware;
9545 +
9546 + rc = pfe_ctrl_init(pfe);
9547 + if (rc < 0)
9548 + goto err_ctrl;
9549 +
9550 + rc = pfe_eth_init(pfe);
9551 + if (rc < 0)
9552 + goto err_eth;
9553 +
9554 + rc = pfe_sysfs_init(pfe);
9555 + if (rc < 0)
9556 + goto err_sysfs;
9557 +
9558 + rc = pfe_debugfs_init(pfe);
9559 + if (rc < 0)
9560 + goto err_debugfs;
9561 +
9562 + return 0;
9563 +
9564 +err_debugfs:
9565 + pfe_sysfs_exit(pfe);
9566 +
9567 +err_sysfs:
9568 + pfe_eth_exit(pfe);
9569 +
9570 +err_eth:
9571 + pfe_ctrl_exit(pfe);
9572 +
9573 +err_ctrl:
9574 + pfe_firmware_exit(pfe);
9575 +
9576 +err_firmware:
9577 + pfe_hif_exit(pfe);
9578 +
9579 +err_hif:
9580 + pfe_hif_lib_exit(pfe);
9581 +
9582 +err_hif_lib:
9583 + pfe_hw_exit(pfe);
9584 +
9585 +err_hw:
9586 + return rc;
9587 +}
9588 +
9589 +/*
9590 + * pfe_remove - shut down all driver subsystems in reverse order
9591 + */
9592 +int pfe_remove(struct pfe *pfe)
9593 +{
9594 + pr_info("%s\n", __func__);
9595 +
9596 + pfe_debugfs_exit(pfe);
9597 +
9598 + pfe_sysfs_exit(pfe);
9599 +
9600 + pfe_eth_exit(pfe);
9601 +
9602 + pfe_ctrl_exit(pfe);
9603 +
9604 +#if defined(LS1012A_PFE_RESET_WA)
9605 + pfe_hif_rx_idle(&pfe->hif);
9606 +#endif
9607 + pfe_firmware_exit(pfe);
9608 +
9609 + pfe_hif_exit(pfe);
9610 +
9611 + pfe_hif_lib_exit(pfe);
9612 +
9613 + pfe_hw_exit(pfe);
9614 +
9615 + return 0;
9616 +}
9617 --- /dev/null
9618 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
9619 @@ -0,0 +1,112 @@
9620 +/*
9621 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9622 + * Copyright 2017 NXP
9623 + *
9624 + * This program is free software; you can redistribute it and/or modify
9625 + * it under the terms of the GNU General Public License as published by
9626 + * the Free Software Foundation; either version 2 of the License, or
9627 + * (at your option) any later version.
9628 + *
9629 + * This program is distributed in the hope that it will be useful,
9630 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9631 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9632 + * GNU General Public License for more details.
9633 + *
9634 + * You should have received a copy of the GNU General Public License
9635 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9636 + */
9637 +
9638 +#ifndef _PFE_MOD_H_
9639 +#define _PFE_MOD_H_
9640 +
9641 +#include <linux/device.h>
9642 +#include <linux/elf.h>
9643 +
9644 +struct pfe;
9645 +
9646 +#include "pfe_hw.h"
9647 +#include "pfe_firmware.h"
9648 +#include "pfe_ctrl.h"
9649 +#include "pfe_hif.h"
9650 +#include "pfe_hif_lib.h"
9651 +#include "pfe_eth.h"
9652 +#include "pfe_sysfs.h"
9653 +#include "pfe_perfmon.h"
9654 +#include "pfe_debugfs.h"
9655 +
9656 +#define PHYID_MAX_VAL 32
9657 +
9658 +struct pfe_tmu_credit {
9659 + /* Number of allowed TX packet in-flight, matches TMU queue size */
9660 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9661 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9662 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9663 +};
9664 +
9665 +struct pfe {
9666 + struct regmap *scfg;
9667 + unsigned long ddr_phys_baseaddr;
9668 + void *ddr_baseaddr;
9669 + unsigned int ddr_size;
9670 + void *cbus_baseaddr;
9671 + void *apb_baseaddr;
9672 + unsigned long iram_phys_baseaddr;
9673 + void *iram_baseaddr;
9674 + unsigned long ipsec_phys_baseaddr;
9675 + void *ipsec_baseaddr;
9676 + int hif_irq;
9677 + int wol_irq;
9678 + int hif_client_irq;
9679 + struct device *dev;
9680 + struct dentry *dentry;
9681 + struct pfe_ctrl ctrl;
9682 + struct pfe_hif hif;
9683 + struct pfe_eth eth;
9684 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
9685 +#if defined(CFG_DIAGS)
9686 + struct pfe_diags diags;
9687 +#endif
9688 + struct pfe_tmu_credit tmu_credit;
9689 + struct pfe_cpumon cpumon;
9690 + struct pfe_memmon memmon;
9691 + int wake;
9692 + int mdio_muxval[PHYID_MAX_VAL];
9693 + struct clk *hfe_clock;
9694 +};
9695 +
9696 +extern struct pfe *pfe;
9697 +
9698 +int pfe_probe(struct pfe *pfe);
9699 +int pfe_remove(struct pfe *pfe);
9700 +
9701 +/* DDR Mapping in reserved memory*/
9702 +#define ROUTE_TABLE_BASEADDR 0
9703 +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
9704 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
9705 + * CLASS_ROUTE_SIZE)
9706 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
9707 +#define BMU2_BUF_COUNT (4096 - 256)
9708 +/* This is to get a total DDR size of 12MiB */
9709 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
9710 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
9711 +#define UTIL_CODE_SIZE (128 * SZ_1K)
9712 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
9713 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
9714 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
9715 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
9716 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
9717 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
9718 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
9719 +#define TMU_LLM_QUEUE_LEN (8 * 512)
9720 +/* Must be power of two and at least 16 * 8 = 128 bytes */
9721 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
9722 +/* (4 TMU's x 16 queues x queue_len) */
9723 +
9724 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
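+/*
+ * Worked total (assuming CLASS_ROUTE_SIZE = 128 and DDR_BUF_SIZE = 2048,
+ * both defined elsewhere): 4MiB route table + 7.5MiB BMU2 buffers +
+ * 128KiB util code + 64KiB util data + 32KiB class data + 32KiB tmu data
+ * + 256KiB TMU LLM = 12MiB, matching the size noted above.
+ */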
9725 +
9726 +/* LMEM Mapping */
9727 +#define BMU1_LMEM_BASEADDR 0
9728 +#define BMU1_BUF_COUNT 256
9729 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
9730 +
9731 +#endif /* _PFE_MOD_H_ */
9732 --- /dev/null
9733 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9734 @@ -0,0 +1,38 @@
9735 +/*
9736 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9737 + * Copyright 2017 NXP
9738 + *
9739 + * This program is free software; you can redistribute it and/or modify
9740 + * it under the terms of the GNU General Public License as published by
9741 + * the Free Software Foundation; either version 2 of the License, or
9742 + * (at your option) any later version.
9743 + *
9744 + * This program is distributed in the hope that it will be useful,
9745 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9746 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9747 + * GNU General Public License for more details.
9748 + *
9749 + * You should have received a copy of the GNU General Public License
9750 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9751 + */
9752 +
9753 +#ifndef _PFE_PERFMON_H_
9754 +#define _PFE_PERFMON_H_
9755 +
9756 +#include "pfe/pfe.h"
9757 +
9758 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
9759 +
9760 +struct pfe_cpumon {
9761 + u32 cpu_usage_pct[MAX_PE];
9762 + u32 class_usage_pct;
9763 +};
9764 +
9765 +struct pfe_memmon {
9766 + u32 kernel_memory_allocated;
9767 +};
9768 +
9769 +int pfe_perfmon_init(struct pfe *pfe);
9770 +void pfe_perfmon_exit(struct pfe *pfe);
9771 +
9772 +#endif /* _PFE_PERFMON_H_ */
9773 --- /dev/null
9774 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
9775 @@ -0,0 +1,818 @@
9776 +/*
9777 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9778 + * Copyright 2017 NXP
9779 + *
9780 + * This program is free software; you can redistribute it and/or modify
9781 + * it under the terms of the GNU General Public License as published by
9782 + * the Free Software Foundation; either version 2 of the License, or
9783 + * (at your option) any later version.
9784 + *
9785 + * This program is distributed in the hope that it will be useful,
9786 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9787 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9788 + * GNU General Public License for more details.
9789 + *
9790 + * You should have received a copy of the GNU General Public License
9791 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9792 + */
9793 +
9794 +#include <linux/module.h>
9795 +#include <linux/platform_device.h>
9796 +
9797 +#include "pfe_mod.h"
9798 +
9799 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
9800 +#define NUM_QUEUES 16
9801 +
9802 +static char register_name[20][5] = {
9803 + "EPC", "ECAS", "EID", "ED",
9804 + "r0", "r1", "r2", "r3",
9805 + "r4", "r5", "r6", "r7",
9806 + "r8", "r9", "r10", "r11",
9807 + "r12", "r13", "r14", "r15",
9808 +};
9809 +
9810 +static char exception_name[14][20] = {
9811 + "Reset",
9812 + "HardwareFailure",
9813 + "NMI",
9814 + "InstBreakpoint",
9815 + "DataBreakpoint",
9816 + "Unsupported",
9817 + "PrivilegeViolation",
9818 + "InstBusError",
9819 + "DataBusError",
9820 + "AlignmentError",
9821 + "ArithmeticError",
9822 + "SystemCall",
9823 + "MemoryManagement",
9824 + "Interrupt",
9825 +};
9826 +
9827 +static unsigned long class_do_clear;
9828 +static unsigned long tmu_do_clear;
9829 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9830 +static unsigned long util_do_clear;
9831 +#endif
9832 +
9833 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
9834 + do_clear)
9835 +{
9836 + ssize_t len = 0;
9837 + u32 val;
9838 + char statebuf[5];
9839 + struct pfe_cpumon *cpumon = &pfe->cpumon;
9840 + u32 debug_indicator;
9841 + u32 debug[20];
9842 +
9843 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
9844 + dmem_addr += 4;
9845 +
9846 + statebuf[4] = '\0';
9847 + len += sprintf(buf + len, "state=%4s ", statebuf);
9848 +
9849 + val = pe_dmem_read(id, dmem_addr, 4);
9850 + dmem_addr += 4;
9851 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
9852 +
9853 + val = pe_dmem_read(id, dmem_addr, 4);
9854 + if (do_clear && val)
9855 + pe_dmem_write(id, 0, dmem_addr, 4);
9856 + dmem_addr += 4;
9857 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
9858 +
9859 + val = pe_dmem_read(id, dmem_addr, 4);
9860 + if (do_clear && val)
9861 + pe_dmem_write(id, 0, dmem_addr, 4);
9862 + dmem_addr += 4;
9863 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
9864 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
9865 + else
9866 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
9867 +
9868 + val = pe_dmem_read(id, dmem_addr, 4);
9869 + if (do_clear && val)
9870 + pe_dmem_write(id, 0, dmem_addr, 4);
9871 + dmem_addr += 4;
9872 + if (val)
9873 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
9874 +
9875 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
9876 +
9877 + len += sprintf(buf + len, "\n");
9878 +
9879 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
9880 + dmem_addr += 4;
9881 + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
9882 + int j, last = 0;
9883 +
9884 + for (j = 0; j < 16; j++) {
9885 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
9886 + if (debug[j]) {
9887 + if (do_clear)
9888 + pe_dmem_write(id, 0, dmem_addr, 4);
9889 + last = j + 1;
9890 + }
9891 + dmem_addr += 4;
9892 + }
9893 + for (j = 0; j < last; j++) {
9894 + len += sprintf(buf + len, "%08x%s",
9895 + cpu_to_be32(debug[j]),
9896 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
9897 + }
9898 + }
9899 +
9900 + if (!strncmp(statebuf, "DEAD", 4)) {
9901 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
9902 +
9903 + len += sprintf(buf + len, "Exception details:\n");
9904 + for (i = 0; i < 20; i++) {
9905 + debug[i] = pe_dmem_read(id, dump, 4);
9906 + dump += 4;
9907 + if (i == 2)
9908 + len += sprintf(buf + len, "%4s = %08x (=%s) ",
9909 + register_name[i], cpu_to_be32(debug[i]),
9910 + exception_name[min((u32)
9911 + cpu_to_be32(debug[i]), (u32)13)]);
9912 + else
9913 + len += sprintf(buf + len, "%4s = %08x%s",
9914 + register_name[i], cpu_to_be32(debug[i]),
9915 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
9916 + }
9917 + }
9918 +
9919 + return len;
9920 +}
9921 +
9922 +static ssize_t class_phy_stats(char *buf, int phy)
9923 +{
9924 + ssize_t len = 0;
9925 + int off1 = phy * 0x28;
9926 + int off2 = phy * 0x10;
9927 +
9928 + if (phy == 3)
9929 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
9930 +
9931 + len += sprintf(buf + len, "phy: %d\n", phy);
9932 + len += sprintf(buf + len,
9933 + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
9934 + readl(CLASS_PHY1_RX_PKTS + off1),
9935 + readl(CLASS_PHY1_TX_PKTS + off1),
9936 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
9937 + readl(CLASS_PHY1_V4_PKTS + off1),
9938 + readl(CLASS_PHY1_V6_PKTS + off1));
9939 +
9940 + len += sprintf(buf + len,
9941 + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
9942 + readl(CLASS_PHY1_ICMP_PKTS + off2),
9943 + readl(CLASS_PHY1_IGMP_PKTS + off2),
9944 + readl(CLASS_PHY1_TCP_PKTS + off2),
9945 + readl(CLASS_PHY1_UDP_PKTS + off2));
9946 +
9947 + len += sprintf(buf + len, " err\n");
9948 + len += sprintf(buf + len,
9949 + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
9950 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
9951 + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
9952 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
9953 + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
9954 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
9955 +
9956 + return len;
9957 +}
9958 +
9959 +/* qm_read_drop_stat
9960 + * This function is used to read the drop statistics from the TMU
9961 + * hw drop counter. Since the hw counter is always cleared after
9962 + * reading, this function maintains the previous drop count, and
9963 + * adds the new value to it. That value can be retrieved by
9964 + * passing a pointer to it with the total_drops arg.
9965 + *
9966 + * @param tmu TMU number (0 - 3)
9967 + * @param queue queue number (0 - 15)
9968 + * @param total_drops pointer to location to store total drops (or NULL)
9969 + * @param do_reset if TRUE, clear total drops after updating
9970 + */
9971 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
9972 +{
9973 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
9974 + u32 val;
9975 +
9976 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
9977 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
9978 + val = readl(TMU_TEQ_DROP_STAT);
9979 + qtotal[tmu][queue] += val;
9980 + if (total_drops)
9981 + *total_drops = qtotal[tmu][queue];
9982 + if (do_reset)
9983 + qtotal[tmu][queue] = 0;
9984 + return val;
9985 +}
9986 +
9987 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
9988 +{
9989 + ssize_t len = 0;
9990 + u32 drops;
9991 +
9992 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
9993 +
9994 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
9995 +
9996 + /* Select queue */
9997 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
9998 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
9999 +
10000 + len += sprintf(buf + len,
10001 + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
10002 + drops, readl(TMU_TEQ_TRANS_STAT),
10003 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
10004 + readl(TMU_LLM_QUE_DROPCNT));
10005 +
10006 + return len;
10007 +}
10008 +
10009 +static ssize_t tmu_queues(char *buf, int tmu)
10010 +{
10011 + ssize_t len = 0;
10012 + int queue;
10013 +
10014 + for (queue = 0; queue < 16; queue++)
10015 + len += tmu_queue_stats(buf + len, tmu, queue);
10016 +
10017 + return len;
10018 +}
10019 +
10020 +static ssize_t block_version(char *buf, void *addr)
10021 +{
10022 + ssize_t len = 0;
10023 + u32 val;
10024 +
10025 + val = readl(addr);
10026 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
10027 + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
10028 +
10029 + return len;
10030 +}
10031 +
10032 +static ssize_t bmu(char *buf, int id, void *base)
10033 +{
10034 + ssize_t len = 0;
10035 +
10036 + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
10037 +
10038 + len += block_version(buf + len, base + BMU_VERSION);
10039 +
10040 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
10041 + BMU_BUF_SIZE)));
10042 + len += sprintf(buf + len, " buf count: %x\n", readl(base +
10043 + BMU_BUF_CNT));
10044 + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
10045 + BMU_REM_BUF_CNT));
10046 + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
10047 + BMU_CURR_BUF_CNT));
10048 + len += sprintf(buf + len, " free err: %x\n", readl(base +
10049 + BMU_FREE_ERR_ADDR));
10050 +
10051 + return len;
10052 +}
10053 +
10054 +static ssize_t gpi(char *buf, int id, void *base)
10055 +{
10056 + ssize_t len = 0;
10057 + u32 val;
10058 +
10059 + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
10060 + len += block_version(buf + len, base + GPI_VERSION);
10061 +
10062 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
10063 + GPI_FIFO_STATUS));
10064 + val = readl(base + GPI_FIFO_DEBUG);
10065 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
10066 + 0x3f);
10067 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
10068 + 0x3f);
10069 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
10070 + 0x1ff);
10071 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
10072 + 0x1ff);
10073 + len += sprintf(buf + len, " overrun: %x\n", readl(base +
10074 + GPI_OVERRUN_DROPCNT));
10075 +
10076 + return len;
10077 +}
10078 +
10079 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10080 + const char *buf, size_t count)
10081 +{
10082 +	if (kstrtoul(buf, 0, &class_do_clear) < 0)
+		return -EINVAL;
10083 + return count;
10084 +}
10085 +
10086 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10087 + char *buf)
10088 +{
10089 + ssize_t len = 0;
10090 + int id;
10091 + u32 val;
10092 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10093 +
10094 + len += block_version(buf + len, CLASS_VERSION);
10095 +
10096 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10097 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10098 +
10099 + val = readl(CLASS_PE0_DEBUG + id * 4);
10100 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10101 +
10102 + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10103 + class_do_clear);
10104 + }
10105 + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10106 + cpumon->class_usage_pct);
10107 +
10108 + len += sprintf(buf + len, "pe status: 0x%x\n",
10109 + readl(CLASS_PE_STATUS));
10110 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
10111 + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10112 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
10113 + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10114 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
10115 +
10116 + len += class_phy_stats(buf + len, 0);
10117 + len += class_phy_stats(buf + len, 1);
10118 + len += class_phy_stats(buf + len, 2);
10119 + len += class_phy_stats(buf + len, 3);
10120 +
10121 + return len;
10122 +}
10123 +
10124 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10125 + const char *buf, size_t count)
10126 +{
10127 +	if (kstrtoul(buf, 0, &tmu_do_clear) < 0)
+		return -EINVAL;
10128 + return count;
10129 +}
10130 +
10131 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10132 + char *buf)
10133 +{
10134 + ssize_t len = 0;
10135 + int id;
10136 + u32 val;
10137 +
10138 + len += block_version(buf + len, TMU_VERSION);
10139 +
10140 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10141 + if (id == TMU2_ID)
10142 + continue;
10143 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10144 +
10145 + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10146 + tmu_do_clear);
10147 + }
10148 +
10149 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
10150 + len += sprintf(buf + len, "inq fifo cnt: %x\n",
10151 + readl(TMU_PHY_INQ_FIFO_CNT));
10152 + val = readl(TMU_INQ_STAT);
10153 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
10154 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
10155 +
10156 + return len;
10157 +}
10158 +
10159 +static unsigned long drops_do_clear;
10160 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10161 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10162 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10163 +#endif
10164 +
10165 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10166 + "ICC",
10167 + "Host Pkt Error",
10168 + "Rx Error",
10169 + "IPsec Outbound",
10170 + "IPsec Inbound",
10171 + "EXPT IPsec Error",
10172 + "Reassembly",
10173 + "Fragmenter",
10174 + "NAT-T",
10175 + "Socket",
10176 + "Multicast",
10177 + "NAT-PT",
10178 + "Tx Disabled",
10179 +};
10180 +
10181 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10182 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10183 + "IPsec Outbound",
10184 + "IPsec Inbound",
10185 + "IPsec Rate Limiter",
10186 + "Fragmenter",
10187 + "Socket",
10188 + "Tx Disabled",
10189 + "Rx Error",
10190 +};
10191 +#endif
10192 +
10193 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10194 + const char *buf, size_t count)
10195 +{
10196 +	if (kstrtoul(buf, 0, &drops_do_clear) < 0)
+		return -EINVAL;
10197 + return count;
10198 +}
10199 +
10200 +static u32 tmu_drops[4][16];
10201 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10202 + char *buf)
10203 +{
10204 + ssize_t len = 0;
10205 + int id, dropnum;
10206 + int tmu, queue;
10207 + u32 val;
10208 + u32 dmem_addr;
10209 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10210 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10211 +
10212 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
10213 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10214 + if (drops_do_clear)
10215 + pe_sync_stop(ctrl, (1 << id));
10216 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10217 + dropnum++) {
10218 + dmem_addr = CLASS_DM_DROP_CNTR;
10219 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10220 + class_drop_counter[dropnum] += val;
10221 + num_class_drops += val;
10222 + if (drops_do_clear)
10223 + pe_dmem_write(id, 0, dmem_addr, 4);
10224 + }
10225 + if (drops_do_clear)
10226 + pe_start(ctrl, (1 << id));
10227 + }
10228 +
10229 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10230 + if (drops_do_clear)
10231 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10232 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10233 + dmem_addr = UTIL_DM_DROP_CNTR;
10234 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10235 + util_drop_counter[dropnum] = val;
10236 + num_util_drops += val;
10237 + if (drops_do_clear)
10238 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10239 + }
10240 + if (drops_do_clear)
10241 + pe_start(ctrl, (1 << UTIL_ID));
10242 +#endif
10243 + for (tmu = 0; tmu < 4; tmu++) {
10244 + for (queue = 0; queue < 16; queue++) {
10245 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10246 + drops_do_clear);
10247 + num_tmu_drops += tmu_drops[tmu][queue];
10248 + }
10249 + }
10250 +
10251 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10252 + len += sprintf(buf + len, "No PE drops\n\n");
10253 +
10254 + if (num_class_drops > 0) {
10255 + len += sprintf(buf + len, "Class PE drops --\n");
10256 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10257 + dropnum++) {
10258 + if (class_drop_counter[dropnum] > 0)
10259 + len += sprintf(buf + len, " %s: %d\n",
10260 + class_drop_description[dropnum],
10261 + class_drop_counter[dropnum]);
10262 + }
10263 + len += sprintf(buf + len, "\n");
10264 + }
10265 +
10266 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10267 + if (num_util_drops > 0) {
10268 + len += sprintf(buf + len, "Util PE drops --\n");
10269 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10270 + if (util_drop_counter[dropnum] > 0)
10271 + len += sprintf(buf + len, " %s: %d\n",
10272 + util_drop_description[dropnum],
10273 + util_drop_counter[dropnum]);
10274 + }
10275 + len += sprintf(buf + len, "\n");
10276 + }
10277 +#endif
10278 + if (num_tmu_drops > 0) {
10279 + len += sprintf(buf + len, "TMU drops --\n");
10280 + for (tmu = 0; tmu < 4; tmu++) {
10281 + for (queue = 0; queue < 16; queue++) {
10282 + if (tmu_drops[tmu][queue] > 0)
10283 + len += sprintf(buf + len,
10284 + " TMU%d-Q%d: %d\n"
10285 + , tmu, queue, tmu_drops[tmu][queue]);
10286 + }
10287 + }
10288 + len += sprintf(buf + len, "\n");
10289 + }
10290 +
10291 + return len;
10292 +}
10293 +
10294 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
10295 + *attr, char *buf)
10296 +{
10297 + return tmu_queues(buf, 0);
10298 +}
10299 +
10300 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
10301 + *attr, char *buf)
10302 +{
10303 + return tmu_queues(buf, 1);
10304 +}
10305 +
10306 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
10307 + *attr, char *buf)
10308 +{
10309 + return tmu_queues(buf, 2);
10310 +}
10311 +
10312 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
10313 + *attr, char *buf)
10314 +{
10315 + return tmu_queues(buf, 3);
10316 +}
10317 +
10318 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10319 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10320 + const char *buf, size_t count)
10321 +{
10322 +	if (kstrtoul(buf, 0, &util_do_clear) < 0)
+		return -EINVAL;
10323 + return count;
10324 +}
10325 +
10326 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10327 + char *buf)
10328 +{
10329 + ssize_t len = 0;
10330 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10331 +
10332 + len += block_version(buf + len, UTIL_VERSION);
10333 +
10334 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10335 + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10336 + util_do_clear);
10337 + pe_start(ctrl, (1 << UTIL_ID));
10338 +
10339 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
10340 + len += sprintf(buf + len, "max buf cnt: %x\n",
10341 + readl(UTIL_MAX_BUF_CNT));
10342 + len += sprintf(buf + len, "tsq max cnt: %x\n",
10343 + readl(UTIL_TSQ_MAX_CNT));
10344 +
10345 + return len;
10346 +}
10347 +#endif
10348 +
10349 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10350 + char *buf)
10351 +{
10352 + ssize_t len = 0;
10353 +
10354 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10355 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10356 +
10357 + return len;
10358 +}
10359 +
10360 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10361 + char *buf)
10362 +{
10363 + ssize_t len = 0;
10364 +
10365 + len += sprintf(buf + len, "hif:\n ");
10366 + len += block_version(buf + len, HIF_VERSION);
10367 +
10368 + len += sprintf(buf + len, " tx curr bd: %x\n",
10369 + readl(HIF_TX_CURR_BD_ADDR));
10370 + len += sprintf(buf + len, " tx status: %x\n",
10371 + readl(HIF_TX_STATUS));
10372 + len += sprintf(buf + len, " tx dma status: %x\n",
10373 + readl(HIF_TX_DMA_STATUS));
10374 +
10375 + len += sprintf(buf + len, " rx curr bd: %x\n",
10376 + readl(HIF_RX_CURR_BD_ADDR));
10377 + len += sprintf(buf + len, " rx status: %x\n",
10378 + readl(HIF_RX_STATUS));
10379 + len += sprintf(buf + len, " rx dma status: %x\n",
10380 + readl(HIF_RX_DMA_STATUS));
10381 +
10382 + len += sprintf(buf + len, "hif nocopy:\n ");
10383 + len += block_version(buf + len, HIF_NOCPY_VERSION);
10384 +
10385 + len += sprintf(buf + len, " tx curr bd: %x\n",
10386 + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10387 + len += sprintf(buf + len, " tx status: %x\n",
10388 + readl(HIF_NOCPY_TX_STATUS));
10389 + len += sprintf(buf + len, " tx dma status: %x\n",
10390 + readl(HIF_NOCPY_TX_DMA_STATUS));
10391 +
10392 + len += sprintf(buf + len, " rx curr bd: %x\n",
10393 + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10394 + len += sprintf(buf + len, " rx status: %x\n",
10395 + readl(HIF_NOCPY_RX_STATUS));
10396 + len += sprintf(buf + len, " rx dma status: %x\n",
10397 + readl(HIF_NOCPY_RX_DMA_STATUS));
10398 +
10399 + return len;
10400 +}
10401 +
10402 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10403 + char *buf)
10404 +{
10405 + ssize_t len = 0;
10406 +
10407 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10408 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10409 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10410 +
10411 + return len;
10412 +}
10413 +
10414 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
10415 + *attr, char *buf)
10416 +{
10417 + ssize_t len = 0;
10418 + struct pfe_memmon *memmon = &pfe->memmon;
10419 +
10420 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10421 + memmon->kernel_memory_allocated,
10422 + (memmon->kernel_memory_allocated + 1023) / 1024);
10423 +
10424 + return len;
10425 +}
10426 +
10427 +#ifdef HIF_NAPI_STATS
10428 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
10429 + struct device_attribute *attr,
10430 + char *buf)
10431 +{
10432 + struct platform_device *pdev = to_platform_device(dev);
10433 + struct pfe *pfe = platform_get_drvdata(pdev);
10434 + ssize_t len = 0;
10435 +
10436 + len += sprintf(buf + len, "sched: %u\n",
10437 + pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
10438 + len += sprintf(buf + len, "poll: %u\n",
10439 + pfe->hif.napi_counters[NAPI_POLL_COUNT]);
10440 + len += sprintf(buf + len, "packet: %u\n",
10441 + pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
10442 + len += sprintf(buf + len, "budget: %u\n",
10443 + pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
10444 + len += sprintf(buf + len, "desc: %u\n",
10445 + pfe->hif.napi_counters[NAPI_DESC_COUNT]);
10446 + len += sprintf(buf + len, "full: %u\n",
10447 + pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
10448 +
10449 + return len;
10450 +}
10451 +
10452 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
10453 + struct device_attribute *attr,
10454 + const char *buf, size_t count)
10455 +{
10456 + struct platform_device *pdev = to_platform_device(dev);
10457 + struct pfe *pfe = platform_get_drvdata(pdev);
10458 +
10459 + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
10460 +
10461 + return count;
10462 +}
10463 +
10464 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
10465 + pfe_set_hif_napi_stats);
10466 +#endif
10467 +
10468 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
10469 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
10470 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10471 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
10472 +#endif
10473 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
10474 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
10475 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
10476 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
10477 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
10478 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
10479 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
10480 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
10481 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
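+
+/*
+ * Usage sketch from user space (the device path is an assumption; the
+ * attributes are created on pfe->dev by pfe_sysfs_init() below):
+ *
+ *	cat /sys/devices/platform/pfe/class	# dump CLASS PE status
+ *	echo 1 > /sys/devices/platform/pfe/class # clear counters on next read
+ *	cat /sys/devices/platform/pfe/drops	# per-PE and TMU drop counters
+ */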
+
+int pfe_sysfs_init(struct pfe *pfe)
+{
+	if (device_create_file(pfe->dev, &dev_attr_class))
+		goto err_class;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu))
+		goto err_tmu;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (device_create_file(pfe->dev, &dev_attr_util))
+		goto err_util;
+#endif
+
+	if (device_create_file(pfe->dev, &dev_attr_bmu))
+		goto err_bmu;
+
+	if (device_create_file(pfe->dev, &dev_attr_hif))
+		goto err_hif;
+
+	if (device_create_file(pfe->dev, &dev_attr_gpi))
+		goto err_gpi;
+
+	if (device_create_file(pfe->dev, &dev_attr_drops))
+		goto err_drops;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
+		goto err_tmu0_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
+		goto err_tmu1_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
+		goto err_tmu2_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
+		goto err_tmu3_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_pfemem))
+		goto err_pfemem;
+
+#ifdef HIF_NAPI_STATS
+	if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
+		goto err_hif_napi_stats;
+#endif
+
+	return 0;
+
+#ifdef HIF_NAPI_STATS
+err_hif_napi_stats:
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+#endif
+
+err_pfemem:
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+
+err_tmu3_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+
+err_tmu2_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+
+err_tmu1_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+
+err_tmu0_queues:
+	device_remove_file(pfe->dev, &dev_attr_drops);
+
+err_drops:
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+
+err_gpi:
+	device_remove_file(pfe->dev, &dev_attr_hif);
+
+err_hif:
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+
+err_bmu:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+
+err_util:
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+
+err_tmu:
+	device_remove_file(pfe->dev, &dev_attr_class);
+
+err_class:
+	return -1;
+}
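/*
 * The ladder above is the standard kernel unwind idiom: labels appear in
 * reverse creation order, so a failure at step N falls through exactly
 * the removals for steps N-1..1 and never removes a file that was not
 * created. Distilled to two attributes (function name illustrative):
 */
static int pfe_sysfs_init_sketch(struct pfe *pfe)
{
	if (device_create_file(pfe->dev, &dev_attr_class))
		goto err_class;
	if (device_create_file(pfe->dev, &dev_attr_tmu))
		goto err_tmu;
	return 0;

err_tmu:	/* tmu failed; class was created, so undo it */
	device_remove_file(pfe->dev, &dev_attr_class);
err_class:	/* class failed; nothing to undo */
	return -1;
}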
+
+void pfe_sysfs_exit(struct pfe *pfe)
+{
+#ifdef HIF_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+	device_remove_file(pfe->dev, &dev_attr_drops);
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+	device_remove_file(pfe->dev, &dev_attr_hif);
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+	device_remove_file(pfe->dev, &dev_attr_class);
+}
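/*
 * Sketch of an alternative, not what the patch does: a single attribute
 * group lets the driver core create, unwind and remove every file, which
 * would collapse pfe_sysfs_init()/pfe_sysfs_exit() to one call each. The
 * conditional attributes (util, hif_napi_stats) would stay behind their
 * #ifdefs in the array or move to an .is_visible callback.
 */
static struct attribute *pfe_attrs_sketch[] = {
	&dev_attr_class.attr,
	&dev_attr_tmu.attr,
	&dev_attr_bmu.attr,
	&dev_attr_hif.attr,
	&dev_attr_gpi.attr,
	&dev_attr_drops.attr,
	&dev_attr_pfemem.attr,
	NULL,		/* sentinel required by sysfs_create_group() */
};

static const struct attribute_group pfe_attr_group_sketch = {
	.attrs = pfe_attrs_sketch,
};

/* init:  return sysfs_create_group(&pfe->dev->kobj, &pfe_attr_group_sketch);
 * exit:  sysfs_remove_group(&pfe->dev->kobj, &pfe_attr_group_sketch);
 */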
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_SYSFS_H_
+#define _PFE_SYSFS_H_
+
+#include <linux/proc_fs.h>
+
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
+
+int pfe_sysfs_init(struct pfe *pfe);
+void pfe_sysfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_SYSFS_H_ */
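/*
 * Hedged usage sketch: qm_read_drop_stat() is the only symbol this header
 * exports besides init/exit; pfe_sysfs.c uses it to populate the 'drops'
 * and 'tmuN_queues' attributes. Judging from the signature alone, a
 * caller totalling the drops of one TMU would look roughly like this
 * (the queue count of 16 and the accumulate-into-*total_drops behaviour
 * are assumptions, not confirmed by this header):
 */
static u32 tmu_total_drops_sketch(u32 tmu)
{
	u32 queue, total = 0;

	for (queue = 0; queue < 16; queue++)
		/* do_reset = 0: read the counter without clearing it */
		qm_read_drop_stat(tmu, queue, &total, 0);

	return total;
}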