target/linux/layerscape/patches-4.14/710-pfe-eth-support-layerscape.patch (ae64527f3d9faceb64c04f836231f442954f3c94)
1 From 93febc09be23aa75cbc5bf5e76250c923f4004e5 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:26:59 +0800
4 Subject: [PATCH 16/40] pfe-eth: support layerscape
5 This is an integrated patch of pfe-eth for layerscape
6
7 Signed-off-by: Akhila Kavi <akhila.kavi@nxp.com>
8 Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
9 Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
10 Signed-off-by: Archana Madhavan <archana.madhavan@nxp.com>
11 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
12 Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
13 Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
14 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
15 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
16 Signed-off-by: Biwen Li <biwen.li@nxp.com>
17 ---
18 .../devicetree/bindings/net/fsl_ppfe/pfe.txt | 173 ++
19 drivers/staging/fsl_ppfe/Kconfig | 20 +
20 drivers/staging/fsl_ppfe/Makefile | 19 +
21 drivers/staging/fsl_ppfe/TODO | 2 +
22 drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
23 .../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
24 .../fsl_ppfe/include/pfe/cbus/class_csr.h | 289 ++
25 .../fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
26 .../staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
27 .../staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
28 .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
29 .../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
30 .../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
31 drivers/staging/fsl_ppfe/include/pfe/pfe.h | 373 +++
32 drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
33 drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
34 drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
35 drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
36 drivers/staging/fsl_ppfe/pfe_eth.c | 2521 +++++++++++++++++
37 drivers/staging/fsl_ppfe/pfe_eth.h | 185 ++
38 drivers/staging/fsl_ppfe/pfe_firmware.c | 314 ++
39 drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
40 drivers/staging/fsl_ppfe/pfe_hal.c | 1528 ++++++++++
41 drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++
42 drivers/staging/fsl_ppfe/pfe_hif.h | 212 ++
43 drivers/staging/fsl_ppfe/pfe_hif_lib.c | 640 +++++
44 drivers/staging/fsl_ppfe/pfe_hif_lib.h | 241 ++
45 drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
46 drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
47 .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
48 drivers/staging/fsl_ppfe/pfe_mod.c | 156 +
49 drivers/staging/fsl_ppfe/pfe_mod.h | 114 +
50 drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
51 drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++
52 drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
53 35 files changed, 10690 insertions(+)
54 create mode 100644 Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
55 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
56 create mode 100644 drivers/staging/fsl_ppfe/Makefile
57 create mode 100644 drivers/staging/fsl_ppfe/TODO
58 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
59 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
60 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
61 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
62 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
63 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
64 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
65 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
66 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
67 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
68 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
69 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
70 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
71 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
72 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
73 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
74 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
75 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
76 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
77 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
78 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
79 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
80 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
81 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
82 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
83 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
84 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
85 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
86 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
87 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
88 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
89
90 --- /dev/null
91 +++ b/Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
92 @@ -0,0 +1,173 @@
93 +=============================================================================
94 +NXP Programmable Packet Forwarding Engine Device Bindings
95 +
96 +CONTENTS
97 + - PFE Node
98 + - Ethernet Node
99 +
100 +=============================================================================
101 +PFE Node
102 +
103 +DESCRIPTION
104 +
105 +The PFE node has all the properties associated with the Packet Forwarding Engine block.
106 +
107 +PROPERTIES
108 +
109 +- compatible
110 + Usage: required
111 + Value type: <stringlist>
112 + Definition: Must include "fsl,pfe"
113 +
114 +- reg
115 + Usage: required
116 + Value type: <prop-encoded-array>
117 + Definition: A standard property.
118 + Specifies the base address and size of the following regions:
119 + - PFE configuration registers
120 + - DDR memory used by PFE
121 +
122 +- fsl,pfe-num-interfaces
123 + Usage: required
124 + Value type: <u32>
125 + Definition: Must be present. Value can be either one or two.
126 +
127 +- interrupts
128 + Usage: required
129 + Value type: <prop-encoded-array>
130 + Definition: Three interrupts are specified in this property.
131 + - HIF interrupt
132 + - HIF NO COPY interrupt
133 + - Wake On LAN interrupt
134 +
135 +- interrupt-names
136 + Usage: required
137 + Value type: <stringlist>
138 + Definition: The following strings are defined for the 3 interrupts.
139 + "pfe_hif" - HIF interrupt
140 + "pfe_hif_nocpy" - HIF NO COPY interrupt
141 + "pfe_wol" - Wake On LAN interrupt
142 +
143 +- memory-region
144 + Usage: required
145 + Value type: <phandle>
146 + Definition: phandle to a node describing reserved memory used by PFE.
147 + Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
148 +
149 +- fsl,pfe-scfg
150 + Usage: required
151 + Value type: <phandle>
152 + Definition: phandle for scfg.
153 +
154 +- fsl,rcpm-wakeup
155 + Usage: required
156 + Value type: <phandle>
157 + Definition: phandle for rcpm.
158 +
159 +- clocks
160 + Usage: required
161 + Value type: <phandle>
162 + Definition: phandle for clockgen.
163 +
164 +- clock-names
165 + Usage: required
166 + Value type: <string>
167 + Definition: name of the clock ("pfe" in the example below).
168 +
169 +EXAMPLE
170 +
171 +pfe: pfe@04000000 {
172 + compatible = "fsl,pfe";
173 + reg = <0x0 0x04000000 0x0 0xc00000>, /* AXI 16M */
174 + <0x0 0x83400000 0x0 0xc00000>; /* PFE DDR 12M */
175 + reg-names = "pfe", "pfe-ddr";
176 + fsl,pfe-num-interfaces = <0x2>;
177 + interrupts = <0 172 0x4>, /* HIF interrupt */
178 + <0 173 0x4>, /* HIF_NOCPY interrupt */
179 + <0 174 0x4>; /* WoL interrupt */
180 + interrupt-names = "pfe_hif", "pfe_hif_nocpy", "pfe_wol";
181 + memory-region = <&pfe_reserved>;
182 + fsl,pfe-scfg = <&scfg 0>;
183 + fsl,rcpm-wakeup = <&rcpm 0xf0000020>;
184 + clocks = <&clockgen 4 0>;
185 + clock-names = "pfe";
186 +
187 + status = "okay";
188 + pfe_mac0: ethernet@0 {
189 + };
190 +
191 + pfe_mac1: ethernet@1 {
192 + };
193 +};
194 +
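
For reference, the &pfe_reserved phandle above points at a reserved-memory
carve-out. A minimal sketch, consistent with the 12M "PFE DDR" window in the
reg example (the exact base address and size are platform choices, not
mandated by this binding):

    reserved-memory {
            #address-cells = <2>;
            #size-cells = <2>;
            ranges;

            pfe_reserved: packetbuffer@83400000 {
                    reg = <0 0x83400000 0 0xc00000>;
            };
    };
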
195 +=============================================================================
196 +Ethernet Node
197 +
198 +DESCRIPTION
199 +
200 +The Ethernet node has all the properties of a PFE interface that platforms
201 +use to connect to a PHY:
202 +
203 +PROPERTIES
204 +
205 +- compatible
206 + Usage: required
207 + Value type: <stringlist>
208 + Definition: Must include "fsl,pfe-gemac-port"
209 +
210 +- reg
211 + Usage: required
212 + Value type: <prop-encoded-array>
213 + Definition: A standard property.
214 + Specifies the GEMAC id of the interface.
215 +
216 +- fsl,gemac-bus-id
217 + Usage: required
218 + Value type: <u32>
219 + Definition: Must be present. Value should be the id of the bus
220 + connected to the GEMAC.
221 +
222 +- fsl,gemac-phy-id
223 + Usage: required
224 + Value type: <u32>
225 + Definition: Must be present. Value should be the id of the PHY
226 + connected to the GEMAC.
227 +
228 +- fsl,mdio-mux-val
229 + Usage: required
230 + Value type: <u32>
231 + Definition: Must be present. Value can be 0, 2, or 3.
232 + This value is used to configure the mux to enable MDIO.
233 +
234 +- phy-mode
235 + Usage: required
236 + Value type: <string>
237 + Definition: Must include "sgmii"
238 +
239 +- fsl,pfe-phy-if-flags
240 + Usage: required
241 + Value type: <u32>
242 + Definition: Must be present. Value should be 0 by default.
243 + If no PHY is connected, this needs to be 1.
244 +
245 +- mdio
246 + Optional subnode that specifies the MDIO bus. Its reg
247 + property is used to enable/disable the MDIO bus.
248 +
249 +EXAMPLE
250 +
251 +ethernet@0 {
252 + compatible = "fsl,pfe-gemac-port";
253 + #address-cells = <1>;
254 + #size-cells = <0>;
255 + reg = <0x0>; /* GEM_ID */
256 + fsl,gemac-bus-id = <0x0>; /* BUS_ID */
257 + fsl,gemac-phy-id = <0x2>; /* PHY_ID */
258 + fsl,mdio-mux-val = <0x0>;
259 + phy-mode = "sgmii";
260 + fsl,pfe-phy-if-flags = <0x0>;
261 +
262 + mdio@0 {
263 + reg = <0x1>; /* enabled/disabled */
264 + };
265 +};
266 --- /dev/null
267 +++ b/drivers/staging/fsl_ppfe/Kconfig
268 @@ -0,0 +1,20 @@
269 +#
270 +# Freescale Programmable Packet Forwarding Engine driver
271 +#
272 +config FSL_PPFE
273 + bool "Freescale PPFE Driver"
274 + default n
275 + ---help---
276 + Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
277 + It provides two high-performance Ethernet interfaces.
278 + This driver initializes, programs and controls the PPFE.
279 + Use this driver to enable network connectivity on LS1012A platforms.
280 +
281 +if FSL_PPFE
282 +
283 +config FSL_PPFE_UTIL_DISABLED
284 + bool "Disable PPFE UTIL Processor Engine"
285 + ---help---
286 + The UTIL PE needs to be enabled only if required.
287 +
288 +endif # FSL_PPFE
289 --- /dev/null
290 +++ b/drivers/staging/fsl_ppfe/Makefile
291 @@ -0,0 +1,19 @@
292 +#
293 +# Makefile for the Freescale PPFE driver
294 +#
295 +
296 +ccflags-y += -I$(src)/include -I$(src)
297 +
298 +obj-m += pfe.o
299 +
300 +pfe-y += pfe_mod.o \
301 + pfe_hw.o \
302 + pfe_firmware.o \
303 + pfe_ctrl.o \
304 + pfe_hif.o \
305 + pfe_hif_lib.o \
306 + pfe_eth.o \
307 + pfe_sysfs.o \
308 + pfe_debugfs.o \
309 + pfe_ls1012a_platform.o \
310 + pfe_hal.o
311 --- /dev/null
312 +++ b/drivers/staging/fsl_ppfe/TODO
313 @@ -0,0 +1,2 @@
314 +TODO:
315 + - provide pfe pe monitoring support
316 --- /dev/null
317 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
318 @@ -0,0 +1,78 @@
319 +/*
320 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
321 + * Copyright 2017 NXP
322 + *
323 + * This program is free software; you can redistribute it and/or modify
324 + * it under the terms of the GNU General Public License as published by
325 + * the Free Software Foundation; either version 2 of the License, or
326 + * (at your option) any later version.
327 + *
328 + * This program is distributed in the hope that it will be useful,
329 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
330 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
331 + * GNU General Public License for more details.
332 + *
333 + * You should have received a copy of the GNU General Public License
334 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
335 + */
336 +
337 +#ifndef _CBUS_H_
338 +#define _CBUS_H_
339 +
340 +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
341 +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
342 +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
343 +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
344 +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
345 +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
346 +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
347 +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
348 +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
349 +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
350 +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
351 +#define LMEM_SIZE 0x10000
352 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
353 +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
354 +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
355 +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
356 +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
357 +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
358 +
359 +/*
360 + * PE memory access through CSR:
361 + * XXX_MEM_ACCESS_ADDR register bit definitions.
362 + */
363 +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
364 +#define PE_MEM_ACCESS_IMEM BIT(15)
365 +#define PE_MEM_ACCESS_DMEM BIT(16)
366 +
367 +/* Byte enables of the internal memory access. These are interpreted in big-endian. */
368 +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
369 + ({ typeof(size) size_ = (size); \
370 + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
371 +
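
For orientation, a minimal sketch of how these bits compose a host-side DMEM
write through the classifier's CSR window (an assumption drawn from typical
usage of such a window, not code from this patch; the CLASS_MEM_ACCESS_*
registers come from cbus/class_csr.h, included below):

    /* Stage the data word, then issue the command: write, DMEM target,
     * all four byte lanes enabled (PE_MEM_ACCESS_BYTE_ENABLE(0, 4)
     * evaluates to 0xf << 24).
     */
    static void example_class_dmem_write32(u32 dmem_addr, u32 val)
    {
            writel(val, CLASS_MEM_ACCESS_WDATA);
            writel(PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
                   PE_MEM_ACCESS_BYTE_ENABLE(0, 4) | (dmem_addr & 0xffffff),
                   CLASS_MEM_ACCESS_ADDR);
    }
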
372 +#include "cbus/emac_mtip.h"
373 +#include "cbus/gpi.h"
374 +#include "cbus/bmu.h"
375 +#include "cbus/hif.h"
376 +#include "cbus/tmu_csr.h"
377 +#include "cbus/class_csr.h"
378 +#include "cbus/hif_nocpy.h"
379 +#include "cbus/util_csr.h"
380 +
381 +/* PFE cores states */
382 +#define CORE_DISABLE 0x00000000
383 +#define CORE_ENABLE 0x00000001
384 +#define CORE_SW_RESET 0x00000002
385 +
386 +/* LMEM defines */
387 +#define LMEM_HDR_SIZE 0x0010
388 +#define LMEM_BUF_SIZE_LN2 0x7
389 +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
390 +
391 +/* DDR defines */
392 +#define DDR_HDR_SIZE 0x0100
393 +#define DDR_BUF_SIZE_LN2 0xb
394 +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
395 +
396 +#endif /* _CBUS_H_ */
397 --- /dev/null
398 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
399 @@ -0,0 +1,55 @@
400 +/*
401 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
402 + * Copyright 2017 NXP
403 + *
404 + * This program is free software; you can redistribute it and/or modify
405 + * it under the terms of the GNU General Public License as published by
406 + * the Free Software Foundation; either version 2 of the License, or
407 + * (at your option) any later version.
408 + *
409 + * This program is distributed in the hope that it will be useful,
410 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
411 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
412 + * GNU General Public License for more details.
413 + *
414 + * You should have received a copy of the GNU General Public License
415 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
416 + */
417 +
418 +#ifndef _BMU_H_
419 +#define _BMU_H_
420 +
421 +#define BMU_VERSION 0x000
422 +#define BMU_CTRL 0x004
423 +#define BMU_UCAST_CONFIG 0x008
424 +#define BMU_UCAST_BASE_ADDR 0x00c
425 +#define BMU_BUF_SIZE 0x010
426 +#define BMU_BUF_CNT 0x014
427 +#define BMU_THRES 0x018
428 +#define BMU_INT_SRC 0x020
429 +#define BMU_INT_ENABLE 0x024
430 +#define BMU_ALLOC_CTRL 0x030
431 +#define BMU_FREE_CTRL 0x034
432 +#define BMU_FREE_ERR_ADDR 0x038
433 +#define BMU_CURR_BUF_CNT 0x03c
434 +#define BMU_MCAST_CNT 0x040
435 +#define BMU_MCAST_ALLOC_CTRL 0x044
436 +#define BMU_REM_BUF_CNT 0x048
437 +#define BMU_LOW_WATERMARK 0x050
438 +#define BMU_HIGH_WATERMARK 0x054
439 +#define BMU_INT_MEM_ACCESS 0x100
440 +
441 +struct BMU_CFG {
442 + unsigned long baseaddr;
443 + u32 count;
444 + u32 size;
445 + u32 low_watermark;
446 + u32 high_watermark;
447 +};
448 +
449 +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
450 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
451 +
452 +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
453 +
454 +#endif /* _BMU_H_ */
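
A hedged sketch of how struct BMU_CFG pairs with the cbus.h buffer defines
for BMU1 (the watermark values are illustrative assumptions; note that .size
carries the log2 of the buffer size, matching BMU1_BUF_SIZE above):

    static const struct BMU_CFG bmu1_cfg = {
            .baseaddr       = LMEM_BASE_ADDR,
            .count          = LMEM_SIZE / LMEM_BUF_SIZE, /* 0x10000 / 128 = 512 */
            .size           = BMU1_BUF_SIZE,             /* log2(128) = 7 */
            .low_watermark  = 10,                        /* assumed */
            .high_watermark = 15,                        /* assumed */
    };
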
455 --- /dev/null
456 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
457 @@ -0,0 +1,289 @@
458 +/*
459 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
460 + * Copyright 2017 NXP
461 + *
462 + * This program is free software; you can redistribute it and/or modify
463 + * it under the terms of the GNU General Public License as published by
464 + * the Free Software Foundation; either version 2 of the License, or
465 + * (at your option) any later version.
466 + *
467 + * This program is distributed in the hope that it will be useful,
468 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
469 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
470 + * GNU General Public License for more details.
471 + *
472 + * You should have received a copy of the GNU General Public License
473 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
474 + */
475 +
476 +#ifndef _CLASS_CSR_H_
477 +#define _CLASS_CSR_H_
478 +
479 +/* @file class_csr.h.
480 + * class_csr - block containing all the classifier control and status registers.
481 + * Mapped on CBUS and accessible from all PEs and the ARM.
482 + */
483 +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
484 +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
485 +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
486 +
487 +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
488 +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
489 +
490 +/* LMEM header size for the Classifier block. Data in the LMEM
491 + * is written from this offset.
492 + */
493 +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
494 +
495 +/* DDR header size for the Classifier block. Data in the DDR
496 + * is written from this offset.
497 + */
498 +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
499 +
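
As a sketch, the two packing macros above are meant to be combined into a
single write of CLASS_HDR_SIZE, e.g. with the LMEM/DDR header sizes from
cbus.h (the writel-based access is an assumption, not code from this patch):

    writel(CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) | CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE),
           CLASS_HDR_SIZE);
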
500 +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
501 +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
502 +
503 +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
504 +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
505 +
506 +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
507 +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
508 +
509 +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
510 +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
511 +
512 +
513 +/* Class PE memory access. Allows external PEs and the host to
514 + * read/write PMEM/DMEM memory ranges for each classifier PE.
515 + */
516 +/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
517 + * see XXX_MEM_ACCESS_ADDR above for details.
518 + */
519 +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
520 +
521 +/* Internal Memory Access Write Data [31:0] */
522 +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
523 +
524 +/* Internal Memory Access Read Data [31:0] */
525 +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
526 +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
527 +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
528 +
529 +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
530 +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
531 +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
532 +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
533 +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
534 +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
535 +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
536 +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
537 +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
538 +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
539 +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
540 +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
541 +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
542 +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
543 +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
544 +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
545 +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
546 +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
547 +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
548 +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
549 +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
550 +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
551 +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
552 +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
553 +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
554 +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
555 +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
556 +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
557 +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
558 +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
559 +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
560 +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
561 +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
562 +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
563 +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
564 +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
565 +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
566 +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
567 +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
568 +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
569 +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
570 +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
571 +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
572 +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
573 +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
574 +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
575 +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
576 +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
577 +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
578 +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
579 +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
580 +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
581 +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
582 +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
583 +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
584 +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
585 +
586 +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
587 +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
588 +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
589 +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
590 +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
591 +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
592 +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
593 +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
594 +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
595 +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
596 +
597 +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
598 +
599 +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
600 +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
601 +
602 +/* (route_entry_size[9:0], route_hash_size[23:16]
603 + * (the hash size is actually log2(size)))
604 + */
605 +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
606 +
607 +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
608 +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
609 +
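
A sketch of the same packing pattern for this register (the values shown are
illustrative; CLASS_ROUTE_SIZE and CLASS_ROUTE_HASH_BITS are defined later in
this file, and real configurations take the hash width from the
route_table_hash_bits field of struct class_cfg at the end of this file):

    writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
           CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS),
           CLASS_ROUTE_HASH_ENTRY_SIZE);
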
610 +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
611 +
612 +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
613 +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
614 +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
615 +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
616 +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
617 +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
618 +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
619 +
620 +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
621 +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
622 +/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
623 +
624 +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
625 +
626 +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
627 +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
628 +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
629 +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
630 +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
631 +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
632 +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
633 +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
634 +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
635 +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
636 +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
637 +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
638 +
639 +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
640 +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
641 +
642 +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
643 +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
644 +
645 +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
646 +
647 +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
648 +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
649 +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
650 +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
651 +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
652 +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
653 +
654 +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
655 +
656 +/* CLASS defines */
657 +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
658 +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
659 +
660 +/* Can be configured */
661 +#define CLASS_PBUF0_BASE_ADDR 0x000
662 +/* Can be configured */
663 +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
664 +/* Can be configured */
665 +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
666 +/* Can be configured */
667 +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
668 +
669 +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
670 + CLASS_PBUF_HEADER_OFFSET)
671 +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
672 + CLASS_PBUF_HEADER_OFFSET)
673 +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
674 + CLASS_PBUF_HEADER_OFFSET)
675 +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
676 + CLASS_PBUF_HEADER_OFFSET)
677 +
678 +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
679 + CLASS_PBUF0_BASE_ADDR)
680 +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
681 + CLASS_PBUF2_BASE_ADDR)
682 +
683 +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
684 + CLASS_PBUF0_HEADER_BASE_ADDR)
685 +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
686 + CLASS_PBUF2_HEADER_BASE_ADDR)
687 +
688 +#define CLASS_ROUTE_SIZE 128
689 +#define CLASS_MAX_ROUTE_SIZE 256
690 +#define CLASS_ROUTE_HASH_BITS 20
691 +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
692 +
693 +/* Can be configured */
694 +#define CLASS_ROUTE0_BASE_ADDR 0x400
695 +/* Can be configured */
696 +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
697 +/* Can be configured */
698 +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
699 +/* Can be configured */
700 +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
701 +
702 +#define CLASS_SA_SIZE 128
703 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
704 +/* not used */
705 +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
706 +/* not used */
707 +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
708 +/* not used */
709 +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
710 +
711 +/* general purpose free DMEM buffer, last portion of the 2K DMEM pbuf */
712 +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
713 + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
714 +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
715 + CLASS_SA_SIZE))
716 +
717 +#define TWO_LEVEL_ROUTE BIT(0)
718 +#define PHYNO_IN_HASH BIT(1)
719 +#define HW_ROUTE_FETCH BIT(3)
720 +#define HW_BRIDGE_FETCH BIT(5)
721 +#define IP_ALIGNED BIT(6)
722 +#define ARC_HIT_CHECK_EN BIT(7)
723 +#define CLASS_TOE BIT(11)
724 +#define HASH_NORMAL (0 << 12)
725 +#define HASH_CRC_PORT BIT(12)
726 +#define HASH_CRC_IP (2 << 12)
727 +#define HASH_CRC_PORT_IP (3 << 12)
728 +#define QB2BUS_LE BIT(15)
729 +
730 +#define TCP_CHKSUM_DROP BIT(0)
731 +#define UDP_CHKSUM_DROP BIT(1)
732 +#define IPV4_CHKSUM_DROP BIT(9)
733 +
734 +/*CLASS_HIF_PARSE bits*/
735 +#define HIF_PKT_CLASS_EN BIT(0)
736 +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
737 +
738 +struct class_cfg {
739 + u32 toe_mode;
740 + unsigned long route_table_baseaddr;
741 + u32 route_table_hash_bits;
742 + u32 pe_sys_clk_ratio;
743 + u32 resume;
744 +};
745 +
746 +#endif /* _CLASS_CSR_H_ */
747 --- /dev/null
748 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
749 @@ -0,0 +1,242 @@
750 +/*
751 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
752 + * Copyright 2017 NXP
753 + *
754 + * This program is free software; you can redistribute it and/or modify
755 + * it under the terms of the GNU General Public License as published by
756 + * the Free Software Foundation; either version 2 of the License, or
757 + * (at your option) any later version.
758 + *
759 + * This program is distributed in the hope that it will be useful,
760 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
761 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
762 + * GNU General Public License for more details.
763 + *
764 + * You should have received a copy of the GNU General Public License
765 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
766 + */
767 +
768 +#ifndef _EMAC_H_
769 +#define _EMAC_H_
770 +
771 +#include <linux/ethtool.h>
772 +
773 +#define EMAC_IEVENT_REG 0x004
774 +#define EMAC_IMASK_REG 0x008
775 +#define EMAC_R_DES_ACTIVE_REG 0x010
776 +#define EMAC_X_DES_ACTIVE_REG 0x014
777 +#define EMAC_ECNTRL_REG 0x024
778 +#define EMAC_MII_DATA_REG 0x040
779 +#define EMAC_MII_CTRL_REG 0x044
780 +#define EMAC_MIB_CTRL_STS_REG 0x064
781 +#define EMAC_RCNTRL_REG 0x084
782 +#define EMAC_TCNTRL_REG 0x0C4
783 +#define EMAC_PHY_ADDR_LOW 0x0E4
784 +#define EMAC_PHY_ADDR_HIGH 0x0E8
785 +#define EMAC_GAUR 0x120
786 +#define EMAC_GALR 0x124
787 +#define EMAC_TFWR_STR_FWD 0x144
788 +#define EMAC_RX_SECTION_FULL 0x190
789 +#define EMAC_RX_SECTION_EMPTY 0x194
790 +#define EMAC_TX_SECTION_EMPTY 0x1A0
791 +#define EMAC_TRUNC_FL 0x1B0
792 +
793 +#define RMON_T_DROP 0x200 /* Count of frames not counted correctly */
794 +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
795 +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
796 +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
797 +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
798 +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
799 +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
800 +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
801 +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
802 +#define RMON_T_COL 0x224 /* RMON TX collision count */
803 +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
804 +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
805 +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
806 +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
807 +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
808 +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
809 +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
810 +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
811 +#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
812 +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
813 +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
814 +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
815 +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
816 +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
817 +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
818 +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
819 +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
820 +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
821 +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
822 +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
823 +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
824 +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
825 +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
826 +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
827 +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
828 +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
829 +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
830 +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
831 +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
832 +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
833 +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
834 +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
835 +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
836 +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
837 +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
838 +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
839 +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
840 +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
841 +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
842 +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
843 +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
844 +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
845 +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
846 +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
847 +
848 +#define EMAC_SMAC_0_0 0x500 /* Supplemental MAC Address 0 (RW). */
849 +#define EMAC_SMAC_0_1 0x504 /* Supplemental MAC Address 0 (RW). */
850 +
851 +/* GEMAC definitions and settings */
852 +
853 +#define EMAC_PORT_0 0
854 +#define EMAC_PORT_1 1
855 +
856 +/* GEMAC Bit definitions */
857 +#define EMAC_IEVENT_HBERR 0x80000000
858 +#define EMAC_IEVENT_BABR 0x40000000
859 +#define EMAC_IEVENT_BABT 0x20000000
860 +#define EMAC_IEVENT_GRA 0x10000000
861 +#define EMAC_IEVENT_TXF 0x08000000
862 +#define EMAC_IEVENT_TXB 0x04000000
863 +#define EMAC_IEVENT_RXF 0x02000000
864 +#define EMAC_IEVENT_RXB 0x01000000
865 +#define EMAC_IEVENT_MII 0x00800000
866 +#define EMAC_IEVENT_EBERR 0x00400000
867 +#define EMAC_IEVENT_LC 0x00200000
868 +#define EMAC_IEVENT_RL 0x00100000
869 +#define EMAC_IEVENT_UN 0x00080000
870 +
871 +#define EMAC_IMASK_HBERR 0x80000000
872 +#define EMAC_IMASK_BABR 0x40000000
873 +#define EMAC_IMASKT_BABT 0x20000000
874 +#define EMAC_IMASK_GRA 0x10000000
875 +#define EMAC_IMASKT_TXF 0x08000000
876 +#define EMAC_IMASK_TXB 0x04000000
877 +#define EMAC_IMASKT_RXF 0x02000000
878 +#define EMAC_IMASK_RXB 0x01000000
879 +#define EMAC_IMASK_MII 0x00800000
880 +#define EMAC_IMASK_EBERR 0x00400000
881 +#define EMAC_IMASK_LC 0x00200000
882 +#define EMAC_IMASKT_RL 0x00100000
883 +#define EMAC_IMASK_UN 0x00080000
884 +
885 +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
886 +#define EMAC_RCNTRL_LOOP 0x00000001
887 +#define EMAC_RCNTRL_DRT 0x00000002
888 +#define EMAC_RCNTRL_MII_MODE 0x00000004
889 +#define EMAC_RCNTRL_PROM 0x00000008
890 +#define EMAC_RCNTRL_BC_REJ 0x00000010
891 +#define EMAC_RCNTRL_FCE 0x00000020
892 +#define EMAC_RCNTRL_RGMII 0x00000040
893 +#define EMAC_RCNTRL_SGMII 0x00000080
894 +#define EMAC_RCNTRL_RMII 0x00000100
895 +#define EMAC_RCNTRL_RMII_10T 0x00000200
896 +#define EMAC_RCNTRL_CRC_FWD 0x00004000
897 +
898 +#define EMAC_TCNTRL_GTS 0x00000001
899 +#define EMAC_TCNTRL_HBC 0x00000002
900 +#define EMAC_TCNTRL_FDEN 0x00000004
901 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
902 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
903 +
904 +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
905 +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
906 +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
907 +#define EMAC_ECNTRL_SLEEP 0x00000008
908 +#define EMAC_ECNTRL_SPEED 0x00000020
909 +#define EMAC_ECNTRL_DBSWAP 0x00000100
910 +
911 +#define EMAC_X_WMRK_STRFWD 0x00000100
912 +
913 +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
914 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
915 +
916 +#define EMAC_RX_SECTION_EMPTY_V 0x00010006
917 +/*
918 + * The possible operating speeds of the MAC, currently supporting 10, 100 and
919 + * 1000Mb modes.
920 + */
921 +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
922 +
923 +/* MII-related definitions */
924 +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
925 +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
926 +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a Clause 45 read operation */
927 +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
928 +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a Clause 45 write operation */
929 +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
930 +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
931 +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
932 +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
933 +
934 +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
935 +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
936 +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
937 +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
938 +
939 +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
940 + EMAC_MII_DATA_RA_SHIFT)
941 +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
942 + EMAC_MII_DATA_PA_SHIFT)
943 +#define EMAC_MII_DATA(v) ((v) & 0xffff)
944 +
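
A minimal sketch of how these fields compose a Clause 22 read command for
EMAC_MII_DATA_REG (phy_addr and reg_addr are illustrative; waiting for the
EMAC_IEVENT_MII completion event afterwards is omitted):

    u32 mii_cmd = EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
                  EMAC_MII_DATA_PA(phy_addr) | EMAC_MII_DATA_RA(reg_addr) |
                  EMAC_MII_DATA_TA;
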
945 +#define EMAC_MII_SPEED_SHIFT 1
946 +#define EMAC_HOLDTIME_SHIFT 8
947 +#define EMAC_HOLDTIME_MASK 0x7
948 +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
949 + EMAC_HOLDTIME_SHIFT)
950 +
951 +/*
952 + * The Address organisation for the MAC device. All addresses are split into
953 + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
954 + * the address and the other field holds the high order bits - this may be 16-bits
955 + * in the case of MAC addresses, or 32-bits for the hash address.
956 + * In terms of memory storage, the first item (bottom) is assumed to be at a
957 + * lower address location than 'top'. i.e. top should be at address location of
958 + * 'bottom' + 4 bytes.
959 + */
960 +struct pfe_mac_addr {
961 + u32 bottom; /* Lower 32-bits of address. */
962 + u32 top; /* Upper 32-bits of address. */
963 +};
964 +
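
A hedged sketch of packing a 6-byte MAC address into this layout (the byte
order is an assumption based on the comment above, with mac[0] in the least
significant byte of 'bottom'; 'mac' is an illustrative array):

    static void example_pack_mac(struct pfe_mac_addr *a, const u8 mac[6])
    {
            a->bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
                        ((u32)mac[3] << 24);
            a->top    = mac[4] | (mac[5] << 8);
    }
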
965 +/*
966 + * The following is the organisation of the address filters section of the MAC
967 + * registers. The Cadence MAC contains four possible specific address match
968 + * addresses, if an incoming frame corresponds to any one of these four
969 + * addresses then the frame will be copied to memory.
970 + * It is not necessary for all four of the address match registers to be
971 + * programmed; this is application dependent.
972 + */
973 +struct spec_addr {
974 + struct pfe_mac_addr one; /* Specific address register 1. */
975 + struct pfe_mac_addr two; /* Specific address register 2. */
976 + struct pfe_mac_addr three; /* Specific address register 3. */
977 + struct pfe_mac_addr four; /* Specific address register 4. */
978 +};
979 +
980 +struct gemac_cfg {
981 + u32 mode;
982 + u32 speed;
983 + u32 duplex;
984 +};
985 +
986 +/* EMAC Hash size */
987 +#define EMAC_HASH_REG_BITS 64
988 +
989 +#define EMAC_SPEC_ADDR_MAX 4
990 +
991 +#endif /* _EMAC_H_ */
992 --- /dev/null
993 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
994 @@ -0,0 +1,86 @@
995 +/*
996 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
997 + * Copyright 2017 NXP
998 + *
999 + * This program is free software; you can redistribute it and/or modify
1000 + * it under the terms of the GNU General Public License as published by
1001 + * the Free Software Foundation; either version 2 of the License, or
1002 + * (at your option) any later version.
1003 + *
1004 + * This program is distributed in the hope that it will be useful,
1005 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1006 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1007 + * GNU General Public License for more details.
1008 + *
1009 + * You should have received a copy of the GNU General Public License
1010 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1011 + */
1012 +
1013 +#ifndef _GPI_H_
1014 +#define _GPI_H_
1015 +
1016 +#define GPI_VERSION 0x00
1017 +#define GPI_CTRL 0x04
1018 +#define GPI_RX_CONFIG 0x08
1019 +#define GPI_HDR_SIZE 0x0c
1020 +#define GPI_BUF_SIZE 0x10
1021 +#define GPI_LMEM_ALLOC_ADDR 0x14
1022 +#define GPI_LMEM_FREE_ADDR 0x18
1023 +#define GPI_DDR_ALLOC_ADDR 0x1c
1024 +#define GPI_DDR_FREE_ADDR 0x20
1025 +#define GPI_CLASS_ADDR 0x24
1026 +#define GPI_DRX_FIFO 0x28
1027 +#define GPI_TRX_FIFO 0x2c
1028 +#define GPI_INQ_PKTPTR 0x30
1029 +#define GPI_DDR_DATA_OFFSET 0x34
1030 +#define GPI_LMEM_DATA_OFFSET 0x38
1031 +#define GPI_TMLF_TX 0x4c
1032 +#define GPI_DTX_ASEQ 0x50
1033 +#define GPI_FIFO_STATUS 0x54
1034 +#define GPI_FIFO_DEBUG 0x58
1035 +#define GPI_TX_PAUSE_TIME 0x5c
1036 +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
1037 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
1038 +#define GPI_TOE_CHKSUM_EN 0x68
1039 +#define GPI_OVERRUN_DROPCNT 0x6c
1040 +#define GPI_CSR_MTIP_PAUSE_REG 0x74
1041 +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
1042 +#define GPI_CSR_RX_CNT 0x7c
1043 +#define GPI_CSR_TX_CNT 0x80
1044 +#define GPI_CSR_DEBUG1 0x84
1045 +#define GPI_CSR_DEBUG2 0x88
1046 +
1047 +struct gpi_cfg {
1048 + u32 lmem_rtry_cnt;
1049 + u32 tmlf_txthres;
1050 + u32 aseq_len;
1051 + u32 mtip_pause_reg;
1052 +};
1053 +
1054 +/* GPI common defines */
1055 +#define GPI_LMEM_BUF_EN 0x1
1056 +#define GPI_DDR_BUF_EN 0x1
1057 +
1058 +/* EGPI 1 defines */
1059 +#define EGPI1_LMEM_RTRY_CNT 0x40
1060 +#define EGPI1_TMLF_TXTHRES 0xBC
1061 +#define EGPI1_ASEQ_LEN 0x50
1062 +
1063 +/* EGPI 2 defines */
1064 +#define EGPI2_LMEM_RTRY_CNT 0x40
1065 +#define EGPI2_TMLF_TXTHRES 0xBC
1066 +#define EGPI2_ASEQ_LEN 0x40
1067 +
1068 +/* EGPI 3 defines */
1069 +#define EGPI3_LMEM_RTRY_CNT 0x40
1070 +#define EGPI3_TMLF_TXTHRES 0xBC
1071 +#define EGPI3_ASEQ_LEN 0x40
1072 +
1073 +/* HGPI defines */
1074 +#define HGPI_LMEM_RTRY_CNT 0x40
1075 +#define HGPI_TMLF_TXTHRES 0xBC
1076 +#define HGPI_ASEQ_LEN 0x40
1077 +
1078 +#define EGPI_PAUSE_TIME 0x000007D0
1079 +#define EGPI_PAUSE_ENABLE 0x40000000
1080 +#endif /* _GPI_H_ */
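
A hedged sketch tying struct gpi_cfg to the EGPI1 defaults above (the field
pairings are assumptions from the naming; mtip_pause_reg is block-specific
and left at zero in this sketch):

    static const struct gpi_cfg egpi1_cfg_sketch = {
            .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
            .tmlf_txthres  = EGPI1_TMLF_TXTHRES,
            .aseq_len      = EGPI1_ASEQ_LEN,
    };
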
1081 --- /dev/null
1082 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
1083 @@ -0,0 +1,100 @@
1084 +/*
1085 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1086 + * Copyright 2017 NXP
1087 + *
1088 + * This program is free software; you can redistribute it and/or modify
1089 + * it under the terms of the GNU General Public License as published by
1090 + * the Free Software Foundation; either version 2 of the License, or
1091 + * (at your option) any later version.
1092 + *
1093 + * This program is distributed in the hope that it will be useful,
1094 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1095 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1096 + * GNU General Public License for more details.
1097 + *
1098 + * You should have received a copy of the GNU General Public License
1099 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1100 + */
1101 +
1102 +#ifndef _HIF_H_
1103 +#define _HIF_H_
1104 +
1105 +/* @file hif.h.
1106 + * hif - PFE HIF block control and status registers.
1107 + * Mapped on CBUS and accessible from all PEs and the ARM.
1108 + */
1109 +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
1110 +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
1111 +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
1112 +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
1113 +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
1114 +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
1115 +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
1116 +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
1117 +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
1118 +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
1119 +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
1120 +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
1121 +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
1122 +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
1123 +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
1124 +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
1125 +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
1126 +
1127 +/* HIF_INT_SRC / HIF_INT_ENABLE control bits */
1128 +#define HIF_INT BIT(0)
1129 +#define HIF_RXBD_INT BIT(1)
1130 +#define HIF_RXPKT_INT BIT(2)
1131 +#define HIF_TXBD_INT BIT(3)
1132 +#define HIF_TXPKT_INT BIT(4)
1133 +
1134 +/* HIF_TX_CTRL bits */
1135 +#define HIF_CTRL_DMA_EN BIT(0)
1136 +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
1137 +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
1138 +
1139 +/* HIF_RX_STATUS bits */
1140 +#define BDP_CSR_RX_DMA_ACTV BIT(16)
1141 +
1142 +/* HIF_INT_ENABLE bits */
1143 +#define HIF_INT_EN BIT(0)
1144 +#define HIF_RXBD_INT_EN BIT(1)
1145 +#define HIF_RXPKT_INT_EN BIT(2)
1146 +#define HIF_TXBD_INT_EN BIT(3)
1147 +#define HIF_TXPKT_INT_EN BIT(4)
1148 +
1149 +/* HIF_POLL_CTRL bits */
1150 +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
1151 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
1152 +
1153 +/* HIF_INT_COAL bits */
1154 +#define HIF_INT_COAL_ENABLE BIT(31)
1155 +
1156 +/* Buffer descriptor control bits */
1157 +#define BD_CTRL_BUFLEN_MASK 0x3fff
1158 +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
1159 +#define BD_CTRL_CBD_INT_EN BIT(16)
1160 +#define BD_CTRL_PKT_INT_EN BIT(17)
1161 +#define BD_CTRL_LIFM BIT(18)
1162 +#define BD_CTRL_LAST_BD BIT(19)
1163 +#define BD_CTRL_DIR BIT(20)
1164 +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
1165 +#define BD_CTRL_PKT_XFER BIT(24)
1166 +#define BD_CTRL_DESC_EN BIT(31)
1167 +#define BD_CTRL_PARSE_DISABLE BIT(25)
1168 +#define BD_CTRL_BRFETCH_DISABLE BIT(26)
1169 +#define BD_CTRL_RTFETCH_DISABLE BIT(27)
1170 +
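
As a sketch, a TX buffer descriptor control word for a single-buffer frame
would combine these bits roughly as follows ('len' is illustrative; the exact
flag usage is up to the HIF code in pfe_hif.c / pfe_hif_lib.c):

    u32 bd_ctrl = BD_CTRL_DESC_EN | BD_CTRL_LIFM |
                  BD_CTRL_PKT_INT_EN | BD_BUF_LEN(len);
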
1171 +/* Buffer descriptor status bits */
1172 +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
1173 +#define BD_STATUS_DIR_PROC_ID BIT(16)
1174 +#define BD_STATUS_CONN_ID_EN BIT(17)
1175 +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
1176 +#define BD_STATUS_LE_DATA BIT(21)
1177 +#define BD_STATUS_CHKSUM_EN BIT(22)
1178 +
1179 +/* HIF Buffer descriptor status bits */
1180 +#define DIR_PROC_ID BIT(16)
1181 +#define PROC_ID(id) ((id) << 18)
1182 +
1183 +#endif /* _HIF_H_ */
1184 --- /dev/null
1185 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1186 @@ -0,0 +1,50 @@
1187 +/*
1188 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1189 + * Copyright 2017 NXP
1190 + *
1191 + * This program is free software; you can redistribute it and/or modify
1192 + * it under the terms of the GNU General Public License as published by
1193 + * the Free Software Foundation; either version 2 of the License, or
1194 + * (at your option) any later version.
1195 + *
1196 + * This program is distributed in the hope that it will be useful,
1197 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1198 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1199 + * GNU General Public License for more details.
1200 + *
1201 + * You should have received a copy of the GNU General Public License
1202 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1203 + */
1204 +
1205 +#ifndef _HIF_NOCPY_H_
1206 +#define _HIF_NOCPY_H_
1207 +
1208 +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
1209 +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
1210 +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
1211 +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
1212 +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
1213 +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
1214 +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
1215 +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
1216 +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
1217 +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
1218 +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
1219 +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
1220 +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
1221 +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
1222 +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
1223 +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
1224 +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
1225 +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
1226 +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
1227 +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
1228 +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
1229 +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
1230 +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
1231 +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
1232 +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
1233 +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
1234 +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
1235 +
1236 +#endif /* _HIF_NOCPY_H_ */
1237 --- /dev/null
1238 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1239 @@ -0,0 +1,168 @@
1240 +/*
1241 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1242 + * Copyright 2017 NXP
1243 + *
1244 + * This program is free software; you can redistribute it and/or modify
1245 + * it under the terms of the GNU General Public License as published by
1246 + * the Free Software Foundation; either version 2 of the License, or
1247 + * (at your option) any later version.
1248 + *
1249 + * This program is distributed in the hope that it will be useful,
1250 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1251 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1252 + * GNU General Public License for more details.
1253 + *
1254 + * You should have received a copy of the GNU General Public License
1255 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1256 + */
1257 +
1258 +#ifndef _TMU_CSR_H_
1259 +#define _TMU_CSR_H_
1260 +
1261 +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
1262 +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
1263 +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
1264 +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
1265 +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
1266 +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
1267 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1268 +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
1269 +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
1270 +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
1271 +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
1272 +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
1273 +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
1274 +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
1275 +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
1276 +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
1277 +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
1278 +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
1279 +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
1280 +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
1281 +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
1282 +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
1283 +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
1284 +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
1285 +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
1286 +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
1287 +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
1288 +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
1289 +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
1290 +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
1291 +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
1292 +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
1293 +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
1294 +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
1295 +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
1296 +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
1297 +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
1298 +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
1299 +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
1300 +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
1301 +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
1302 +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
1303 +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
1304 +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
1305 +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
1306 +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
1307 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1308 + * This is a global Enable for all schedulers in PHY0
1309 + */
1310 +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
1311 +
1312 +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
1313 +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
1314 +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
1315 +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
1316 +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
1317 +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
1318 +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
1319 +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
1320 +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
1321 +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
1322 +
1323 +/* [31] Mem Access Command: 0 = internal memory read, 1 = internal memory
1324 + * write. [27:24] Byte enables of the internal memory access. [23:0] Address of
1325 + * the internal memory. This address is used to access both the PM and DM of
1326 + * all the PEs.
1327 + */
1328 +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
1329 +
1330 +/* Internal Memory Access Write Data */
1331 +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
1332 +/* Internal Memory Access Read Data. The commands are blocked
1333 + * at the mem_access only
1334 + */
1335 +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
1336 +
1337 +/* [31:0] PHY0 in queue address (must be initialized with one of the
1338 + * xxx_INQ_PKTPTR cbus addresses)
1339 + */
1340 +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
1341 +/* [31:0] PHY1 in queue address (must be initialized with one of the
1342 + * xxx_INQ_PKTPTR cbus addresses)
1343 + */
1344 +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
1345 +/* [31:0] PHY2 in queue address (must be initialized with one of the
1346 + * xxx_INQ_PKTPTR cbus addresses)
1347 + */
1348 +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
1349 +/* [31:0] PHY3 in queue address (must be initialized with one of the
1350 + * xxx_INQ_PKTPTR cbus addresses)
1351 + */
1352 +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
1353 +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
1354 +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
1355 +
1356 +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
1357 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1358 +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
1359 +
1360 +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
1361 +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
1362 +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
1363 +/* [31:0] PHY4 in queue address (must be initialized with one of the
1364 + * xxx_INQ_PKTPTR cbus addresses)
1365 + */
1366 +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
1367 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1368 + * This is a global enable for all schedulers in PHY1
1369 + */
1370 +#define TMU_TDQ1_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x138)
1371 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1372 + * This is a global enable for all schedulers in PHY2
1373 + */
1374 +#define TMU_TDQ2_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x13c)
1375 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1376 + * This is a global enable for all schedulers in PHY3
1377 + */
1378 +#define TMU_TDQ3_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x140)
1379 +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
1380 +/* [31:0] PHY5 in queue address (must be initialized with one of the
1381 + * xxx_INQ_PKTPTR cbus addresses)
1382 + */
1383 +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
1384 +
1385 +#define SW_RESET BIT(0) /* Global software reset */
1386 +#define INQ_RESET BIT(2)
1387 +#define TEQ_RESET BIT(3)
1388 +#define TDQ_RESET BIT(4)
1389 +#define PE_RESET BIT(5)
1390 +#define MEM_INIT BIT(6)
1391 +#define MEM_INIT_DONE BIT(7)
1392 +#define LLM_INIT BIT(8)
1393 +#define LLM_INIT_DONE BIT(9)
1394 +#define ECC_MEM_INIT_DONE BIT(10)
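The reset and init bits above are driven through the TMU_CTRL register. As a rough illustration of how they combine (the authoritative sequence is the tmu_init()/tmu_reset() code in pfe_hal.c; this sketch omits timeouts and the per-PHY setup):

	/* Illustrative sketch only: request memory and link-list-memory
	 * init via TMU_CTRL, then spin until the done flags latch.
	 */
	static void tmu_mem_init_sketch(void)
	{
		u32 done = MEM_INIT_DONE | LLM_INIT_DONE;

		writel(MEM_INIT | LLM_INIT, TMU_CTRL);
		while ((readl(TMU_CTRL) & done) != done)
			;
	}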
1395 +
1396 +struct tmu_cfg {
1397 + u32 pe_sys_clk_ratio;
1398 + unsigned long llm_base_addr;
1399 + u32 llm_queue_len;
1400 +};
1401 +
1402 +/* Not HW related; pfe_ctrl / pfe common defines */
1403 +#define DEFAULT_MAX_QDEPTH	80
1404 +#define DEFAULT_Q0_QDEPTH	511 /* We keep one large queue for host TX QoS */
1405 +#define DEFAULT_TMU3_QDEPTH 127
1406 +
1407 +#endif /* _TMU_CSR_H_ */
1408 --- /dev/null
1409 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1410 @@ -0,0 +1,61 @@
1411 +/*
1412 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1413 + * Copyright 2017 NXP
1414 + *
1415 + * This program is free software; you can redistribute it and/or modify
1416 + * it under the terms of the GNU General Public License as published by
1417 + * the Free Software Foundation; either version 2 of the License, or
1418 + * (at your option) any later version.
1419 + *
1420 + * This program is distributed in the hope that it will be useful,
1421 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1422 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1423 + * GNU General Public License for more details.
1424 + *
1425 + * You should have received a copy of the GNU General Public License
1426 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1427 + */
1428 +
1429 +#ifndef _UTIL_CSR_H_
1430 +#define _UTIL_CSR_H_
1431 +
1432 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1433 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1434 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1435 +
1436 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1437 +
1438 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1439 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1440 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1441 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1442 +
1443 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1444 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1445 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1446 +
1447 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1448 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1449 +
1450 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1451 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1452 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1453 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1454 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1455 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1456 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1457 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1458 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1459 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1460 +
1461 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1462 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1463 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1464 +
1465 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1466 +
1467 +struct util_cfg {
1468 + u32 pe_sys_clk_ratio;
1469 +};
1470 +
1471 +#endif /* _UTIL_CSR_H_ */
1472 --- /dev/null
1473 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1474 @@ -0,0 +1,373 @@
1475 +/*
1476 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1477 + * Copyright 2017 NXP
1478 + *
1479 + * This program is free software; you can redistribute it and/or modify
1480 + * it under the terms of the GNU General Public License as published by
1481 + * the Free Software Foundation; either version 2 of the License, or
1482 + * (at your option) any later version.
1483 + *
1484 + * This program is distributed in the hope that it will be useful,
1485 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1486 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1487 + * GNU General Public License for more details.
1488 + *
1489 + * You should have received a copy of the GNU General Public License
1490 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1491 + */
1492 +
1493 +#ifndef _PFE_H_
1494 +#define _PFE_H_
1495 +
1496 +#include "cbus.h"
1497 +
1498 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1499 +/*
1500 + * Only valid for mem access register interface
1501 + */
1502 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1503 +#define CLASS_DMEM_SIZE 0x00002000
1504 +#define CLASS_IMEM_SIZE 0x00008000
1505 +
1506 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1507 +/*
1508 + * Only valid for mem access register interface
1509 + */
1510 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1511 +#define TMU_DMEM_SIZE 0x00000800
1512 +#define TMU_IMEM_SIZE 0x00002000
1513 +
1514 +#define UTIL_DMEM_BASE_ADDR 0x00000000
1515 +#define UTIL_DMEM_SIZE 0x00002000
1516 +
1517 +#define PE_LMEM_BASE_ADDR 0xc3010000
1518 +#define PE_LMEM_SIZE 0x8000
1519 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1520 +
1521 +#define DMEM_BASE_ADDR 0x00000000
1522 +#define DMEM_SIZE 0x2000 /* TMU has less... */
1523 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
1524 +
1525 +#define PMEM_BASE_ADDR 0x00010000
1526 +#define PMEM_SIZE 0x8000 /* TMU has less... */
1527 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
1528 +
1529 +/* These check memory ranges from the PE's point of view / memory map */
1530 +#define IS_DMEM(addr, len) \
1531 + ({ typeof(addr) addr_ = (addr); \
1532 + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
1533 + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1534 +
1535 +#define IS_PMEM(addr, len) \
1536 + ({ typeof(addr) addr_ = (addr); \
1537 + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
1538 + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1539 +
1540 +#define IS_PE_LMEM(addr, len) \
1541 + ({ typeof(addr) addr_ = (addr); \
1542 + ((unsigned long)(addr_) >= \
1543 + PE_LMEM_BASE_ADDR) && \
1544 + (((unsigned long)(addr_) + \
1545 + (len)) <= PE_LMEM_END); })
1546 +
1547 +#define IS_PFE_LMEM(addr, len) \
1548 + ({ typeof(addr) addr_ = (addr); \
1549 + ((unsigned long)(addr_) >= \
1550 + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
1551 + (((unsigned long)(addr_) + (len)) <= \
1552 + CBUS_VIRT_TO_PFE(LMEM_END)); })
1553 +
1554 +#define __IS_PHYS_DDR(addr, len) \
1555 + ({ typeof(addr) addr_ = (addr); \
1556 + ((unsigned long)(addr_) >= \
1557 + DDR_PHYS_BASE_ADDR) && \
1558 + (((unsigned long)(addr_) + (len)) <= \
1559 + DDR_PHYS_END); })
1560 +
1561 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
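These range checks validate host-supplied addresses against the PE memory map before an access is issued; a hedged sketch (pe_write_ok() is a hypothetical helper, not part of the driver):

	/* Hypothetical validation: only allow writes that fall entirely
	 * inside PE data memory or PE local memory.
	 */
	static int pe_write_ok(unsigned long addr, unsigned int len)
	{
		return IS_DMEM(addr, len) || IS_PE_LMEM(addr, len);
	}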
1562 +
1563 +/*
1564 + * If using a run-time virtual address for the cbus base address use this code
1565 + */
1566 +extern void *cbus_base_addr;
1567 +extern void *ddr_base_addr;
1568 +extern unsigned long ddr_phys_base_addr;
1569 +extern unsigned int ddr_size;
1570 +
1571 +#define CBUS_BASE_ADDR cbus_base_addr
1572 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
1573 +#define DDR_BASE_ADDR ddr_base_addr
1574 +#define DDR_SIZE ddr_size
1575 +
1576 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1577 +
1578 +#define LS1012A_PFE_RESET_WA	/*
1579 +				 * PFE doesn't have a global reset; re-init
1580 +				 * should take care of a few things to make
1581 +				 * PFE functional after reset
1582 +				 */
1583 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
1584 + * as seen by PE's.
1585 + */
1586 +/* CBUS physical base address as seen by PE's. */
1587 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
1588 +
1589 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
1590 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
1591 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1592 + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1593 +/* Translates to PFE address map */
1594 +
1595 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1596 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1597 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1598 +
1599 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
1600 + PFE_CBUS_PHYS_BASE_ADDR)
1601 +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
1602 + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
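A worked example of the translations above, with illustrative values: the DDR macros just toggle bit 31, while the CBUS macros rebase between the host mapping and the 0xc0000000 window the PEs see.

	/* Illustrative values only:
	 *   DDR_PHYS_TO_PFE(0x83400000) == 0x03400000   (bit 31 cleared)
	 *   DDR_PFE_TO_PHYS(0x03400000) == 0x83400000   (bit 31 set)
	 *   CBUS_VIRT_TO_PFE(v) == (v - CBUS_BASE_ADDR) + 0xc0000000
	 *   CBUS_PFE_TO_VIRT(p) == (p - 0xc0000000) + CBUS_BASE_ADDR
	 */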
1603 +
1604 +/* The part of the code below is used by the QoS control driver on the host */
1605 +#define TMU_APB_BASE_ADDR	0xc1000000	/* TMU base address seen by
1606 +						 * PEs
1607 +						 */
1608 +
1609 +enum {
1610 + CLASS0_ID = 0,
1611 + CLASS1_ID,
1612 + CLASS2_ID,
1613 + CLASS3_ID,
1614 + CLASS4_ID,
1615 + CLASS5_ID,
1616 + TMU0_ID,
1617 + TMU1_ID,
1618 + TMU2_ID,
1619 + TMU3_ID,
1620 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1621 + UTIL_ID,
1622 +#endif
1623 + MAX_PE
1624 +};
1625 +
1626 +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1627 + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1628 + BIT(CLASS4_ID) | BIT(CLASS5_ID))
1629 +#define CLASS_MAX_ID CLASS5_ID
1630 +
1631 +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1632 + BIT(TMU3_ID))
1633 +
1634 +#define TMU_MAX_ID TMU3_ID
1635 +
1636 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1637 +#define UTIL_MASK BIT(UTIL_ID)
1638 +#endif
1639 +
1640 +struct pe_status {
1641 + u32 cpu_state;
1642 + u32 activity_counter;
1643 + u32 rx;
1644 + union {
1645 + u32 tx;
1646 + u32 tmu_qstatus;
1647 + };
1648 + u32 drop;
1649 +#if defined(CFG_PE_DEBUG)
1650 + u32 debug_indicator;
1651 + u32 debug[16];
1652 +#endif
1653 +} __aligned(16);
1654 +
1655 +struct pe_sync_mailbox {
1656 + u32 stop;
1657 + u32 stopped;
1658 +};
1659 +
1660 +/* Drop counter definitions */
1661 +
1662 +#define CLASS_NUM_DROP_COUNTERS 13
1663 +#define UTIL_NUM_DROP_COUNTERS 8
1664 +
1665 +/* PE information.
1666 + * Structure containing PE-specific information. It is used to create
1667 + * generic C functions common to all PEs.
1668 + * Before using the library functions this structure needs to be initialized
1669 + * with the virtual addresses of the different registers
1670 + * (according to the ARM MMU mapping). The default initialization supports a
1671 + * virtual == physical mapping.
1672 + */
1673 +struct pe_info {
1674 + u32 dmem_base_addr; /* PE's dmem base address */
1675 + u32 pmem_base_addr; /* PE's pmem base address */
1676 + u32 pmem_size; /* PE's pmem size */
1677 +
1678 + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1679 + * address
1680 + */
1681 + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
1682 + * address
1683 + */
1684 + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1685 + * address
1686 + */
1687 +};
1688 +
1689 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1690 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1691 +
1692 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1693 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1694 +
1695 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1696 +
1697 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1698 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1699 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1700 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1701 +void class_bus_write(u32 val, u32 addr, u8 size);
1702 +u32 class_bus_read(u32 addr, u8 size);
1703 +
1704 +#define class_bus_readl(addr) class_bus_read(addr, 4)
1705 +#define class_bus_readw(addr) class_bus_read(addr, 2)
1706 +#define class_bus_readb(addr) class_bus_read(addr, 1)
1707 +
1708 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
1709 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
1710 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
1711 +
1712 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
1713 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
1714 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
1715 +
1716 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
1717 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
1718 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
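For example, the control code reads and writes PE DMEM words through these accessors, byte-swapping with cpu_to_be32()/be32_to_cpu() around them (see pe_sync_stop() in pfe_ctrl.c). A hedged sketch of a single mailbox read:

	/* Sketch: read one DMEM word from class PE 0. CLASS_DM_SYNC_MBOX
	 * is the DMEM offset defined in pfe_ctrl.h.
	 */
	u32 stop_flag = be32_to_cpu(pe_dmem_readl(CLASS0_ID,
						  CLASS_DM_SYNC_MBOX));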
1719 +
1720 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1721 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1722 + struct device *dev);
1723 +
1724 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1725 + unsigned int ddr_size);
1726 +void bmu_init(void *base, struct BMU_CFG *cfg);
1727 +void bmu_reset(void *base);
1728 +void bmu_enable(void *base);
1729 +void bmu_disable(void *base);
1730 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1731 +
1732 +/*
1733 + * An enumerated type for loopback values. This can be one of three values:
1734 + * no loopback (normal operation), local loopback through the internal
1735 + * loopback module of the MAC, or PHY loopback through the external PHY.
1736 + */
1737 +#ifndef __MAC_LOOP_ENUM__
1738 +#define __MAC_LOOP_ENUM__
1739 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1740 +#endif
1741 +
1742 +void gemac_init(void *base, void *config);
1743 +void gemac_disable_rx_checksum_offload(void *base);
1744 +void gemac_enable_rx_checksum_offload(void *base);
1745 +void gemac_set_mdc_div(void *base, int mdc_div);
1746 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1747 +void gemac_set_duplex(void *base, int duplex);
1748 +void gemac_set_mode(void *base, int mode);
1749 +void gemac_enable(void *base);
1750 +void gemac_tx_disable(void *base);
1751 +void gemac_tx_enable(void *base);
1752 +void gemac_disable(void *base);
1753 +void gemac_reset(void *base);
1754 +void gemac_set_address(void *base, struct spec_addr *addr);
1755 +struct spec_addr gemac_get_address(void *base);
1756 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1757 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1758 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1759 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1760 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1761 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1762 + unsigned int entry_index);
1763 +void gemac_clear_laddr1(void *base);
1764 +void gemac_clear_laddr2(void *base);
1765 +void gemac_clear_laddr3(void *base);
1766 +void gemac_clear_laddr4(void *base);
1767 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1768 +struct pfe_mac_addr gemac_get_hash(void *base);
1769 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1770 +struct pfe_mac_addr gem_get_laddr1(void *base);
1771 +struct pfe_mac_addr gem_get_laddr2(void *base);
1772 +struct pfe_mac_addr gem_get_laddr3(void *base);
1773 +struct pfe_mac_addr gem_get_laddr4(void *base);
1774 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1775 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1776 +void gemac_allow_broadcast(void *base);
1777 +void gemac_no_broadcast(void *base);
1778 +void gemac_enable_1536_rx(void *base);
1779 +void gemac_disable_1536_rx(void *base);
1780 +void gemac_set_rx_max_fl(void *base, int mtu);
1781 +void gemac_enable_rx_jmb(void *base);
1782 +void gemac_disable_rx_jmb(void *base);
1783 +void gemac_enable_stacked_vlan(void *base);
1784 +void gemac_disable_stacked_vlan(void *base);
1785 +void gemac_enable_pause_rx(void *base);
1786 +void gemac_disable_pause_rx(void *base);
1787 +void gemac_enable_copy_all(void *base);
1788 +void gemac_disable_copy_all(void *base);
1789 +void gemac_set_bus_width(void *base, int width);
1790 +void gemac_set_wol(void *base, u32 wol_conf);
1791 +
1792 +void gpi_init(void *base, struct gpi_cfg *cfg);
1793 +void gpi_reset(void *base);
1794 +void gpi_enable(void *base);
1795 +void gpi_disable(void *base);
1796 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1797 +
1798 +void class_init(struct class_cfg *cfg);
1799 +void class_reset(void);
1800 +void class_enable(void);
1801 +void class_disable(void);
1802 +void class_set_config(struct class_cfg *cfg);
1803 +
1804 +void tmu_reset(void);
1805 +void tmu_init(struct tmu_cfg *cfg);
1806 +void tmu_enable(u32 pe_mask);
1807 +void tmu_disable(u32 pe_mask);
1808 +u32 tmu_qstatus(u32 if_id);
1809 +u32 tmu_pkts_processed(u32 if_id);
1810 +
1811 +void util_init(struct util_cfg *cfg);
1812 +void util_reset(void);
1813 +void util_enable(void);
1814 +void util_disable(void);
1815 +
1816 +void hif_init(void);
1817 +void hif_tx_enable(void);
1818 +void hif_tx_disable(void);
1819 +void hif_rx_enable(void);
1820 +void hif_rx_disable(void);
1821 +
1822 +/* Get Chip Revision level
1823 + *
1824 + */
1825 +static inline unsigned int CHIP_REVISION(void)
1826 +{
1827 +	/* For LS1012A, always return 1 */
1828 + return 1;
1829 +}
1830 +
1831 +/* Start HIF rx DMA
1832 + *
1833 + */
1834 +static inline void hif_rx_dma_start(void)
1835 +{
1836 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1837 +}
1838 +
1839 +/* Start HIF tx DMA
1840 + *
1841 + */
1842 +static inline void hif_tx_dma_start(void)
1843 +{
1844 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1845 +}
1846 +
1847 +#endif /* _PFE_H_ */
1848 --- /dev/null
1849 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1850 @@ -0,0 +1,238 @@
1851 +/*
1852 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1853 + * Copyright 2017 NXP
1854 + *
1855 + * This program is free software; you can redistribute it and/or modify
1856 + * it under the terms of the GNU General Public License as published by
1857 + * the Free Software Foundation; either version 2 of the License, or
1858 + * (at your option) any later version.
1859 + *
1860 + * This program is distributed in the hope that it will be useful,
1861 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1862 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1863 + * GNU General Public License for more details.
1864 + *
1865 + * You should have received a copy of the GNU General Public License
1866 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1867 + */
1868 +
1869 +#include <linux/kernel.h>
1870 +#include <linux/sched.h>
1871 +#include <linux/module.h>
1872 +#include <linux/list.h>
1873 +#include <linux/kthread.h>
1874 +
1875 +#include "pfe_mod.h"
1876 +#include "pfe_ctrl.h"
1877 +
1878 +#define TIMEOUT_MS 1000
1879 +
1880 +int relax(unsigned long end)
1881 +{
1882 + if (time_after(jiffies, end)) {
1883 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
1884 + return -1;
1885 +
1886 + if (need_resched())
1887 + schedule();
1888 + }
1889 +
1890 + return 0;
1891 +}
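relax() folds a hard timeout and cooperative scheduling into a single call; the typical polling pattern, mirrored by pe_sync_stop() and pe_reset_all() below, looks like this (sketch; condition_met() is hypothetical):

	unsigned long end = jiffies + 2;

	while (!condition_met()) {
		if (relax(end) < 0)	/* > TIMEOUT_MS past the deadline */
			return -ETIMEDOUT;
	}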
1892 +
1893 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
1894 +{
1895 + int id;
1896 +
1897 + mutex_lock(&ctrl->mutex);
1898 +
1899 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
1900 + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
1901 +
1902 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1903 + if (id == TMU2_ID)
1904 + continue;
1905 + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
1906 + }
1907 +
1908 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1909 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
1910 +#endif
1911 + mutex_unlock(&ctrl->mutex);
1912 +}
1913 +
1914 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
1915 +{
1916 + int pe_mask = CLASS_MASK | TMU_MASK;
1917 +
1918 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1919 + pe_mask |= UTIL_MASK;
1920 +#endif
1921 + mutex_lock(&ctrl->mutex);
1922 + pe_start(&pfe->ctrl, pe_mask);
1923 + mutex_unlock(&ctrl->mutex);
1924 +}
1925 +
1926 +/* PE sync stop.
1927 + * Stops packet processing for a list of PE's (specified using a bitmask).
1928 + * The caller must hold ctrl->mutex.
1929 + *
1930 + * @param ctrl Control context
1931 + * @param pe_mask Mask of PE id's to stop
1932 + *
1933 + */
1934 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
1935 +{
1936 + struct pe_sync_mailbox *mbox;
1937 + int pe_stopped = 0;
1938 + unsigned long end = jiffies + 2;
1939 + int i;
1940 +
1941 +	pe_mask &= 0x2FF; /* Exclude UTIL + TMU2 */
1942 +
1943 + for (i = 0; i < MAX_PE; i++)
1944 + if (pe_mask & (1 << i)) {
1945 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1946 +
1947 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
1948 + long)&mbox->stop, 4);
1949 + }
1950 +
1951 + while (pe_stopped != pe_mask) {
1952 + for (i = 0; i < MAX_PE; i++)
1953 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1954 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1955 +
1956 + if (pe_dmem_read(i, (unsigned
1957 + long)&mbox->stopped, 4) &
1958 + cpu_to_be32(0x1))
1959 + pe_stopped |= (1 << i);
1960 + }
1961 +
1962 + if (relax(end) < 0)
1963 + goto err;
1964 + }
1965 +
1966 + return 0;
1967 +
1968 +err:
1969 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1970 +
1971 + for (i = 0; i < MAX_PE; i++)
1972 + if (pe_mask & (1 << i)) {
1973 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1974 +
1975 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1976 + long)&mbox->stop, 4);
1977 + }
1978 +
1979 + return -EIO;
1980 +}
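The intended usage pairs pe_sync_stop() with pe_start() around a critical update of shared state, with ctrl->mutex held, much as pfe_ctrl_suspend()/pfe_ctrl_resume() do; a sketch (update_shared_tables() is hypothetical):

	mutex_lock(&ctrl->mutex);
	if (!pe_sync_stop(ctrl, CLASS_MASK)) {
		update_shared_tables();
		pe_start(ctrl, CLASS_MASK);
	}
	mutex_unlock(&ctrl->mutex);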
1981 +
1982 +/* PE start.
1983 + * Starts packet processing for a list of PE's (specified using a bitmask).
1984 + * The caller must hold ctrl->mutex.
1985 + *
1986 + * @param ctrl Control context
1987 + * @param pe_mask Mask of PE id's to start
1988 + *
1989 + */
1990 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
1991 +{
1992 + struct pe_sync_mailbox *mbox;
1993 + int i;
1994 +
1995 + for (i = 0; i < MAX_PE; i++)
1996 + if (pe_mask & (1 << i)) {
1997 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1998 +
1999 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
2000 + long)&mbox->stop, 4);
2001 + }
2002 +}
2003 +
2004 +/* This function will ensure all PEs are put into an idle state */
2005 +int pe_reset_all(struct pfe_ctrl *ctrl)
2006 +{
2007 + struct pe_sync_mailbox *mbox;
2008 + int pe_stopped = 0;
2009 + unsigned long end = jiffies + 2;
2010 + int i;
2011 + int pe_mask = CLASS_MASK | TMU_MASK;
2012 +
2013 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2014 + pe_mask |= UTIL_MASK;
2015 +#endif
2016 +
2017 + for (i = 0; i < MAX_PE; i++)
2018 + if (pe_mask & (1 << i)) {
2019 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2020 +
2021 + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
2022 + long)&mbox->stop, 4);
2023 + }
2024 +
2025 + while (pe_stopped != pe_mask) {
2026 + for (i = 0; i < MAX_PE; i++)
2027 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
2028 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
2029 +
2030 + if (pe_dmem_read(i, (unsigned long)
2031 + &mbox->stopped, 4) &
2032 + cpu_to_be32(0x1))
2033 + pe_stopped |= (1 << i);
2034 + }
2035 +
2036 + if (relax(end) < 0)
2037 + goto err;
2038 + }
2039 +
2040 + return 0;
2041 +
2042 +err:
2043 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
2044 + return -EIO;
2045 +}
2046 +
2047 +int pfe_ctrl_init(struct pfe *pfe)
2048 +{
2049 + struct pfe_ctrl *ctrl = &pfe->ctrl;
2050 + int id;
2051 +
2052 + pr_info("%s\n", __func__);
2053 +
2054 + mutex_init(&ctrl->mutex);
2055 + spin_lock_init(&ctrl->lock);
2056 +
2057 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
2058 + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
2059 + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
2060 + }
2061 +
2062 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
2063 + if (id == TMU2_ID)
2064 + continue;
2065 + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
2066 + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
2067 + }
2068 +
2069 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
2070 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
2071 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
2072 +#endif
2073 +
2074 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
2075 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
2076 + ROUTE_TABLE_BASEADDR;
2077 +
2078 + ctrl->dev = pfe->dev;
2079 +
2080 + pr_info("%s finished\n", __func__);
2081 +
2082 + return 0;
2083 +}
2084 +
2085 +void pfe_ctrl_exit(struct pfe *pfe)
2086 +{
2087 + pr_info("%s\n", __func__);
2088 +}
2089 --- /dev/null
2090 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
2091 @@ -0,0 +1,112 @@
2092 +/*
2093 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2094 + * Copyright 2017 NXP
2095 + *
2096 + * This program is free software; you can redistribute it and/or modify
2097 + * it under the terms of the GNU General Public License as published by
2098 + * the Free Software Foundation; either version 2 of the License, or
2099 + * (at your option) any later version.
2100 + *
2101 + * This program is distributed in the hope that it will be useful,
2102 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2103 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2104 + * GNU General Public License for more details.
2105 + *
2106 + * You should have received a copy of the GNU General Public License
2107 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2108 + */
2109 +
2110 +#ifndef _PFE_CTRL_H_
2111 +#define _PFE_CTRL_H_
2112 +
2113 +#include <linux/dmapool.h>
2114 +
2115 +#include "pfe_mod.h"
2116 +#include "pfe/pfe.h"
2117 +
2118 +#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
2119 +#define DMA_BUF_SIZE_256	0x100
2120 +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
2121 +#define DMA_BUF_SIZE_512	0x200
2122 +/* 512-byte DMA allocated buffers used by the RTP relay feature */
2123 +#define DMA_BUF_MIN_ALIGNMENT	8
2124 +#define DMA_BUF_BOUNDARY	(4 * 1024)
2125 +/* bursts cannot cross a 4k boundary */
2126 +
2127 +#define CMD_TX_ENABLE 0x0501
2128 +#define CMD_TX_DISABLE 0x0502
2129 +
2130 +#define CMD_RX_LRO 0x0011
2131 +#define CMD_PKTCAP_ENABLE 0x0d01
2132 +#define CMD_QM_EXPT_RATE 0x020c
2133 +
2134 +#define CLASS_DM_SH_STATIC (0x800)
2135 +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
2136 +#define CLASS_DM_SYNC_MBOX (0x808)
2137 +#define CLASS_DM_MSG_MBOX (0x810)
2138 +#define CLASS_DM_DROP_CNTR (0x820)
2139 +#define CLASS_DM_RESUME (0x854)
2140 +#define CLASS_DM_PESTATUS (0x860)
2141 +
2142 +#define TMU_DM_SH_STATIC (0x80)
2143 +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
2144 +#define TMU_DM_SYNC_MBOX (0x88)
2145 +#define TMU_DM_MSG_MBOX (0x90)
2146 +#define TMU_DM_RESUME (0xA0)
2147 +#define TMU_DM_PESTATUS (0xB0)
2148 +#define TMU_DM_CONTEXT (0x300)
2149 +#define TMU_DM_TX_TRANS (0x480)
2150 +
2151 +#define UTIL_DM_SH_STATIC (0x0)
2152 +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
2153 +#define UTIL_DM_SYNC_MBOX (0x8)
2154 +#define UTIL_DM_MSG_MBOX (0x10)
2155 +#define UTIL_DM_DROP_CNTR (0x20)
2156 +#define UTIL_DM_RESUME (0x40)
2157 +#define UTIL_DM_PESTATUS (0x50)
2158 +
2159 +struct pfe_ctrl {
2160 + struct mutex mutex; /* to serialize pfe control access */
2161 + spinlock_t lock;
2162 +
2163 + void *dma_pool;
2164 + void *dma_pool_512;
2165 + void *dma_pool_128;
2166 +
2167 + struct device *dev;
2168 +
2169 + void *hash_array_baseaddr; /*
2170 + * Virtual base address of
2171 + * the conntrack hash array
2172 + */
2173 + unsigned long hash_array_phys_baseaddr; /*
2174 + * Physical base address of
2175 + * the conntrack hash array
2176 + */
2177 +
2178 + int (*event_cb)(u16, u16, u16*);
2179 +
2180 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
2181 + * Sync mailbox PFE
2182 + * internal address,
2183 + * initialized
2184 + * when parsing elf images
2185 + */
2186 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2187 + * Msg mailbox PFE internal
2188 + * address, initialized
2189 + * when parsing elf images
2190 + */
2191 + unsigned int sys_clk; /* AXI clock value, in KHz */
2192 +};
2193 +
2194 +int pfe_ctrl_init(struct pfe *pfe);
2195 +void pfe_ctrl_exit(struct pfe *pfe);
2196 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2197 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2198 +int pe_reset_all(struct pfe_ctrl *ctrl);
2199 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2200 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2201 +int relax(unsigned long end);
2202 +
2203 +#endif /* _PFE_CTRL_H_ */
2204 --- /dev/null
2205 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2206 @@ -0,0 +1,111 @@
2207 +/*
2208 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2209 + * Copyright 2017 NXP
2210 + *
2211 + * This program is free software; you can redistribute it and/or modify
2212 + * it under the terms of the GNU General Public License as published by
2213 + * the Free Software Foundation; either version 2 of the License, or
2214 + * (at your option) any later version.
2215 + *
2216 + * This program is distributed in the hope that it will be useful,
2217 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2218 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2219 + * GNU General Public License for more details.
2220 + *
2221 + * You should have received a copy of the GNU General Public License
2222 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2223 + */
2224 +
2225 +#include <linux/module.h>
2226 +#include <linux/debugfs.h>
2227 +#include <linux/platform_device.h>
2228 +
2229 +#include "pfe_mod.h"
2230 +
2231 +static int dmem_show(struct seq_file *s, void *unused)
2232 +{
2233 + u32 dmem_addr, val;
2234 + int id = (long int)s->private;
2235 + int i;
2236 +
2237 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2238 + seq_printf(s, "%04x:", dmem_addr);
2239 +
2240 + for (i = 0; i < 8; i++) {
2241 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2242 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2243 + (val >> 8) & 0xff, (val >> 16) & 0xff,
2244 + (val >> 24) & 0xff);
2245 + }
2246 +
2247 + seq_puts(s, "\n");
2248 + }
2249 +
2250 + return 0;
2251 +}
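Each peN_dmem file therefore dumps CLASS_DMEM_SIZE bytes of that PE's data memory, 32 bytes (eight 32-bit reads) per line, each line prefixed with its DMEM offset. A line of output looks like this (byte values illustrative):

	0000: 00 00 00 00 01 00 00 00 ... (32 hex bytes per line)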
2252 +
2253 +static int dmem_open(struct inode *inode, struct file *file)
2254 +{
2255 + return single_open(file, dmem_show, inode->i_private);
2256 +}
2257 +
2258 +static const struct file_operations dmem_fops = {
2259 + .open = dmem_open,
2260 + .read = seq_read,
2261 + .llseek = seq_lseek,
2262 + .release = single_release,
2263 +};
2264 +
2265 +int pfe_debugfs_init(struct pfe *pfe)
2266 +{
2267 + struct dentry *d;
2268 +
2269 + pr_info("%s\n", __func__);
2270 +
2271 + pfe->dentry = debugfs_create_dir("pfe", NULL);
2272 + if (IS_ERR_OR_NULL(pfe->dentry))
2273 + goto err_dir;
2274 +
2275 + d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
2276 + &dmem_fops);
2277 + if (IS_ERR_OR_NULL(d))
2278 + goto err_pe;
2279 +
2280 + d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
2281 + &dmem_fops);
2282 + if (IS_ERR_OR_NULL(d))
2283 + goto err_pe;
2284 +
2285 + d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
2286 + &dmem_fops);
2287 + if (IS_ERR_OR_NULL(d))
2288 + goto err_pe;
2289 +
2290 + d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
2291 + &dmem_fops);
2292 + if (IS_ERR_OR_NULL(d))
2293 + goto err_pe;
2294 +
2295 + d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
2296 + &dmem_fops);
2297 + if (IS_ERR_OR_NULL(d))
2298 + goto err_pe;
2299 +
2300 + d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
2301 + &dmem_fops);
2302 + if (IS_ERR_OR_NULL(d))
2303 + goto err_pe;
2304 +
2305 + return 0;
2306 +
2307 +err_pe:
2308 + debugfs_remove_recursive(pfe->dentry);
2309 +
2310 +err_dir:
2311 + return -1;
2312 +}
2313 +
2314 +void pfe_debugfs_exit(struct pfe *pfe)
2315 +{
2316 + debugfs_remove_recursive(pfe->dentry);
2317 +}
2318 --- /dev/null
2319 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2320 @@ -0,0 +1,25 @@
2321 +/*
2322 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2323 + * Copyright 2017 NXP
2324 + *
2325 + * This program is free software; you can redistribute it and/or modify
2326 + * it under the terms of the GNU General Public License as published by
2327 + * the Free Software Foundation; either version 2 of the License, or
2328 + * (at your option) any later version.
2329 + *
2330 + * This program is distributed in the hope that it will be useful,
2331 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2332 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2333 + * GNU General Public License for more details.
2334 + *
2335 + * You should have received a copy of the GNU General Public License
2336 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2337 + */
2338 +
2339 +#ifndef _PFE_DEBUGFS_H_
2340 +#define _PFE_DEBUGFS_H_
2341 +
2342 +int pfe_debugfs_init(struct pfe *pfe);
2343 +void pfe_debugfs_exit(struct pfe *pfe);
2344 +
2345 +#endif /* _PFE_DEBUGFS_H_ */
2346 --- /dev/null
2347 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2348 @@ -0,0 +1,2521 @@
2349 +/*
2350 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2351 + * Copyright 2017 NXP
2352 + *
2353 + * This program is free software; you can redistribute it and/or modify
2354 + * it under the terms of the GNU General Public License as published by
2355 + * the Free Software Foundation; either version 2 of the License, or
2356 + * (at your option) any later version.
2357 + *
2358 + * This program is distributed in the hope that it will be useful,
2359 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2360 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2361 + * GNU General Public License for more details.
2362 + *
2363 + * You should have received a copy of the GNU General Public License
2364 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2365 + */
2366 +
2367 +/* @pfe_eth.c.
2368 + * Ethernet driver to handle the exception path for PFE.
2369 + * - uses HIF functions to send/receive packets.
2370 + * - uses ctrl functions to start/stop interfaces.
2371 + * - uses direct register accesses to control phy operation.
2372 + */
2373 +#include <linux/version.h>
2374 +#include <linux/kernel.h>
2375 +#include <linux/interrupt.h>
2376 +#include <linux/dma-mapping.h>
2377 +#include <linux/dmapool.h>
2378 +#include <linux/netdevice.h>
2379 +#include <linux/etherdevice.h>
2380 +#include <linux/ethtool.h>
2381 +#include <linux/mii.h>
2382 +#include <linux/phy.h>
2383 +#include <linux/timer.h>
2384 +#include <linux/hrtimer.h>
2385 +#include <linux/platform_device.h>
2386 +
2387 +#include <net/ip.h>
2388 +#include <net/sock.h>
2389 +
2390 +#include <linux/io.h>
2391 +#include <asm/irq.h>
2392 +#include <linux/delay.h>
2393 +#include <linux/regmap.h>
2394 +#include <linux/i2c.h>
2395 +#include <linux/fsl/guts.h>
2396 +
2397 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2398 +#include <net/netfilter/nf_conntrack.h>
2399 +#endif
2400 +
2401 +#include "pfe_mod.h"
2402 +#include "pfe_eth.h"
2403 +
2404 +#define LS1012A_REV_1_0 0x87040010
2405 +
2406 +bool pfe_errata_a010897;
2407 +
2408 +static void *cbus_emac_base[3];
2409 +static void *cbus_gpi_base[3];
2410 +
2411 +/* Forward Declaration */
2412 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2413 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2414 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2415 + from_tx, int n_desc);
2416 +
2417 +unsigned int gemac_regs[] = {
2418 + 0x0004, /* Interrupt event */
2419 + 0x0008, /* Interrupt mask */
2420 + 0x0024, /* Ethernet control */
2421 + 0x0064, /* MIB Control/Status */
2422 + 0x0084, /* Receive control/status */
2423 + 0x00C4, /* Transmit control */
2424 + 0x00E4, /* Physical address low */
2425 + 0x00E8, /* Physical address high */
2426 + 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
2427 + 0x0190, /* Receive FIFO Section Full Threshold */
2428 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
2429 + 0x01B0, /* Frame Truncation Length */
2430 +};
2431 +
2432 +/********************************************************************/
2433 +/* SYSFS INTERFACE */
2434 +/********************************************************************/
2435 +
2436 +#ifdef PFE_ETH_NAPI_STATS
2437 +/*
2438 + * pfe_eth_show_napi_stats
2439 + */
2440 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2441 + struct device_attribute *attr,
2442 + char *buf)
2443 +{
2444 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2445 + ssize_t len = 0;
2446 +
2447 + len += sprintf(buf + len, "sched: %u\n",
2448 + priv->napi_counters[NAPI_SCHED_COUNT]);
2449 + len += sprintf(buf + len, "poll: %u\n",
2450 + priv->napi_counters[NAPI_POLL_COUNT]);
2451 + len += sprintf(buf + len, "packet: %u\n",
2452 + priv->napi_counters[NAPI_PACKET_COUNT]);
2453 + len += sprintf(buf + len, "budget: %u\n",
2454 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2455 + len += sprintf(buf + len, "desc: %u\n",
2456 + priv->napi_counters[NAPI_DESC_COUNT]);
2457 +
2458 + return len;
2459 +}
2460 +
2461 +/*
2462 + * pfe_eth_set_napi_stats
2463 + */
2464 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2465 + struct device_attribute *attr,
2466 + const char *buf, size_t count)
2467 +{
2468 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2469 +
2470 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2471 +
2472 + return count;
2473 +}
2474 +#endif
2475 +#ifdef PFE_ETH_TX_STATS
2476 +/* pfe_eth_show_tx_stats
2477 + *
2478 + */
2479 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2480 + struct device_attribute *attr,
2481 + char *buf)
2482 +{
2483 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2484 + ssize_t len = 0;
2485 + int i;
2486 +
2487 + len += sprintf(buf + len, "TX queues stats:\n");
2488 +
2489 + for (i = 0; i < emac_txq_cnt; i++) {
2490 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2491 + i);
2492 +
2493 + len += sprintf(buf + len, "\n");
2494 + __netif_tx_lock_bh(tx_queue);
2495 +
2496 + hif_tx_lock(&pfe->hif);
2497 + len += sprintf(buf + len,
2498 + "Queue %2d : credits = %10d\n"
2499 + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2500 + len += sprintf(buf + len,
2501 + " tx packets = %10d\n"
2502 + , pfe->tmu_credit.tx_packets[priv->id][i]);
2503 + hif_tx_unlock(&pfe->hif);
2504 +
2505 +		/* Don't output additional stats if the queue was never used */
2506 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
2507 + goto skip;
2508 +
2509 + len += sprintf(buf + len,
2510 + " clean_fail = %10d\n"
2511 + , priv->clean_fail[i]);
2512 + len += sprintf(buf + len,
2513 + " stop_queue = %10d\n"
2514 + , priv->stop_queue_total[i]);
2515 + len += sprintf(buf + len,
2516 + " stop_queue_hif = %10d\n"
2517 + , priv->stop_queue_hif[i]);
2518 + len += sprintf(buf + len,
2519 + " stop_queue_hif_client = %10d\n"
2520 + , priv->stop_queue_hif_client[i]);
2521 + len += sprintf(buf + len,
2522 + " stop_queue_credit = %10d\n"
2523 + , priv->stop_queue_credit[i]);
2524 +skip:
2525 + __netif_tx_unlock_bh(tx_queue);
2526 + }
2527 + return len;
2528 +}
2529 +
2530 +/* pfe_eth_set_tx_stats
2531 + *
2532 + */
2533 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2534 + struct device_attribute *attr,
2535 + const char *buf, size_t count)
2536 +{
2537 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2538 + int i;
2539 +
2540 + for (i = 0; i < emac_txq_cnt; i++) {
2541 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2542 + i);
2543 +
2544 + __netif_tx_lock_bh(tx_queue);
2545 + priv->clean_fail[i] = 0;
2546 + priv->stop_queue_total[i] = 0;
2547 + priv->stop_queue_hif[i] = 0;
2548 + priv->stop_queue_hif_client[i] = 0;
2549 + priv->stop_queue_credit[i] = 0;
2550 + __netif_tx_unlock_bh(tx_queue);
2551 + }
2552 +
2553 + return count;
2554 +}
2555 +#endif
2556 +/* pfe_eth_show_txavail
2557 + *
2558 + */
2559 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2560 + struct device_attribute *attr,
2561 + char *buf)
2562 +{
2563 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2564 + ssize_t len = 0;
2565 + int i;
2566 +
2567 + for (i = 0; i < emac_txq_cnt; i++) {
2568 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2569 + i);
2570 +
2571 + __netif_tx_lock_bh(tx_queue);
2572 +
2573 + len += sprintf(buf + len, "%d",
2574 + hif_lib_tx_avail(&priv->client, i));
2575 +
2576 + __netif_tx_unlock_bh(tx_queue);
2577 +
2578 + if (i == (emac_txq_cnt - 1))
2579 + len += sprintf(buf + len, "\n");
2580 + else
2581 + len += sprintf(buf + len, " ");
2582 + }
2583 +
2584 + return len;
2585 +}
2586 +
2587 +/* pfe_eth_show_default_priority
2588 + *
2589 + */
2590 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2591 + struct device_attribute *attr,
2592 + char *buf)
2593 +{
2594 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2595 + unsigned long flags;
2596 + int rc;
2597 +
2598 + spin_lock_irqsave(&priv->lock, flags);
2599 + rc = sprintf(buf, "%d\n", priv->default_priority);
2600 + spin_unlock_irqrestore(&priv->lock, flags);
2601 +
2602 + return rc;
2603 +}
2604 +
2605 +/* pfe_eth_set_default_priority
2606 + *
2607 + */
2608 +
2609 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2610 + struct device_attribute *attr,
2611 + const char *buf, size_t count)
2612 +{
2613 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2614 + unsigned long flags;
2615 +
2616 + spin_lock_irqsave(&priv->lock, flags);
2617 +	priv->default_priority = simple_strtoul(buf, NULL, 0);
2618 + spin_unlock_irqrestore(&priv->lock, flags);
2619 +
2620 + return count;
2621 +}
2622 +
2623 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2624 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2625 + pfe_eth_set_default_priority);
2626 +
2627 +#ifdef PFE_ETH_NAPI_STATS
2628 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2629 + pfe_eth_set_napi_stats);
2630 +#endif
2631 +
2632 +#ifdef PFE_ETH_TX_STATS
2633 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2634 + pfe_eth_set_tx_stats);
2635 +#endif
2636 +
2637 +/*
2638 + * pfe_eth_sysfs_init
2639 + *
2640 + */
2641 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2642 +{
2643 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2644 + int err;
2645 +
2646 + /* Initialize the default values */
2647 +
2648 + /*
2649 + * By default, packets without conntrack will use this default low
2650 + * priority queue
2651 + */
2652 + priv->default_priority = 0;
2653 +
2654 + /* Create our sysfs files */
2655 + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2656 + if (err) {
2657 + netdev_err(ndev,
2658 + "failed to create default_priority sysfs files\n");
2659 + goto err_priority;
2660 + }
2661 +
2662 + err = device_create_file(&ndev->dev, &dev_attr_txavail);
2663 + if (err) {
2664 + netdev_err(ndev,
2665 + "failed to create default_priority sysfs files\n");
2666 + goto err_txavail;
2667 + }
2668 +
2669 +#ifdef PFE_ETH_NAPI_STATS
2670 + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2671 + if (err) {
2672 + netdev_err(ndev, "failed to create napi stats sysfs files\n");
2673 + goto err_napi;
2674 + }
2675 +#endif
2676 +
2677 +#ifdef PFE_ETH_TX_STATS
2678 + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2679 + if (err) {
2680 + netdev_err(ndev, "failed to create tx stats sysfs files\n");
2681 + goto err_tx;
2682 + }
2683 +#endif
2684 +
2685 + return 0;
2686 +
2687 +#ifdef PFE_ETH_TX_STATS
2688 +err_tx:
2689 +#endif
2690 +#ifdef PFE_ETH_NAPI_STATS
2691 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2692 +
2693 +err_napi:
2694 +#endif
2695 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2696 +
2697 +err_txavail:
2698 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2699 +
2700 +err_priority:
2701 + return -1;
2702 +}
2703 +
2704 +/* pfe_eth_sysfs_exit
2705 + *
2706 + */
2707 +void pfe_eth_sysfs_exit(struct net_device *ndev)
2708 +{
2709 +#ifdef PFE_ETH_TX_STATS
2710 + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
2711 +#endif
2712 +
2713 +#ifdef PFE_ETH_NAPI_STATS
2714 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2715 +#endif
2716 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2717 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2718 +}
2719 +
2720 +/*************************************************************************/
2721 +/* ETHTOOL INTERFACE */
2722 +/*************************************************************************/
2723 +
2724 +/* MTIP GEMAC */
2725 +static const struct fec_stat {
2726 + char name[ETH_GSTRING_LEN];
2727 + u16 offset;
2728 +} fec_stats[] = {
2729 + /* RMON TX */
2730 + { "tx_dropped", RMON_T_DROP },
2731 + { "tx_packets", RMON_T_PACKETS },
2732 + { "tx_broadcast", RMON_T_BC_PKT },
2733 + { "tx_multicast", RMON_T_MC_PKT },
2734 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
2735 + { "tx_undersize", RMON_T_UNDERSIZE },
2736 + { "tx_oversize", RMON_T_OVERSIZE },
2737 + { "tx_fragment", RMON_T_FRAG },
2738 + { "tx_jabber", RMON_T_JAB },
2739 + { "tx_collision", RMON_T_COL },
2740 + { "tx_64byte", RMON_T_P64 },
2741 + { "tx_65to127byte", RMON_T_P65TO127 },
2742 + { "tx_128to255byte", RMON_T_P128TO255 },
2743 + { "tx_256to511byte", RMON_T_P256TO511 },
2744 + { "tx_512to1023byte", RMON_T_P512TO1023 },
2745 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2746 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2747 + { "tx_octets", RMON_T_OCTETS },
2748 +
2749 + /* IEEE TX */
2750 + { "IEEE_tx_drop", IEEE_T_DROP },
2751 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2752 + { "IEEE_tx_1col", IEEE_T_1COL },
2753 + { "IEEE_tx_mcol", IEEE_T_MCOL },
2754 + { "IEEE_tx_def", IEEE_T_DEF },
2755 + { "IEEE_tx_lcol", IEEE_T_LCOL },
2756 + { "IEEE_tx_excol", IEEE_T_EXCOL },
2757 + { "IEEE_tx_macerr", IEEE_T_MACERR },
2758 + { "IEEE_tx_cserr", IEEE_T_CSERR },
2759 + { "IEEE_tx_sqe", IEEE_T_SQE },
2760 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2761 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2762 +
2763 + /* RMON RX */
2764 + { "rx_packets", RMON_R_PACKETS },
2765 + { "rx_broadcast", RMON_R_BC_PKT },
2766 + { "rx_multicast", RMON_R_MC_PKT },
2767 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
2768 + { "rx_undersize", RMON_R_UNDERSIZE },
2769 + { "rx_oversize", RMON_R_OVERSIZE },
2770 + { "rx_fragment", RMON_R_FRAG },
2771 + { "rx_jabber", RMON_R_JAB },
2772 + { "rx_64byte", RMON_R_P64 },
2773 + { "rx_65to127byte", RMON_R_P65TO127 },
2774 + { "rx_128to255byte", RMON_R_P128TO255 },
2775 + { "rx_256to511byte", RMON_R_P256TO511 },
2776 + { "rx_512to1023byte", RMON_R_P512TO1023 },
2777 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2778 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2779 + { "rx_octets", RMON_R_OCTETS },
2780 +
2781 + /* IEEE RX */
2782 + { "IEEE_rx_drop", IEEE_R_DROP },
2783 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2784 + { "IEEE_rx_crc", IEEE_R_CRC },
2785 + { "IEEE_rx_align", IEEE_R_ALIGN },
2786 + { "IEEE_rx_macerr", IEEE_R_MACERR },
2787 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2788 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2789 +};
2790 +
2791 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
2792 + *stats, u64 *data)
2793 +{
2794 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2795 + int i;
2796 +
2797 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2798 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
2799 +}
2800 +
2801 +static void pfe_eth_gstrings(struct net_device *netdev,
2802 + u32 stringset, u8 *data)
2803 +{
2804 + int i;
2805 +
2806 + switch (stringset) {
2807 + case ETH_SS_STATS:
2808 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2809 + memcpy(data + i * ETH_GSTRING_LEN,
2810 + fec_stats[i].name, ETH_GSTRING_LEN);
2811 + break;
2812 + }
2813 +}
2814 +
2815 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
2816 +{
2817 + switch (sset) {
2818 + case ETH_SS_STATS:
2819 + return ARRAY_SIZE(fec_stats);
2820 + default:
2821 + return -EOPNOTSUPP;
2822 + }
2823 +}
2824 +
2825 +/*
2826 + * pfe_eth_gemac_reglen - Return the length of the register structure.
2827 + *
2828 + */
2829 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
2830 +{
2831 + pr_info("%s()\n", __func__);
2832 +	return sizeof(gemac_regs); /* dump length in bytes */
2833 +}
2834 +
2835 +/*
2836 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
2837 + *
2838 + */
2839 +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
2840 + *regs, void *regbuf)
2841 +{
2842 + int i;
2843 +
2844 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2845 + u32 *buf = (u32 *)regbuf;
2846 +
2847 + pr_info("%s()\n", __func__);
2848 + for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
2849 + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
2850 +}
2851 +
2852 +/*
2853 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
2854 + *
2855 + */
2856 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2857 +{
2858 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2859 +
2860 + if (wol->wolopts & ~WAKE_MAGIC)
2861 + return -EOPNOTSUPP;
2862 +
2863 + /* for MTIP we store wol->wolopts */
2864 + priv->wol = wol->wolopts;
2865 +
2866 + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2867 +
2868 + return 0;
2869 +}
2870 +
2871 +/*
2872 + *
2873 + * pfe_eth_get_wol - Get the WoL options.
2874 + *
2875 + */
2876 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
2877 + *wol)
2878 +{
2879 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2880 +
2881 + wol->supported = WAKE_MAGIC;
2882 + wol->wolopts = 0;
2883 +
2884 + if (priv->wol & WAKE_MAGIC)
2885 + wol->wolopts = WAKE_MAGIC;
2886 +
2887 + memset(&wol->sopass, 0, sizeof(wol->sopass));
2888 +}
2889 +
2890 +/*
2891 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
2892 + *
2893 + */
2894 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
2895 + *drvinfo)
2896 +{
2897 + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2898 + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
2899 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2900 + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
2901 +}
2902 +
2903 +/*
2904 + * pfe_eth_set_settings - Used to send commands to PHY.
2905 + *
2906 + */
2907 +static int pfe_eth_set_settings(struct net_device *ndev,
2908 + const struct ethtool_link_ksettings *cmd)
2909 +{
2910 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2911 + struct phy_device *phydev = priv->phydev;
2912 +
2913 + if (!phydev)
2914 + return -ENODEV;
2915 +
2916 + return phy_ethtool_ksettings_set(phydev, cmd);
2917 +}
2918 +
2919 +/*
2920 + * pfe_eth_get_settings - Return the current settings in the
2921 + * ethtool_link_ksettings structure.
2922 + *
2923 + */
2924 +static int pfe_eth_get_settings(struct net_device *ndev,
2925 + struct ethtool_link_ksettings *cmd)
2926 +{
2927 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2928 + struct phy_device *phydev = priv->phydev;
2929 +
2930 + if (!phydev)
2931 + return -ENODEV;
2932 +
2933 + phy_ethtool_ksettings_get(phydev, cmd);
2934 +
2935 + return 0;
2936 +}
2937 +
2938 +/*
2939 + * pfe_eth_get_msglevel - Gets the debug message mask.
2940 + *
2941 + */
2942 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
2943 +{
2944 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2945 +
2946 + return priv->msg_enable;
2947 +}
2948 +
2949 +/*
2950 + * pfe_eth_set_msglevel - Sets the debug message mask.
2951 + *
2952 + */
2953 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
2954 +{
2955 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2956 +
2957 + priv->msg_enable = data;
2958 +}
2959 +
2960 +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
2961 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
2962 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
2963 + HIF_RX_COAL_CLKS_PER_USEC)
2964 +
2965 +/*
2966 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
2967 + *
2968 + */
2969 +static int pfe_eth_set_coalesce(struct net_device *ndev,
2970 + struct ethtool_coalesce *ec)
2971 +{
2972 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
2973 + return -EINVAL;
2974 +
2975 + if (!ec->rx_coalesce_usecs) {
2976 + writel(0, HIF_INT_COAL);
2977 + return 0;
2978 + }
2979 +
2980 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
2981 + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2982 +
2983 + return 0;
2984 +}
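Since sys_clk is stored in kHz, one microsecond corresponds to sys_clk / 1000 HIF clocks. A worked example (the 250 MHz AXI clock is an assumed value):

	/* sys_clk = 250000 kHz (250 MHz AXI, assumed):
	 *   HIF_RX_COAL_CLKS_PER_USEC = 250000 / 1000 = 250
	 *   rx_coalesce_usecs = 100  ->  100 * 250 = 25000 clocks
	 * so (25000 | HIF_INT_COAL_ENABLE) is written to HIF_INT_COAL.
	 */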
2985 +
2986 +/*
2987 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
2988 + *
2989 + */
2990 +static int pfe_eth_get_coalesce(struct net_device *ndev,
2991 + struct ethtool_coalesce *ec)
2992 +{
2993 + int reg_val = readl(HIF_INT_COAL);
2994 +
2995 + if (reg_val & HIF_INT_COAL_ENABLE)
2996 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
2997 + HIF_RX_COAL_CLKS_PER_USEC;
2998 + else
2999 + ec->rx_coalesce_usecs = 0;
3000 +
3001 + return 0;
3002 +}
3003 +
3004 +/*
3005 + * pfe_eth_set_pauseparam - Sets pause parameters
3006 + *
3007 + */
3008 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
3009 + struct ethtool_pauseparam *epause)
3010 +{
3011 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3012 +
3013 + if (epause->tx_pause != epause->rx_pause) {
3014 + netdev_info(ndev,
3015 + "hardware only support enable/disable both tx and rx\n");
3016 + return -EINVAL;
3017 + }
3018 +
3019 + priv->pause_flag = 0;
3020 + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
3021 + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
3022 +
3023 + if (epause->rx_pause || epause->autoneg) {
3024 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3025 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
3026 + EGPI_PAUSE_ENABLE),
3027 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
3028 + if (priv->phydev) {
3029 + priv->phydev->supported |= ADVERTISED_Pause |
3030 + ADVERTISED_Asym_Pause;
3031 + priv->phydev->advertising |= ADVERTISED_Pause |
3032 + ADVERTISED_Asym_Pause;
3033 + }
3034 + } else {
3035 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
3036 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
3037 + ~EGPI_PAUSE_ENABLE),
3038 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
3039 + if (priv->phydev) {
3040 + priv->phydev->supported &= ~(ADVERTISED_Pause |
3041 + ADVERTISED_Asym_Pause);
3042 + priv->phydev->advertising &= ~(ADVERTISED_Pause |
3043 + ADVERTISED_Asym_Pause);
3044 + }
3045 + }
3046 +
3047 + return 0;
3048 +}
3049 +
3050 +/*
3051 + * pfe_eth_get_pauseparam - Gets pause parameters
3052 + *
3053 + */
3054 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
3055 + struct ethtool_pauseparam *epause)
3056 +{
3057 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3058 +
3059 + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
3060 + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
3061 + epause->rx_pause = epause->tx_pause;
3062 +}
3063 +
3064 +/*
3065 + * pfe_eth_get_hash
3066 + */
3067 +#define PFE_HASH_BITS 6 /* #bits in hash */
3068 +#define CRC32_POLY 0xEDB88320
3069 +
3070 +static int pfe_eth_get_hash(u8 *addr)
3071 +{
3072 + unsigned int i, bit, data, crc, hash;
3073 +
3074 + /* calculate crc32 value of mac address */
3075 + crc = 0xffffffff;
3076 +
3077 + for (i = 0; i < 6; i++) {
3078 + data = addr[i];
3079 + for (bit = 0; bit < 8; bit++, data >>= 1) {
3080 + crc = (crc >> 1) ^
3081 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
3082 + }
3083 + }
3084 +
3085 + /*
3086 + * only the upper 6 bits (PFE_HASH_BITS) are used;
3087 + * they select a specific bit in the hash registers
3088 + */
3089 + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
3090 +
3091 + return hash;
3092 +}
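+
+/*
+ * The 6-bit value returned above selects a single bit across the pair
+ * of 32-bit GEM hash registers. A minimal sketch of how a caller folds
+ * it into the register pair (mirroring pfe_eth_set_multi() below):
+ *
+ *	int bit = pfe_eth_get_hash(ha->addr);
+ *
+ *	if (bit < 32)
+ *		hash_addr.bottom |= 1 << bit;
+ *	else
+ *		hash_addr.top |= 1 << (bit - 32);
+ *	gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+ */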
3093 +
3094 +const struct ethtool_ops pfe_ethtool_ops = {
3095 + .get_drvinfo = pfe_eth_get_drvinfo,
3096 + .get_regs_len = pfe_eth_gemac_reglen,
3097 + .get_regs = pfe_eth_gemac_get_regs,
3098 + .get_link = ethtool_op_get_link,
3099 + .get_wol = pfe_eth_get_wol,
3100 + .set_wol = pfe_eth_set_wol,
3101 + .set_pauseparam = pfe_eth_set_pauseparam,
3102 + .get_pauseparam = pfe_eth_get_pauseparam,
3103 + .get_strings = pfe_eth_gstrings,
3104 + .get_sset_count = pfe_eth_stats_count,
3105 + .get_ethtool_stats = pfe_eth_fill_stats,
3106 + .get_msglevel = pfe_eth_get_msglevel,
3107 + .set_msglevel = pfe_eth_set_msglevel,
3108 + .set_coalesce = pfe_eth_set_coalesce,
3109 + .get_coalesce = pfe_eth_get_coalesce,
3110 + .get_link_ksettings = pfe_eth_get_settings,
3111 + .set_link_ksettings = pfe_eth_set_settings,
3112 +};
3113 +
3114 +/* pfe_eth_mdio_reset
3115 + */
3116 +int pfe_eth_mdio_reset(struct mii_bus *bus)
3117 +{
3118 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3119 + u32 phy_speed;
3120 +
3121 + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
3122 +
3123 + mutex_lock(&bus->mdio_lock);
3124 +
3125 + /*
3126 + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
3127 + *
3128 + * The formula for the FEC MDC is 'ref_freq / (MII_SPEED x 2)', while
3129 + * for the ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.
3130 + */
3131 + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
3132 + << EMAC_MII_SPEED_SHIFT);
3133 + phy_speed |= EMAC_HOLDTIME(0x5);
3134 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
3135 +
3136 + mutex_unlock(&bus->mdio_lock);
3137 +
3138 + return 0;
3139 +}
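+
+/*
+ * A worked example of the MII_SPEED value programmed above (illustrative
+ * only, assuming pfe->ctrl.sys_clk holds 250000, i.e. a 250 MHz
+ * reference clock expressed in kHz):
+ *
+ *	MII_SPEED = DIV_ROUND_UP(250000 * 1000, 4000000) = 63
+ *	MDC (ENET-MAC) = 250 MHz / ((63 + 1) * 2) ~= 1.95 MHz
+ *
+ * which keeps MDC below the 2.5 MHz limit of IEEE 802.3 clause 22.
+ */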
3140 +
3141 +/* pfe_eth_gemac_phy_timeout
3142 + *
3143 + */
3144 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
3145 +{
3146 + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
3147 + EMAC_IEVENT_MII)) {
3148 + if (timeout-- <= 0)
3149 + return -1;
3150 + usleep_range(10, 20);
3151 + }
3152 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
3153 + return 0;
3154 +}
3155 +
3156 +static int pfe_eth_mdio_mux(u8 muxval)
3157 +{
3158 + struct i2c_adapter *a;
3159 + struct i2c_msg msg;
3160 + unsigned char buf[2];
3161 + int ret;
3162 +
3163 + a = i2c_get_adapter(0);
3164 + if (!a)
3165 + return -ENODEV;
3166 +
3167 + /* select the MDIO mux via register 0x54 of the I2C device at address 0x66 */
3168 + buf[0] = 0x54; /* reg number */
3169 + buf[1] = (muxval << 6) | 0x3; /* data */
3170 + msg.addr = 0x66;
3171 + msg.buf = buf;
3172 + msg.len = 2;
3173 + msg.flags = 0;
3174 + ret = i2c_transfer(a, &msg, 1);
3175 + i2c_put_adapter(a);
3176 + if (ret != 1)
3177 + return -ENODEV;
3178 + return 0;
3179 +}
3180 +
3181 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
3182 + int dev_addr, int regnum)
3183 +{
3184 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3185 +
3186 + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
3187 + EMAC_MII_DATA_RA(dev_addr) |
3188 + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
3189 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3190 +
3191 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3192 + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
3193 + __func__);
3194 + return -1;
3195 + }
3196 +
3197 + return 0;
3198 +}
3199 +
3200 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3201 + u16 value)
3202 +{
3203 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3204 +
3205 + /* To access external PHYs on the QDS board, the mux needs to be configured */
3206 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3207 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3208 +
3209 + if (regnum & MII_ADDR_C45) {
3210 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3211 + regnum & 0xffff);
3212 + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3213 + EMAC_MII_DATA_PA(mii_id) |
3214 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3215 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3216 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3217 + } else {
3218 + /* start a write op */
3219 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3220 + EMAC_MII_DATA_PA(mii_id) |
3221 + EMAC_MII_DATA_RA(regnum) |
3222 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3223 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3224 + }
3225 +
3226 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3227 + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
3228 + __func__);
3229 + return -1;
3230 + }
3231 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3232 + mii_id, regnum, value);
3233 +
3234 + return 0;
3235 +}
3236 +
3237 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3238 +{
3239 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3240 + u16 value = 0;
3241 +
3242 + /* To access external PHYs on the QDS board, the mux needs to be configured */
3243 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3244 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3245 +
3246 + if (regnum & MII_ADDR_C45) {
3247 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3248 + regnum & 0xffff);
3249 + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3250 + EMAC_MII_DATA_PA(mii_id) |
3251 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3252 + EMAC_MII_DATA_TA,
3253 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3254 + } else {
3255 + /* start a read op */
3256 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3257 + EMAC_MII_DATA_PA(mii_id) |
3258 + EMAC_MII_DATA_RA(regnum) |
3259 + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
3260 + EMAC_MII_DATA_REG);
3261 + }
3262 +
3263 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3264 + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
3265 + return -1;
3266 + }
3267 +
3268 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
3269 + EMAC_MII_DATA_REG));
3270 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3271 + mii_id, regnum, value);
3272 + return value;
3273 +}
3274 +
3275 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
3276 + struct ls1012a_mdio_platform_data *minfo)
3277 +{
3278 + struct mii_bus *bus;
3279 + int rc, ii;
3280 + struct phy_device *phydev;
3281 +
3282 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3283 + pr_info("%s\n", __func__);
3284 +
3285 + bus = mdiobus_alloc();
3286 + if (!bus) {
3287 + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
3288 + rc = -ENOMEM;
3289 + goto err0;
3290 + }
3291 +
3292 + bus->name = "ls1012a MDIO Bus";
3293 + bus->read = &pfe_eth_mdio_read;
3294 + bus->write = &pfe_eth_mdio_write;
3295 + bus->reset = &pfe_eth_mdio_reset;
3296 + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
3297 + bus->priv = priv;
3298 +
3299 + bus->phy_mask = minfo->phy_mask;
3300 + priv->mdc_div = minfo->mdc_div;
3301 +
3302 + if (!priv->mdc_div)
3303 + priv->mdc_div = 64;
3304 +
3305 + bus->irq[0] = minfo->irq[0];
3306 +
3307 + bus->parent = priv->pfe->dev;
3308 +
3309 + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
3310 + __func__, priv->mdc_div, bus->phy_mask);
3311 + rc = mdiobus_register(bus);
3312 + if (rc) {
3313 + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
3314 + bus->name);
3315 + goto err1;
3316 + }
3317 +
3318 + priv->mii_bus = bus;
3319 +
3320 + /* For clause 45 we need to call get_phy_device() with its
3321 + * 3rd argument set to true and then register the phy device
3322 + * via phy_device_register()
3323 + */
3324 +
3325 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) {
3326 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
3327 + phydev = get_phy_device(priv->mii_bus,
3328 + priv->einfo->phy_id + ii, true);
3329 + if (!phydev || IS_ERR(phydev)) {
3330 + rc = -EIO;
3331 + netdev_err(priv->ndev, "fail to get device\n");
3332 + goto err1;
3333 + }
3334 + rc = phy_device_register(phydev);
3335 + if (rc) {
3336 + phy_device_free(phydev);
3337 + netdev_err(priv->ndev,
3338 + "phy_device_register() failed\n");
3339 + goto err1;
3340 + }
3341 + }
3342 + }
3343 +
3344 + pfe_eth_mdio_reset(bus);
3345 +
3346 + return 0;
3347 +
3348 +err1:
3349 + mdiobus_free(bus);
3350 +err0:
3351 + return rc;
3352 +}
3353 +
3354 +/* pfe_eth_mdio_exit
3355 + */
3356 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
3357 +{
3358 + if (!bus)
3359 + return;
3360 +
3361 + netif_info((struct pfe_eth_priv_s *)bus->priv, drv,
3362 + ((struct pfe_eth_priv_s *)bus->priv)->ndev, "%s\n", __func__);
3363 +
3364 + mdiobus_unregister(bus);
3365 + mdiobus_free(bus);
3366 +}
3367 +
3368 +/* pfe_get_phydev_speed
3369 + */
3370 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3371 +{
3372 + switch (phydev->speed) {
3373 + case 10:
3374 + return SPEED_10M;
3375 + case 100:
3376 + return SPEED_100M;
3377 + case 1000:
3378 + default:
3379 + return SPEED_1000M;
3380 + }
3381 +}
3382 +
3383 +/* pfe_set_rgmii_speed
3384 + */
3385 +#define RGMIIPCR 0x434
3386 +/* RGMIIPCR bit definitions*/
3387 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
3388 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
3389 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
3390 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
3391 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
3392 +
3393 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3394 +{
3395 + u32 rgmii_pcr;
3396 +
3397 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3398 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3399 +
3400 + switch (phydev->speed) {
3401 + case 10:
3402 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3403 + break;
3404 + case 1000:
3405 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3406 + break;
3407 + case 100:
3408 + default:
3409 + /* Default is 100M */
3410 + break;
3411 + }
3412 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3413 +}
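+
+/*
+ * Example: on link-up at 1 Gb/s the read-modify-write above first clears
+ * the 10M and 1000M speed bits, then sets SCFG_RGMIIPCR_SETSP_1000M,
+ * leaving the remaining RGMIIPCR bits (e.g. SCFG_RGMIIPCR_SETFD)
+ * untouched; 100 Mb/s needs no bit set since SCFG_RGMIIPCR_SETSP_100M
+ * is 0.
+ */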
3414 +
3415 +/* pfe_get_phydev_duplex
3416 + */
3417 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3418 +{
3419 + /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
3420 + return DUPLEX_FULL;
3421 +}
3422 +
3423 +/* pfe_eth_adjust_link
3424 + */
3425 +static void pfe_eth_adjust_link(struct net_device *ndev)
3426 +{
3427 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3428 + unsigned long flags;
3429 + struct phy_device *phydev = priv->phydev;
3430 + int new_state = 0;
3431 +
3432 + netif_info(priv, drv, ndev, "%s\n", __func__);
3433 +
3434 + spin_lock_irqsave(&priv->lock, flags);
3435 +
3436 + if (phydev->link) {
3437 + /*
3438 + * Now we make sure that we can be in full duplex mode.
3439 + * If not, we operate in half-duplex mode.
3440 + */
3441 + if (phydev->duplex != priv->oldduplex) {
3442 + new_state = 1;
3443 + gemac_set_duplex(priv->EMAC_baseaddr,
3444 + pfe_get_phydev_duplex(phydev));
3445 + priv->oldduplex = phydev->duplex;
3446 + }
3447 +
3448 + if (phydev->speed != priv->oldspeed) {
3449 + new_state = 1;
3450 + gemac_set_speed(priv->EMAC_baseaddr,
3451 + pfe_get_phydev_speed(phydev));
3452 + if (priv->einfo->mii_config ==
3453 + PHY_INTERFACE_MODE_RGMII_TXID)
3454 + pfe_set_rgmii_speed(phydev);
3455 + priv->oldspeed = phydev->speed;
3456 + }
3457 +
3458 + if (!priv->oldlink) {
3459 + new_state = 1;
3460 + priv->oldlink = 1;
3461 + }
3462 +
3463 + } else if (priv->oldlink) {
3464 + new_state = 1;
3465 + priv->oldlink = 0;
3466 + priv->oldspeed = 0;
3467 + priv->oldduplex = -1;
3468 + }
3469 +
3470 + if (new_state && netif_msg_link(priv))
3471 + phy_print_status(phydev);
3472 +
3473 + spin_unlock_irqrestore(&priv->lock, flags);
3474 +}
3475 +
3476 +/* pfe_phy_exit
3477 + */
3478 +static void pfe_phy_exit(struct net_device *ndev)
3479 +{
3480 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3481 +
3482 + netif_info(priv, drv, ndev, "%s\n", __func__);
3483 +
3484 + phy_disconnect(priv->phydev);
3485 + priv->phydev = NULL;
3486 +}
3487 +
3488 +/* pfe_eth_stop
3489 + */
3490 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3491 +{
3492 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3493 +
3494 + netif_info(priv, drv, ndev, "%s\n", __func__);
3495 +
3496 + if (wake) {
3497 + gemac_tx_disable(priv->EMAC_baseaddr);
3498 + } else {
3499 + gemac_disable(priv->EMAC_baseaddr);
3500 + gpi_disable(priv->GPI_baseaddr);
3501 +
3502 + if (priv->phydev)
3503 + phy_stop(priv->phydev);
3504 + }
3505 +}
3506 +
3507 +/* pfe_eth_start
3508 + */
3509 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3510 +{
3511 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3512 +
3513 + if (priv->phydev)
3514 + phy_start(priv->phydev);
3515 +
3516 + gpi_enable(priv->GPI_baseaddr);
3517 + gemac_enable(priv->EMAC_baseaddr);
3518 +
3519 + return 0;
3520 +}
3521 +
3522 +/*
3523 + * Configure on chip serdes through mdio
3524 + */
3525 +static void ls1012a_configure_serdes(struct net_device *ndev)
3526 +{
3527 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
3528 + int sgmii_2500 = 0;
3529 + struct mii_bus *bus = priv->mii_bus;
3530 + u16 value = 0;
3531 +
3532 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
3533 + sgmii_2500 = 1;
3534 +
3535 + netif_info(priv, drv, ndev, "%s\n", __func__);
3536 + /* PCS configuration done with corresponding GEMAC */
3537 +
3538 + pfe_eth_mdio_read(bus, 0, 0);
3539 + pfe_eth_mdio_read(bus, 0, 1);
3540 +
3541 + /* These settings were provided by the validation team */
3542 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
3543 + if (sgmii_2500) {
3544 + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
3545 + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
3546 + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
3547 + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
3548 + /* Autonegotiation needs to be disabled for 2.5G SGMII mode */
3549 + value = 0x0140;
3550 + pfe_eth_mdio_write(bus, 0, 0x0, value);
3551 + } else {
3552 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
3553 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
3554 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
3555 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
3556 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
3557 + }
3558 +}
3559 +
3560 +/*
3561 + * pfe_phy_init
3562 + *
3563 + */
3564 +static int pfe_phy_init(struct net_device *ndev)
3565 +{
3566 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3567 + struct phy_device *phydev;
3568 + char phy_id[MII_BUS_ID_SIZE + 3];
3569 + char bus_id[MII_BUS_ID_SIZE];
3570 + phy_interface_t interface;
3571 +
3572 + priv->oldlink = 0;
3573 + priv->oldspeed = 0;
3574 + priv->oldduplex = -1;
3575 +
3576 + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3577 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3578 + priv->einfo->phy_id);
3579 +
3580 + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3581 + interface = priv->einfo->mii_config;
3582 + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3583 + (interface == PHY_INTERFACE_MODE_2500SGMII)) {
3584 + /*Configure SGMII PCS */
3585 + if (pfe->scfg) {
3586 + /*Config MDIO from serdes */
3587 + regmap_write(pfe->scfg, 0x484, 0x00000000);
3588 + }
3589 + ls1012a_configure_serdes(ndev);
3590 + }
3591 +
3592 + if (pfe->scfg) {
3593 + /*Config MDIO from PAD */
3594 + regmap_write(pfe->scfg, 0x484, 0x80000000);
3595 + }
3596 +
3597 + priv->oldlink = 0;
3598 + priv->oldspeed = 0;
3599 + priv->oldduplex = -1;
3600 + pr_info("%s interface %x\n", __func__, interface);
3601 + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
3602 +
3603 + if (IS_ERR(phydev)) {
3604 + netdev_err(ndev, "phy_connect() failed\n");
3605 + return PTR_ERR(phydev);
3606 + }
3607 +
3608 + priv->phydev = phydev;
3609 + phydev->irq = PHY_POLL;
3610 +
3611 + return 0;
3612 +}
3613 +
3614 +/* pfe_gemac_init
3615 + */
3616 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3617 +{
3618 + struct gemac_cfg cfg;
3619 +
3620 + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3621 +
3622 + cfg.speed = SPEED_1000M;
3623 + cfg.duplex = DUPLEX_FULL;
3624 +
3625 + gemac_set_config(priv->EMAC_baseaddr, &cfg);
3626 + gemac_allow_broadcast(priv->EMAC_baseaddr);
3627 + gemac_enable_1536_rx(priv->EMAC_baseaddr);
3628 + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3629 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3630 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3631 +
3632 + /* GEM will perform checksum verification */
3633 + if (priv->ndev->features & NETIF_F_RXCSUM)
3634 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3635 + else
3636 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3637 +
3638 + return 0;
3639 +}
3640 +
3641 +/* pfe_eth_event_handler
3642 + */
3643 +static int pfe_eth_event_handler(void *data, int event, int qno)
3644 +{
3645 + struct pfe_eth_priv_s *priv = data;
3646 +
3647 + switch (event) {
3648 + case EVENT_RX_PKT_IND:
3649 +
3650 + if (qno == 0) {
3651 + if (napi_schedule_prep(&priv->high_napi)) {
3652 + netif_info(priv, intr, priv->ndev,
3653 + "%s: schedule high prio poll\n"
3654 + , __func__);
3655 +
3656 +#ifdef PFE_ETH_NAPI_STATS
3657 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3658 +#endif
3659 +
3660 + __napi_schedule(&priv->high_napi);
3661 + }
3662 + } else if (qno == 1) {
3663 + if (napi_schedule_prep(&priv->low_napi)) {
3664 + netif_info(priv, intr, priv->ndev,
3665 + "%s: schedule low prio poll\n"
3666 + , __func__);
3667 +
3668 +#ifdef PFE_ETH_NAPI_STATS
3669 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3670 +#endif
3671 + __napi_schedule(&priv->low_napi);
3672 + }
3673 + } else if (qno == 2) {
3674 + if (napi_schedule_prep(&priv->lro_napi)) {
3675 + netif_info(priv, intr, priv->ndev,
3676 + "%s: schedule lro prio poll\n"
3677 + , __func__);
3678 +
3679 +#ifdef PFE_ETH_NAPI_STATS
3680 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3681 +#endif
3682 + __napi_schedule(&priv->lro_napi);
3683 + }
3684 + }
3685 +
3686 + break;
3687 +
3688 + case EVENT_TXDONE_IND:
3689 + pfe_eth_flush_tx(priv);
3690 + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
3691 + break;
3692 + case EVENT_HIGH_RX_WM:
3693 + default:
3694 + break;
3695 + }
3696 +
3697 + return 0;
3698 +}
3699 +
3700 +static int pfe_eth_change_mtu(struct net_device *ndev, int new_mtu)
3701 +{
3702 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3703 +
3704 + ndev->mtu = new_mtu;
3705 + new_mtu += ETH_HLEN + ETH_FCS_LEN;
3706 + gemac_set_rx_max_fl(priv->EMAC_baseaddr, new_mtu);
3707 +
3708 + return 0;
3709 +}
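+
+/*
+ * Example: for the default MTU of 1500 the rx frame limit programmed
+ * above becomes 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes,
+ * the classic maximum size of an untagged Ethernet frame.
+ */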
3710 +
3711 +/* pfe_eth_open
3712 + */
3713 +static int pfe_eth_open(struct net_device *ndev)
3714 +{
3715 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3716 + struct hif_client_s *client;
3717 + int rc;
3718 +
3719 + netif_info(priv, ifup, ndev, "%s\n", __func__);
3720 +
3721 + /* Register client driver with HIF */
3722 + client = &priv->client;
3723 + memset(client, 0, sizeof(*client));
3724 + client->id = PFE_CL_GEM0 + priv->id;
3725 + client->tx_qn = emac_txq_cnt;
3726 + client->rx_qn = EMAC_RXQ_CNT;
3727 + client->priv = priv;
3728 + client->pfe = priv->pfe;
3729 + client->event_handler = pfe_eth_event_handler;
3730 +
3731 + client->tx_qsize = EMAC_TXQ_DEPTH;
3732 + client->rx_qsize = EMAC_RXQ_DEPTH;
3733 +
3734 + rc = hif_lib_client_register(client);
3735 + if (rc) {
3736 + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
3737 + __func__, client->id);
3738 + goto err0;
3739 + }
3740 +
3741 + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
3742 + client);
3743 +
3744 + pfe_gemac_init(priv);
3745 +
3746 + if (!is_valid_ether_addr(ndev->dev_addr)) {
3747 + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
3748 + rc = -EADDRNOTAVAIL;
3749 + goto err1;
3750 + }
3751 +
3752 + gemac_set_laddrN(priv->EMAC_baseaddr,
3753 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3754 +
3755 + napi_enable(&priv->high_napi);
3756 + napi_enable(&priv->low_napi);
3757 + napi_enable(&priv->lro_napi);
3758 +
3759 + rc = pfe_eth_start(priv);
3760 +
3761 + netif_tx_wake_all_queues(ndev);
3762 +
3763 + return rc;
3764 +
3765 +err1:
3766 + hif_lib_client_unregister(&priv->client);
3767 +
3768 +err0:
3769 + return rc;
3770 +}
3771 +
3772 +/*
3773 + * pfe_eth_shutdown
3774 + */
3775 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
3776 +{
3777 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3778 + int i, qstatus;
3779 + unsigned long next_poll = jiffies + 1, end = jiffies +
3780 + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3781 + int tx_pkts, prv_tx_pkts;
3782 +
3783 + netif_info(priv, ifdown, ndev, "%s\n", __func__);
3784 +
3785 + for (i = 0; i < emac_txq_cnt; i++)
3786 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
3787 +
3788 + netif_tx_stop_all_queues(ndev);
3789 +
3790 + do {
3791 + tx_pkts = 0;
3792 + pfe_eth_flush_tx(priv);
3793 +
3794 + for (i = 0; i < emac_txq_cnt; i++)
3795 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
3796 +
3797 + if (tx_pkts) {
3798 + /* Don't wait forever, break if we cross the max timeout */
3799 + if (time_after(jiffies, end)) {
3800 + pr_err(
3801 + "(%s)Tx is not complete after %dmsec\n",
3802 + ndev->name, TX_POLL_TIMEOUT_MS);
3803 + break;
3804 + }
3805 +
3806 + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
3807 + , __func__, ndev->name, tx_pkts);
3808 + if (need_resched())
3809 + schedule();
3810 + }
3811 +
3812 + } while (tx_pkts);
3813 +
3814 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3815 +
3816 + prv_tx_pkts = tmu_pkts_processed(priv->id);
3817 + /*
3818 + * Wait till the TMU transmits all pending packets:
3819 + * poll tmu_qstatus and the packets processed by the TMU every 10 ms.
3820 + * Consider the TMU busy if we see a TMU queue pending or any packets
3821 + * processed by the TMU.
3822 + */
3823 + while (1) {
3824 + if (time_after(jiffies, next_poll)) {
3825 + tx_pkts = tmu_pkts_processed(priv->id);
3826 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
3827 +
3828 + if (!qstatus && (tx_pkts == prv_tx_pkts))
3829 + break;
3830 + /* Don't wait forever, break if we cross max
3831 + * timeout(TX_POLL_TIMEOUT_MS)
3832 + */
3833 + if (time_after(jiffies, end)) {
3834 + pr_err("TMU%d is busy after %dmsec\n",
3835 + priv->id, TX_POLL_TIMEOUT_MS);
3836 + break;
3837 + }
3838 + prv_tx_pkts = tx_pkts;
3839 + next_poll++;
3840 + }
3841 + if (need_resched())
3842 + schedule();
3843 + }
3844 + /* Wait a little longer for any in-flight packets to finish transmitting */
3845 + next_poll = jiffies + 1;
3846 + while (1) {
3847 + if (time_after(jiffies, next_poll))
3848 + break;
3849 + if (need_resched())
3850 + schedule();
3851 + }
3852 +
3853 + pfe_eth_stop(ndev, wake);
3854 +
3855 + napi_disable(&priv->lro_napi);
3856 + napi_disable(&priv->low_napi);
3857 + napi_disable(&priv->high_napi);
3858 +
3859 + hif_lib_client_unregister(&priv->client);
3860 +
3861 + return 0;
3862 +}
3863 +
3864 +/* pfe_eth_close
3865 + *
3866 + */
3867 +static int pfe_eth_close(struct net_device *ndev)
3868 +{
3869 + pfe_eth_shutdown(ndev, 0);
3870 +
3871 + return 0;
3872 +}
3873 +
3874 +/* pfe_eth_suspend
3875 + *
3876 + * return value : 1 if netdevice is configured to wakeup system
3877 + * 0 otherwise
3878 + */
3879 +int pfe_eth_suspend(struct net_device *ndev)
3880 +{
3881 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3882 + int retval = 0;
3883 +
3884 + if (priv->wol) {
3885 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
3886 + retval = 1;
3887 + }
3888 + pfe_eth_shutdown(ndev, priv->wol);
3889 +
3890 + return retval;
3891 +}
3892 +
3893 +/* pfe_eth_resume
3894 + *
3895 + */
3896 +int pfe_eth_resume(struct net_device *ndev)
3897 +{
3898 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3899 +
3900 + if (priv->wol)
3901 + gemac_set_wol(priv->EMAC_baseaddr, 0);
3902 + gemac_tx_enable(priv->EMAC_baseaddr);
3903 +
3904 + return pfe_eth_open(ndev);
3905 +}
3906 +
3907 +/* pfe_eth_get_queuenum
3908 + */
3909 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
3910 + *skb)
3911 +{
3912 + int queuenum = 0;
3913 + unsigned long flags;
3914 +
3915 + /* Get the Fast Path queue number */
3916 + /*
3917 + * Use conntrack mark (if conntrack exists), then packet mark (if any),
3918 + * then fallback to default
3919 + */
3920 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3921 + if (skb->_nfct) {
3922 + enum ip_conntrack_info cinfo;
3923 + struct nf_conn *ct;
3924 +
3925 + ct = nf_ct_get(skb, &cinfo);
3926 +
3927 + if (ct) {
3928 + u32 connmark;
3929 +
3930 + connmark = ct->mark;
3931 +
3932 + if ((connmark & 0x80000000) && priv->id != 0)
3933 + connmark >>= 16;
3934 +
3935 + queuenum = connmark & EMAC_QUEUENUM_MASK;
3936 + }
3937 + } else {/* continued after #endif ... */
3938 +#endif
3939 + if (skb->mark) {
3940 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
3941 + } else {
3942 + spin_lock_irqsave(&priv->lock, flags);
3943 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
3944 + spin_unlock_irqrestore(&priv->lock, flags);
3945 + }
3946 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3947 + }
3948 +#endif
3949 + return queuenum;
3950 +}
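+
+/*
+ * An illustration of the steering above, assuming the default
+ * emac_txq_cnt of EMAC_TXQ_CNT (16), i.e. EMAC_QUEUENUM_MASK = 0xf:
+ * marking a flow from user space with, e.g.
+ *
+ *	iptables -t mangle -A POSTROUTING -p tcp --dport 22 \
+ *		-j MARK --set-mark 5
+ *
+ * sets skb->mark = 5 and steers those packets to tx queue 5, while
+ * unmarked flows without conntrack state fall back to
+ * priv->default_priority.
+ */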
3951 +
3952 +/* pfe_eth_might_stop_tx
3953 + *
3954 + */
3955 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
3956 + struct netdev_queue *tx_queue,
3957 + unsigned int n_desc,
3958 + unsigned int n_segs)
3959 +{
3960 + ktime_t kt;
3961 + int tried = 0;
3962 +
3963 +try_again:
3964 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
3965 + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
3966 + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
3967 + if (!tried) {
3968 + __hif_lib_update_credit(&priv->client, queuenum);
3969 + tried = 1;
3970 + goto try_again;
3971 + }
3972 +#ifdef PFE_ETH_TX_STATS
3973 + if (__hif_tx_avail(&pfe->hif) < n_desc) {
3974 + priv->stop_queue_hif[queuenum]++;
3975 + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
3976 + priv->stop_queue_hif_client[queuenum]++;
3977 + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
3978 + n_segs) {
3979 + priv->stop_queue_credit[queuenum]++;
3980 + }
3981 + priv->stop_queue_total[queuenum]++;
3982 +#endif
3983 + netif_tx_stop_queue(tx_queue);
3984 +
3985 + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
3986 + NSEC_PER_MSEC);
3987 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
3988 + HRTIMER_MODE_REL);
3989 + return -1;
3990 + } else {
3991 + return 0;
3992 + }
3993 +}
3994 +
3995 +#define SA_MAX_OP 2
3996 +/* pfe_hif_send_packet
3997 + *
3998 + * At this level if TX fails we drop the packet
3999 + */
4000 +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
4001 + *priv, int queuenum)
4002 +{
4003 + struct skb_shared_info *sh = skb_shinfo(skb);
4004 + unsigned int nr_frags;
4005 + u32 ctrl = 0;
4006 +
4007 + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
4008 +
4009 + if (skb_is_gso(skb)) {
4010 + priv->stats.tx_dropped++;
4011 + return;
4012 + }
4013 +
4014 + if (skb->ip_summed == CHECKSUM_PARTIAL)
4015 + ctrl = HIF_CTRL_TX_CHECKSUM;
4016 +
4017 + nr_frags = sh->nr_frags;
4018 +
4019 + if (nr_frags) {
4020 + skb_frag_t *f;
4021 + int i;
4022 +
4023 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
4024 + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
4025 + skb);
4026 +
4027 + for (i = 0; i < nr_frags - 1; i++) {
4028 + f = &sh->frags[i];
4029 + __hif_lib_xmit_pkt(&priv->client, queuenum,
4030 + skb_frag_address(f),
4031 + skb_frag_size(f),
4032 + 0x0, 0x0, skb);
4033 + }
4034 +
4035 + f = &sh->frags[i];
4036 +
4037 + __hif_lib_xmit_pkt(&priv->client, queuenum,
4038 + skb_frag_address(f), skb_frag_size(f),
4039 + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
4040 + skb);
4041 +
4042 + netif_info(priv, tx_queued, priv->ndev,
4043 + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
4044 + __func__, skb, nr_frags, skb->len);
4045 + } else {
4046 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
4047 + skb->len, ctrl, HIF_FIRST_BUFFER |
4048 + HIF_LAST_BUFFER | HIF_DATA_VALID,
4049 + skb);
4050 + netif_info(priv, tx_queued, priv->ndev,
4051 + "%s: pkt sent successfully skb:%p len:%d\n",
4052 + __func__, skb, skb->len);
4053 + }
4054 + hif_tx_dma_start();
4055 + priv->stats.tx_packets++;
4056 + priv->stats.tx_bytes += skb->len;
4057 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
4058 +}
4059 +
4060 +/* pfe_eth_flush_txQ
4061 + */
4062 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
4063 + from_tx, int n_desc)
4064 +{
4065 + struct sk_buff *skb;
4066 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4067 + tx_q_num);
4068 + unsigned int flags;
4069 +
4070 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
4071 +
4072 + if (!from_tx)
4073 + __netif_tx_lock_bh(tx_queue);
4074 +
4075 + /* Clean HIF and client queue */
4076 + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
4077 + tx_q_num, &flags,
4078 + HIF_TX_DESC_NT))) {
4079 + if (flags & HIF_DATA_VALID)
4080 + dev_kfree_skb_any(skb);
4081 + }
4082 + if (!from_tx)
4083 + __netif_tx_unlock_bh(tx_queue);
4084 +}
4085 +
4086 +/* pfe_eth_flush_tx
4087 + */
4088 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
4089 +{
4090 + int ii;
4091 +
4092 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
4093 +
4094 + for (ii = 0; ii < emac_txq_cnt; ii++) {
4095 + pfe_eth_flush_txQ(priv, ii, 0, 0);
4096 + __hif_lib_update_credit(&priv->client, ii);
4097 + }
4098 +}
4099 +
4100 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
4101 + *n_segs)
4102 +{
4103 + struct skb_shared_info *sh = skb_shinfo(skb);
4104 +
4105 + /* Scattered data */
4106 + if (sh->nr_frags) {
4107 + *n_desc = sh->nr_frags + 1;
4108 + *n_segs = 1;
4109 + /* Regular case */
4110 + } else {
4111 + *n_desc = 1;
4112 + *n_segs = 1;
4113 + }
4114 +}
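+
+/*
+ * Example: a linear skb needs a single HIF descriptor, while an skb
+ * with 3 page fragments needs 1 (header) + 3 (frags) = 4 descriptors;
+ * in both cases it consumes one tx credit segment (*n_segs = 1).
+ */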
4115 +
4116 +/* pfe_eth_send_packet
4117 + */
4118 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
4119 +{
4120 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4121 + int tx_q_num = skb_get_queue_mapping(skb);
4122 + int n_desc, n_segs;
4123 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4124 + tx_q_num);
4125 +
4126 + netif_info(priv, tx_queued, ndev, "%s\n", __func__);
4127 +
4128 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
4129 + sizeof(unsigned long)))) {
4130 + netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
4131 + __func__);
4132 +
4133 + if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
4134 + long)), 0, GFP_ATOMIC)) {
4135 + /* No need to re-transmit, no way to recover */
4136 + kfree_skb(skb);
4137 + priv->stats.tx_dropped++;
4138 + return NETDEV_TX_OK;
4139 + }
4140 + }
4141 +
4142 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
4143 +
4144 + hif_tx_lock(&pfe->hif);
4145 + if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
4146 + n_segs))) {
4147 +#ifdef PFE_ETH_TX_STATS
4148 + if (priv->was_stopped[tx_q_num]) {
4149 + priv->clean_fail[tx_q_num]++;
4150 + priv->was_stopped[tx_q_num] = 0;
4151 + }
4152 +#endif
4153 + hif_tx_unlock(&pfe->hif);
4154 + return NETDEV_TX_BUSY;
4155 + }
4156 +
4157 + pfe_hif_send_packet(skb, priv, tx_q_num);
4158 +
4159 + hif_tx_unlock(&pfe->hif);
4160 +
4161 + tx_queue->trans_start = jiffies;
4162 +
4163 +#ifdef PFE_ETH_TX_STATS
4164 + priv->was_stopped[tx_q_num] = 0;
4165 +#endif
4166 +
4167 + return NETDEV_TX_OK;
4168 +}
4169 +
4170 +/* pfe_eth_select_queue
4171 + *
4172 + */
4173 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
4174 + void *accel_priv,
4175 + select_queue_fallback_t fallback)
4176 +{
4177 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4178 +
4179 + return pfe_eth_get_queuenum(priv, skb);
4180 +}
4181 +
4182 +/* pfe_eth_get_stats
4183 + */
4184 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
4185 +{
4186 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4187 +
4188 + netif_info(priv, drv, ndev, "%s\n", __func__);
4189 +
4190 + return &priv->stats;
4191 +}
4192 +
4193 +/* pfe_eth_set_mac_address
4194 + */
4195 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
4196 +{
4197 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4198 + struct sockaddr *sa = addr;
4199 +
4200 + netif_info(priv, drv, ndev, "%s\n", __func__);
4201 +
4202 + if (!is_valid_ether_addr(sa->sa_data))
4203 + return -EADDRNOTAVAIL;
4204 +
4205 + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
4206 +
4207 + gemac_set_laddrN(priv->EMAC_baseaddr,
4208 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
4209 +
4210 + return 0;
4211 +}
4212 +
4213 +/* pfe_eth_enet_addr_byte_mac
4214 + */
4215 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
4216 + struct pfe_mac_addr *enet_addr)
4217 +{
4218 + if (!enet_byte_addr || !enet_addr) {
4219 + return -1;
4220 +
4221 + } else {
4222 + enet_addr->bottom = enet_byte_addr[0] |
4223 + (enet_byte_addr[1] << 8) |
4224 + (enet_byte_addr[2] << 16) |
4225 + (enet_byte_addr[3] << 24);
4226 + enet_addr->top = enet_byte_addr[4] |
4227 + (enet_byte_addr[5] << 8);
4228 + return 0;
4229 + }
4230 +}
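+
+/*
+ * Example of the little-endian packing above: the MAC address
+ * 00:11:22:33:44:55 is stored as
+ *
+ *	enet_addr->bottom = 0x33221100
+ *	enet_addr->top    = 0x00005544
+ *
+ * matching the bottom/top register split expected by gemac_set_laddrN().
+ */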
4231 +
4232 +/* pfe_eth_set_multi
4233 + */
4234 +static void pfe_eth_set_multi(struct net_device *ndev)
4235 +{
4236 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4237 + struct pfe_mac_addr hash_addr; /* hash register structure */
4238 + /* specific mac address register structure */
4239 + struct pfe_mac_addr spec_addr;
4240 + int result; /* index into hash register to set.. */
4241 + int uc_count = 0;
4242 + struct netdev_hw_addr *ha;
4243 +
4244 + if (ndev->flags & IFF_PROMISC) {
4245 + netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4246 +
4247 + priv->promisc = 1;
4248 + gemac_enable_copy_all(priv->EMAC_baseaddr);
4249 + } else {
4250 + priv->promisc = 0;
4251 + gemac_disable_copy_all(priv->EMAC_baseaddr);
4252 + }
4253 +
4254 + /* Enable broadcast frame reception if required. */
4255 + if (ndev->flags & IFF_BROADCAST) {
4256 + gemac_allow_broadcast(priv->EMAC_baseaddr);
4257 + } else {
4258 + netif_info(priv, drv, ndev,
4259 + "disabling broadcast frame reception\n");
4260 +
4261 + gemac_no_broadcast(priv->EMAC_baseaddr);
4262 + }
4263 +
4264 + if (ndev->flags & IFF_ALLMULTI) {
4265 + /* Set the hash to rx all multicast frames */
4266 + hash_addr.bottom = 0xFFFFFFFF;
4267 + hash_addr.top = 0xFFFFFFFF;
4268 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4269 + netdev_for_each_uc_addr(ha, ndev) {
4270 + if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4271 + break;
4272 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4273 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4274 + uc_count + 2);
4275 + uc_count++;
4276 + }
4277 + } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
4278 + u8 *addr;
4279 +
4280 + hash_addr.bottom = 0;
4281 + hash_addr.top = 0;
4282 +
4283 + netdev_for_each_mc_addr(ha, ndev) {
4284 + addr = ha->addr;
4285 +
4286 + netif_info(priv, drv, ndev,
4287 + "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
4288 + addr[0], addr[1], addr[2],
4289 + addr[3], addr[4], addr[5]);
4290 +
4291 + result = pfe_eth_get_hash(addr);
4292 +
4293 + if (result < EMAC_HASH_REG_BITS) {
4294 + if (result < 32)
4295 + hash_addr.bottom |= (1 << result);
4296 + else
4297 + hash_addr.top |= (1 << (result - 32));
4298 + } else {
4299 + break;
4300 + }
4301 + }
4302 +
4303 + uc_count = -1;
4304 + netdev_for_each_uc_addr(ha, ndev) {
4305 + addr = ha->addr;
4306 +
4307 + if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
4308 + netdev_info(ndev,
4309 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4310 + addr[0], addr[1], addr[2],
4311 + addr[3], addr[4], addr[5]);
4312 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4313 + gemac_set_laddrN(priv->EMAC_baseaddr,
4314 + &spec_addr, uc_count + 2);
4315 + } else {
4316 + netif_info(priv, drv, ndev,
4317 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4318 + addr[0], addr[1], addr[2],
4319 + addr[3], addr[4], addr[5]);
4320 +
4321 + result = pfe_eth_get_hash(addr);
4322 + if (result >= EMAC_HASH_REG_BITS) {
4323 + break;
4324 +
4325 + } else {
4326 + if (result < 32)
4327 + hash_addr.bottom |= (1 <<
4328 + result);
4329 + else
4330 + hash_addr.top |= (1 <<
4331 + (result - 32));
4332 + }
4333 + }
4334 + }
4335 +
4336 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4337 + }
4338 +
4339 + if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
4340 + /*
4341 + * Check if there are any specific address HW registers that
4342 + * need to be flushed
4343 + */
4344 + for (uc_count = netdev_uc_count(ndev); uc_count <
4345 + MAX_UC_SPEC_ADDR_REG; uc_count++)
4346 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4347 + }
4348 +
4349 + if (ndev->flags & IFF_LOOPBACK)
4350 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4351 +}
4352 +
4353 +/* pfe_eth_set_features
4354 + */
4355 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
4356 + features)
4357 +{
4358 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4359 + int rc = 0;
4360 +
4361 + if (features & NETIF_F_RXCSUM)
4362 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4363 + else
4364 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4365 + return rc;
4366 +}
4367 +
4368 +/* pfe_eth_fast_tx_timeout
4369 + */
4370 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4371 +{
4372 + struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
4373 + pfe_eth_fast_timer,
4374 + timer);
4375 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4376 + struct pfe_eth_priv_s,
4377 + fast_tx_timeout);
4378 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4379 + fast_tx_timeout->queuenum);
4380 +
4381 + if (netif_tx_queue_stopped(tx_queue)) {
4382 +#ifdef PFE_ETH_TX_STATS
4383 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4384 +#endif
4385 + netif_tx_wake_queue(tx_queue);
4386 + }
4387 +
4388 + return HRTIMER_NORESTART;
4389 +}
4390 +
4391 +/* pfe_eth_fast_tx_timeout_init
4392 + */
4393 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4394 +{
4395 + int i;
4396 +
4397 + for (i = 0; i < emac_txq_cnt; i++) {
4398 + priv->fast_tx_timeout[i].queuenum = i;
4399 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4400 + HRTIMER_MODE_REL);
4401 + priv->fast_tx_timeout[i].timer.function =
4402 + pfe_eth_fast_tx_timeout;
4403 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4404 + }
4405 +}
4406 +
4407 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4408 + struct pfe_eth_priv_s *priv,
4409 + unsigned int qno)
4410 +{
4411 + void *buf_addr;
4412 + unsigned int rx_ctrl;
4413 + unsigned int desc_ctrl = 0;
4414 + struct hif_ipsec_hdr *ipsec_hdr = NULL;
4415 + struct sk_buff *skb;
4416 + struct sk_buff *skb_frag, *skb_frag_last = NULL;
4417 + int length = 0, offset;
4418 +
4419 + skb = priv->skb_inflight[qno];
4420 +
4421 + if (skb) {
4422 + skb_frag_last = skb_shinfo(skb)->frag_list;
4423 + if (skb_frag_last) {
4424 + while (skb_frag_last->next)
4425 + skb_frag_last = skb_frag_last->next;
4426 + }
4427 + }
4428 +
4429 + while (!(desc_ctrl & CL_DESC_LAST)) {
4430 + buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4431 + &offset, &rx_ctrl, &desc_ctrl,
4432 + (void **)&ipsec_hdr);
4433 + if (!buf_addr)
4434 + goto incomplete;
4435 +
4436 +#ifdef PFE_ETH_NAPI_STATS
4437 + priv->napi_counters[NAPI_DESC_COUNT]++;
4438 +#endif
4439 +
4440 + /* First frag */
4441 + if (desc_ctrl & CL_DESC_FIRST) {
4442 + skb = build_skb(buf_addr, 0);
4443 + if (unlikely(!skb))
4444 + goto pkt_drop;
4445 +
4446 + skb_reserve(skb, offset);
4447 + skb_put(skb, length);
4448 + skb->dev = ndev;
4449 +
4450 + if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4451 + HIF_CTRL_RX_CHECKSUMMED))
4452 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4453 + else
4454 + skb_checksum_none_assert(skb);
4455 +
4456 + } else {
4457 + /* Next frags */
4458 + if (unlikely(!skb)) {
4459 + pr_err("%s: NULL skb_inflight\n",
4460 + __func__);
4461 + goto pkt_drop;
4462 + }
4463 +
4464 + skb_frag = build_skb(buf_addr, 0);
4465 +
4466 + if (unlikely(!skb_frag)) {
4467 + kfree(buf_addr);
4468 + goto pkt_drop;
4469 + }
4470 +
4471 + skb_reserve(skb_frag, offset);
4472 + skb_put(skb_frag, length);
4473 +
4474 + skb_frag->dev = ndev;
4475 +
4476 + if (skb_shinfo(skb)->frag_list)
4477 + skb_frag_last->next = skb_frag;
4478 + else
4479 + skb_shinfo(skb)->frag_list = skb_frag;
4480 +
4481 + skb->truesize += skb_frag->truesize;
4482 + skb->data_len += length;
4483 + skb->len += length;
4484 + skb_frag_last = skb_frag;
4485 + }
4486 + }
4487 +
4488 + priv->skb_inflight[qno] = NULL;
4489 + return skb;
4490 +
4491 +incomplete:
4492 + priv->skb_inflight[qno] = skb;
4493 + return NULL;
4494 +
4495 +pkt_drop:
4496 + priv->skb_inflight[qno] = NULL;
4497 +
4498 + if (skb)
4499 + kfree_skb(skb);
4500 + else
4501 + kfree(buf_addr);
4502 +
4503 + priv->stats.rx_errors++;
4504 +
4505 + return NULL;
4506 +}
4507 +
4508 +/* pfe_eth_poll
4509 + */
4510 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4511 + unsigned int qno, int budget)
4512 +{
4513 + struct net_device *ndev = priv->ndev;
4514 + struct sk_buff *skb;
4515 + int work_done = 0;
4516 + unsigned int len;
4517 +
4518 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4519 +
4520 +#ifdef PFE_ETH_NAPI_STATS
4521 + priv->napi_counters[NAPI_POLL_COUNT]++;
4522 +#endif
4523 +
4524 + do {
4525 + skb = pfe_eth_rx_skb(ndev, priv, qno);
4526 +
4527 + if (!skb)
4528 + break;
4529 +
4530 + len = skb->len;
4531 +
4532 + /* Packet will be processed */
4533 + skb->protocol = eth_type_trans(skb, ndev);
4534 +
4535 + netif_receive_skb(skb);
4536 +
4537 + priv->stats.rx_packets++;
4538 + priv->stats.rx_bytes += len;
4539 +
4540 + work_done++;
4541 +
4542 +#ifdef PFE_ETH_NAPI_STATS
4543 + priv->napi_counters[NAPI_PACKET_COUNT]++;
4544 +#endif
4545 +
4546 + } while (work_done < budget);
4547 +
4548 + /*
4549 + * If no Rx receive nor cleanup work was done, exit polling mode.
4550 + * No more netif_running(dev) check is required here , as this is
4551 + * checked in net/core/dev.c (2.6.33.5 kernel specific).
4552 + */
4553 + if (work_done < budget) {
4554 + napi_complete(napi);
4555 +
4556 + hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4557 + qno);
4558 + }
4559 +#ifdef PFE_ETH_NAPI_STATS
4560 + else
4561 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4562 +#endif
4563 +
4564 + return work_done;
4565 +}
4566 +
4567 +/*
4568 + * pfe_eth_lro_poll
4569 + */
4570 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4571 +{
4572 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4573 + lro_napi);
4574 +
4575 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4576 +
4577 + return pfe_eth_poll(priv, napi, 2, budget);
4578 +}
4579 +
4580 +/* pfe_eth_low_poll
4581 + */
4582 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4583 +{
4584 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4585 + low_napi);
4586 +
4587 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4588 +
4589 + return pfe_eth_poll(priv, napi, 1, budget);
4590 +}
4591 +
4592 +/* pfe_eth_high_poll
4593 + */
4594 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4595 +{
4596 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4597 + high_napi);
4598 +
4599 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4600 +
4601 + return pfe_eth_poll(priv, napi, 0, budget);
4602 +}
4603 +
4604 +static const struct net_device_ops pfe_netdev_ops = {
4605 + .ndo_open = pfe_eth_open,
4606 + .ndo_stop = pfe_eth_close,
4607 + .ndo_start_xmit = pfe_eth_send_packet,
4608 + .ndo_select_queue = pfe_eth_select_queue,
4609 + .ndo_set_rx_mode = pfe_eth_set_multi,
4610 + .ndo_set_mac_address = pfe_eth_set_mac_address,
4611 + .ndo_validate_addr = eth_validate_addr,
4612 + .ndo_change_mtu = pfe_eth_change_mtu,
4613 + .ndo_get_stats = pfe_eth_get_stats,
4614 + .ndo_set_features = pfe_eth_set_features,
4615 +};
4616 +
4617 +/* pfe_eth_init_one
4618 + */
4619 +static int pfe_eth_init_one(struct pfe *pfe, int id)
4620 +{
4621 + struct net_device *ndev = NULL;
4622 + struct pfe_eth_priv_s *priv = NULL;
4623 + struct ls1012a_eth_platform_data *einfo;
4624 + struct ls1012a_mdio_platform_data *minfo;
4625 + struct ls1012a_pfe_platform_data *pfe_info;
4626 + int err;
4627 +
4628 + /* Extract platform data */
4629 + pfe_info = (struct ls1012a_pfe_platform_data *)
4630 + pfe->dev->platform_data;
4631 + if (!pfe_info) {
4632 + pr_err(
4633 + "%s: pfe missing additional platform data\n"
4634 + , __func__);
4635 + err = -ENODEV;
4636 + goto err0;
4637 + }
4638 +
4639 + einfo = (struct ls1012a_eth_platform_data *)
4640 + pfe_info->ls1012a_eth_pdata;
4641 +
4642 + /* einfo should never be NULL, but there is no harm in checking */
4643 + if (!einfo) {
4644 + pr_err(
4645 + "%s: pfe missing additional gemacs platform data\n"
4646 + , __func__);
4647 + err = -ENODEV;
4648 + goto err0;
4649 + }
4650 +
4651 + minfo = (struct ls1012a_mdio_platform_data *)
4652 + pfe_info->ls1012a_mdio_pdata;
4653 +
4654 + /* minfo should never be NULL, but there is no harm in checking */
4655 + if (!minfo) {
4656 + pr_err(
4657 + "%s: pfe missing additional mdios platform data\n",
4658 + __func__);
4659 + err = -ENODEV;
4660 + goto err0;
4661 + }
4662 +
4663 + if (us)
4664 + emac_txq_cnt = EMAC_TXQ_CNT;
4665 + /* Create an ethernet device instance */
4666 + ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4667 +
4668 + if (!ndev) {
4669 + pr_err("%s: gemac %d device allocation failed\n",
4670 + __func__, einfo[id].gem_id);
4671 + err = -ENOMEM;
4672 + goto err0;
4673 + }
4674 +
4675 + priv = netdev_priv(ndev);
4676 + priv->ndev = ndev;
4677 + priv->id = einfo[id].gem_id;
4678 + priv->pfe = pfe;
4679 +
4680 + SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4681 +
4682 + pfe->eth.eth_priv[id] = priv;
4683 +
4684 + /* Set the info in the priv to the current info */
4685 + priv->einfo = &einfo[id];
4686 + priv->EMAC_baseaddr = cbus_emac_base[id];
4687 + priv->PHY_baseaddr = cbus_emac_base[0];
4688 + priv->GPI_baseaddr = cbus_gpi_base[id];
4689 +
4690 +#define HIF_GEMAC_TMUQ_BASE 6
4691 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
4692 + priv->high_tmu_q = priv->low_tmu_q + 1;
4693 +
4694 + spin_lock_init(&priv->lock);
4695 +
4696 + pfe_eth_fast_tx_timeout_init(priv);
4697 +
4698 + /* Copy the station address into the dev structure, */
4699 + memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
4700 +
4701 + /* Initialize mdio */
4702 + if (minfo[id].enabled) {
4703 + err = pfe_eth_mdio_init(priv, &minfo[id]);
4704 + if (err) {
4705 + netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
4706 + __func__);
4707 + goto err2;
4708 + }
4709 + }
4710 +
4711 + if (us)
4712 + goto phy_init;
4713 +
4714 + ndev->mtu = 1500;
4715 +
4716 + /* Set MTU limits */
4717 + ndev->min_mtu = ETH_MIN_MTU;
4718 +
4719 +/*
4720 + * Jumbo frames are not supported on LS1012A rev-1.0.
4721 + * So max mtu should be restricted to supported frame length.
4722 + */
4723 + if (pfe_errata_a010897)
4724 + ndev->max_mtu = JUMBO_FRAME_SIZE_V1 - ETH_HLEN - ETH_FCS_LEN;
4725 + else
4726 + ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
4727 +
4728 + /* supported features */
4729 + ndev->hw_features = NETIF_F_SG;
4730 +
4731 + /*Enable after checksum offload is validated */
4732 + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
4733 + NETIF_F_IPV6_CSUM | NETIF_F_SG;
4734 +
4735 + /* enabled by default */
4736 + ndev->features = ndev->hw_features;
4737 +
4738 + priv->usr_features = ndev->features;
4739 +
4740 + ndev->netdev_ops = &pfe_netdev_ops;
4741 +
4742 + ndev->ethtool_ops = &pfe_ethtool_ops;
4743 +
4744 + /* Enable basic messages by default */
4745 + priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
4746 + NETIF_MSG_PROBE;
4747 +
4748 + netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
4749 + HIF_RX_POLL_WEIGHT - 16);
4750 + netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
4751 + HIF_RX_POLL_WEIGHT - 16);
4752 + netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
4753 + HIF_RX_POLL_WEIGHT - 16);
4754 +
4755 + err = register_netdev(ndev);
4756 +
4757 + if (err) {
4758 + netdev_err(ndev, "register_netdev() failed\n");
4759 + goto err3;
4760 + }
4761 +
4762 +phy_init:
4763 + device_init_wakeup(&ndev->dev, WAKE_MAGIC);
4764 +
4765 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
4766 + err = pfe_phy_init(ndev);
4767 + if (err) {
4768 + netdev_err(ndev, "%s: pfe_phy_init() failed\n",
4769 + __func__);
4770 + goto err4;
4771 + }
4772 + }
4773 +
4774 + if (us) {
4775 + if (priv->phydev)
4776 + phy_start(priv->phydev);
4777 + return 0;
4778 + }
4779 +
4780 + netif_carrier_on(ndev);
4781 +
4782 + /* Create all the sysfs files */
4783 + if (pfe_eth_sysfs_init(ndev))
4784 + goto err4;
4785 +
4786 + netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
4787 + __func__, priv->EMAC_baseaddr);
4788 +
4789 + return 0;
4790 +err4:
4791 + if (us)
4792 + goto err3;
4793 + unregister_netdev(ndev);
4794 +err3:
4795 + pfe_eth_mdio_exit(priv->mii_bus);
4796 +err2:
4797 + free_netdev(priv->ndev);
4798 +err0:
4799 + return err;
4800 +}
4801 +
4802 +/* pfe_eth_init
4803 + */
4804 +int pfe_eth_init(struct pfe *pfe)
4805 +{
4806 + int ii = 0;
4807 + int err;
4808 +
4809 + pr_info("%s\n", __func__);
4810 +
4811 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
4812 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
4813 +
4814 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
4815 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
4816 +
4817 + if (fsl_guts_get_svr() == LS1012A_REV_1_0)
4818 + pfe_errata_a010897 = true;
4819 + else
4820 + pfe_errata_a010897 = false;
4821 +
4822 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
4823 + err = pfe_eth_init_one(pfe, ii);
4824 + if (err)
4825 + goto err0;
4826 + }
4827 +
4828 + return 0;
4829 +
4830 +err0:
4831 + while (ii--)
4832 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4833 +
4835 + return err;
4836 +}
4837 +
4838 +/* pfe_eth_exit_one
4839 + */
4840 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
4841 +{
4842 + netif_info(priv, probe, priv->ndev, "%s\n", __func__);
4843 +
4844 + if (!us)
4845 + pfe_eth_sysfs_exit(priv->ndev);
4846 +
4847 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
4848 + pfe_phy_exit(priv->ndev);
4849 +
4850 + if (!us)
4851 + unregister_netdev(priv->ndev);
4852 +
4853 + if (priv->mii_bus)
4854 + pfe_eth_mdio_exit(priv->mii_bus);
4855 +
4856 + free_netdev(priv->ndev);
4857 +}
4858 +
4859 +/* pfe_eth_exit
4860 + */
4861 +void pfe_eth_exit(struct pfe *pfe)
4862 +{
4863 + int ii;
4864 +
4865 + pr_info("%s\n", __func__);
4866 +
4867 + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
4868 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4869 +}
4870 --- /dev/null
4871 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
4872 @@ -0,0 +1,185 @@
4873 +/*
4874 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4875 + * Copyright 2017 NXP
4876 + *
4877 + * This program is free software; you can redistribute it and/or modify
4878 + * it under the terms of the GNU General Public License as published by
4879 + * the Free Software Foundation; either version 2 of the License, or
4880 + * (at your option) any later version.
4881 + *
4882 + * This program is distributed in the hope that it will be useful,
4883 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4884 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4885 + * GNU General Public License for more details.
4886 + *
4887 + * You should have received a copy of the GNU General Public License
4888 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4889 + */
4890 +
4891 +#ifndef _PFE_ETH_H_
4892 +#define _PFE_ETH_H_
4893 +#include <linux/kernel.h>
4894 +#include <linux/netdevice.h>
4895 +#include <linux/etherdevice.h>
4896 +#include <linux/ethtool.h>
4897 +#include <linux/mii.h>
4898 +#include <linux/phy.h>
4899 +#include <linux/clk.h>
4900 +#include <linux/interrupt.h>
4901 +#include <linux/time.h>
4902 +
4903 +#define PFE_ETH_NAPI_STATS
4904 +#define PFE_ETH_TX_STATS
4905 +
4906 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
4907 +#define LRO_LEN_COUNT_MAX 32
4908 +#define LRO_NB_COUNT_MAX 32
4909 +
4910 +#define PFE_PAUSE_FLAG_ENABLE 1
4911 +#define PFE_PAUSE_FLAG_AUTONEG 2
4912 +
4913 +/* GEMAC configured by SW */
4914 +/* GEMAC configured by phy lines (not for MII/GMII) */
4915 +
4916 +#define GEMAC_SW_FULL_DUPLEX BIT(9)
4917 +#define GEMAC_SW_SPEED_10M (0 << 12)
4918 +#define GEMAC_SW_SPEED_100M BIT(12)
4919 +#define GEMAC_SW_SPEED_1G (2 << 12)
4920 +
4921 +#define GEMAC_NO_PHY BIT(0)
4922 +
4923 +struct ls1012a_eth_platform_data {
4924 + /* device specific information */
4925 + u32 device_flags;
4926 + char name[16];
4927 +
4928 + /* board specific information */
4929 + u32 mii_config;
4930 + u32 phy_flags;
4931 + u32 gem_id;
4932 + u32 bus_id;
4933 + u32 phy_id;
4934 + u32 mdio_muxval;
4935 + u8 mac_addr[ETH_ALEN];
4936 +};
4937 +
4938 +struct ls1012a_mdio_platform_data {
4939 + int enabled;
4940 + int irq[32];
4941 + u32 phy_mask;
4942 + int mdc_div;
4943 +};
4944 +
4945 +struct ls1012a_pfe_platform_data {
4946 + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
4947 + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
4948 +};
4949 +
4950 +#define NUM_GEMAC_SUPPORT 2
4951 +#define DRV_NAME "pfe-eth"
4952 +#define DRV_VERSION "1.0"
4953 +
4954 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
4955 +#define TX_POLL_TIMEOUT_MS 1000
4956 +
4957 +#define EMAC_TXQ_CNT 16
4958 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
4959 +
4960 +#define JUMBO_FRAME_SIZE_V1 1900
4961 +#define JUMBO_FRAME_SIZE_V2 10258
4962 +/*
4963 + * Client Tx queue threshold, for txQ flush condition.
4964 + * It must be smaller than the queue size (in case we ever change it in the
4965 + * future).
4966 + */
4967 +#define HIF_CL_TX_FLUSH_MARK 32
4968 +
4969 +/*
4970 + * Max number of TX resources (HIF descriptors or skbs) that will be released
4971 + * in a single go during batch recycling.
4972 + * Should be lower than the flush mark so the SW can provide the HW with a
4973 + * continuous stream of packets instead of bursts.
4974 + */
4975 +#define TX_FREE_MAX_COUNT 16
4976 +#define EMAC_RXQ_CNT 3
4977 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
4978 +/* make sure clients can receive a full burst of packets */
4979 +#define EMAC_RMON_TXBYTES_POS 0x00
4980 +#define EMAC_RMON_RXBYTES_POS 0x14
4981 +
4982 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
4983 +#define EMAC_MDIO_TIMEOUT 1000
4984 +#define MAX_UC_SPEC_ADDR_REG 31
4985 +
4986 +struct pfe_eth_fast_timer {
4987 + int queuenum;
4988 + struct hrtimer timer;
4989 + void *base;
4990 +};
4991 +
4992 +struct pfe_eth_priv_s {
4993 + struct pfe *pfe;
4994 + struct hif_client_s client;
4995 + struct napi_struct lro_napi;
4996 + struct napi_struct low_napi;
4997 + struct napi_struct high_napi;
4998 + int low_tmu_q;
4999 + int high_tmu_q;
5000 + struct net_device_stats stats;
5001 + struct net_device *ndev;
5002 + int id;
5003 + int promisc;
5004 + unsigned int msg_enable;
5005 + unsigned int usr_features;
5006 +
5007 + spinlock_t lock; /* protect member variables */
5008 + unsigned int event_status;
5009 + int irq;
5010 + void *EMAC_baseaddr;
5011 + /* This points to the EMAC base from where we access PHY */
5012 + void *PHY_baseaddr;
5013 + void *GPI_baseaddr;
5014 + /* PHY stuff */
5015 + struct phy_device *phydev;
5016 + int oldspeed;
5017 + int oldduplex;
5018 + int oldlink;
5019 + /* mdio info */
5020 + int mdc_div;
5021 + struct mii_bus *mii_bus;
5022 + struct clk *gemtx_clk;
5023 + int wol;
5024 + int pause_flag;
5025 +
5026 + int default_priority;
5027 + struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
5028 +
5029 + struct ls1012a_eth_platform_data *einfo;
5030 + struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
5031 +
5032 +#ifdef PFE_ETH_TX_STATS
5033 + unsigned int stop_queue_total[EMAC_TXQ_CNT];
5034 + unsigned int stop_queue_hif[EMAC_TXQ_CNT];
5035 + unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
5036 + unsigned int stop_queue_credit[EMAC_TXQ_CNT];
5037 + unsigned int clean_fail[EMAC_TXQ_CNT];
5038 + unsigned int was_stopped[EMAC_TXQ_CNT];
5039 +#endif
5040 +
5041 +#ifdef PFE_ETH_NAPI_STATS
5042 + unsigned int napi_counters[NAPI_MAX_COUNT];
5043 +#endif
5044 + unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
5045 +};
5046 +
5047 +struct pfe_eth {
5048 + struct pfe_eth_priv_s *eth_priv[3];
5049 +};
5050 +
5051 +int pfe_eth_init(struct pfe *pfe);
5052 +void pfe_eth_exit(struct pfe *pfe);
5053 +int pfe_eth_suspend(struct net_device *dev);
5054 +int pfe_eth_resume(struct net_device *dev);
5055 +int pfe_eth_mdio_reset(struct mii_bus *bus);
5056 +
5057 +#endif /* _PFE_ETH_H_ */
5058 --- /dev/null
5059 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
5060 @@ -0,0 +1,314 @@
5061 +/*
5062 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5063 + * Copyright 2017 NXP
5064 + *
5065 + * This program is free software; you can redistribute it and/or modify
5066 + * it under the terms of the GNU General Public License as published by
5067 + * the Free Software Foundation; either version 2 of the License, or
5068 + * (at your option) any later version.
5069 + *
5070 + * This program is distributed in the hope that it will be useful,
5071 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5072 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5073 + * GNU General Public License for more details.
5074 + *
5075 + * You should have received a copy of the GNU General Public License
5076 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5077 + */
5078 +
5079 +/*
5080 + * @file
5081 + * Contains all the functions to handle parsing and loading of PE firmware
5082 + * files.
5083 + */
5084 +#include <linux/firmware.h>
5085 +
5086 +#include "pfe_mod.h"
5087 +#include "pfe_firmware.h"
5088 +#include "pfe/pfe.h"
5089 +
5090 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
5091 + const char *section)
5092 +{
5093 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
5094 + struct elf32_shdr *shdr;
5095 + struct elf32_shdr *shdr_shstr;
5096 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
5097 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
5098 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
5099 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
5100 + Elf32_Off shstr_offset;
5101 + Elf32_Word sh_name;
5102 + const char *name;
5103 + int i;
5104 +
5105 + /* Section header strings */
5106 + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
5107 + e_shentsize);
5108 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
5109 +
5110 + for (i = 0; i < e_shnum; i++) {
5111 + shdr = (struct elf32_shdr *)(fw->data + e_shoff
5112 + + i * e_shentsize);
5113 +
5114 + sh_name = be32_to_cpu(shdr->sh_name);
5115 +
5116 + name = (const char *)(fw->data + shstr_offset + sh_name);
5117 +
5118 + if (!strcmp(name, section))
5119 + return shdr;
5120 + }
5121 +
5122 + pr_err("%s: didn't find section %s\n", __func__, section);
5123 +
5124 + return NULL;
5125 +}
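Editorial note: a typical caller of get_elf_section_header() only needs one or two big-endian fields from the returned header. A minimal sketch, assuming only the functions visible in this file; pfe_section_addr is a hypothetical helper, not part of this patch:

/* Hypothetical helper: look up the load address of a named section. */
static int pfe_section_addr(const struct firmware *fw, const char *name,
			    u32 *addr)
{
	struct elf32_shdr *shdr = get_elf_section_header(fw, name);

	if (!shdr)
		return -ENOENT;

	*addr = be32_to_cpu(shdr->sh_addr);	/* ELF fields are big endian */
	return 0;
}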
5126 +
5127 +#if defined(CFG_DIAGS)
5128 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
5129 + *diags_info)
5130 +{
5131 + struct elf32_shdr *shdr;
5132 + unsigned long offset, size;
5133 +
5134 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
5135 + if (shdr) {
5136 + offset = be32_to_cpu(shdr->sh_offset);
5137 + size = be32_to_cpu(shdr->sh_size);
5138 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
5139 + diags_info->diags_str_size = size;
5140 + diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+ if (!diags_info->diags_str_array)
+ return -ENOMEM;
5141 + memcpy(diags_info->diags_str_array, fw->data + offset, size);
5142 +
5143 + return 0;
5144 + } else {
5145 + return -ENOENT;
5146 + }
5147 +}
5148 +#endif
5149 +
5150 +static void pfe_check_version_info(const struct firmware *fw)
5151 +{
5153 + static char *version;
5154 +
5155 + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
5156 +
5157 + if (shdr) {
5158 + if (!version) {
5159 + /*
5160 + * this is the first fw we load, use its version
5161 + * string as reference (whatever it is)
5162 + */
5163 + version = (char *)(fw->data +
5164 + be32_to_cpu(shdr->sh_offset));
5165 +
5166 + pr_info("PFE binary version: %s\n", version);
5167 + } else {
5168 + /*
5169 + * at least one firmware has already been loaded, so its
5170 + * version string can be checked against this one
5171 + */
5172 + if (strcmp(version, (char *)(fw->data +
5173 + be32_to_cpu(shdr->sh_offset)))) {
5174 + pr_info(
5175 + "WARNING: PFE firmware binaries are from incompatible versions\n");
5176 + }
5177 + }
5178 + } else {
5179 + /*
5180 + * version cannot be verified, a potential issue that should
5181 + * be reported
5182 + */
5183 + pr_info(
5184 + "WARNING: PFE firmware version could not be verified\n");
5185 + }
5186 +}
5187 +
5188 +/* PFE elf firmware loader.
5189 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
5190 + *
5191 + * @param pe_mask Mask of PE id's to load firmware to
5192 + * @param fw Pointer to the firmware image
5193 + *
5194 + * @return 0 on success, a negative value on error
5195 + *
5196 + */
5197 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
5198 +{
5199 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
5200 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
5201 + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
5202 + be32_to_cpu(elf_hdr->e_shoff));
5203 + int id, section;
5204 + int rc;
5205 +
5206 + pr_info("%s\n", __func__);
5207 +
5208 + /* Some sanity checks */
5209 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
5210 + pr_err("%s: incorrect elf magic number\n", __func__);
5211 + return -EINVAL;
5212 + }
5213 +
5214 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
5215 + pr_err("%s: incorrect elf class(%x)\n", __func__,
5216 + elf_hdr->e_ident[EI_CLASS]);
5217 + return -EINVAL;
5218 + }
5219 +
5220 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
5221 + pr_err("%s: incorrect elf data(%x)\n", __func__,
5222 + elf_hdr->e_ident[EI_DATA]);
5223 + return -EINVAL;
5224 + }
5225 +
5226 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
5227 + pr_err("%s: incorrect elf file type(%x)\n", __func__,
5228 + be16_to_cpu(elf_hdr->e_type));
5229 + return -EINVAL;
5230 + }
5231 +
5232 + for (section = 0; section < sections; section++, shdr++) {
5233 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
5234 + SHF_EXECINSTR)))
5235 + continue;
5236 +
5237 + for (id = 0; id < MAX_PE; id++)
5238 + if (pe_mask & (1 << id)) {
5239 + rc = pe_load_elf_section(id, fw->data, shdr,
5240 + pfe->dev);
5241 + if (rc < 0)
5242 + goto err;
5243 + }
5244 + }
5245 +
5246 + pfe_check_version_info(fw);
5247 +
5248 + return 0;
5249 +
5250 +err:
5251 + return rc;
5252 +}
5253 +
5254 +/* PFE firmware initialization.
5255 + * Loads different firmware files from filesystem.
5256 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
5257 + * Initializes control path symbol addresses (by looking them up in the elf
5258 + * firmware files)
5259 + * Takes PE's out of reset
5260 + *
5261 + * @return 0 on success, a negative value on error
5262 + *
5263 + */
5264 +int pfe_firmware_init(struct pfe *pfe)
5265 +{
5266 + const struct firmware *class_fw, *tmu_fw;
5267 + int rc = 0;
5268 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5269 + const char *util_fw_name;
5270 + const struct firmware *util_fw;
5271 +#endif
5272 +
5273 + pr_info("%s\n", __func__);
5274 +
5275 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5276 + pr_err("%s: request firmware %s failed\n", __func__,
5277 + CLASS_FIRMWARE_FILENAME);
5278 + rc = -ETIMEDOUT;
5279 + goto err0;
5280 + }
5281 +
5282 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5283 + pr_err("%s: request firmware %s failed\n", __func__,
5284 + TMU_FIRMWARE_FILENAME);
5285 + rc = -ETIMEDOUT;
5286 + goto err1;
5287 + }
5288 +
5289 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5290 + util_fw_name = UTIL_FIRMWARE_FILENAME;
5291 +
5292 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5293 + pr_err("%s: request firmware %s failed\n", __func__,
5294 + util_fw_name);
5295 + rc = -ETIMEDOUT;
5296 + goto err2;
5297 + }
5298 +#endif
5299 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5300 + if (rc < 0) {
5301 + pr_err("%s: class firmware load failed\n", __func__);
5302 + goto err3;
5303 + }
5304 +
5305 +#if defined(CFG_DIAGS)
5306 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5307 + if (rc < 0) {
5308 + pr_warn(
5309 + "PFE diags won't be available for class PEs\n");
5310 + rc = 0;
5311 + }
5312 +#endif
5313 +
5314 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5315 + if (rc < 0) {
5316 + pr_err("%s: tmu firmware load failed\n", __func__);
5317 + goto err3;
5318 + }
5319 +
5320 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5321 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5322 + if (rc < 0) {
5323 + pr_err("%s: util firmware load failed\n", __func__);
5324 + goto err3;
5325 + }
5326 +
5327 +#if defined(CFG_DIAGS)
5328 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5329 + if (rc < 0) {
5330 + pr_warn(
5331 + "PFE diags won't be available for util PE\n");
5332 + rc = 0;
5333 + }
5334 +#endif
5335 +
5336 + util_enable();
5337 +#endif
5338 +
5339 + tmu_enable(0xf);
5340 + class_enable();
5341 +
5342 +err3:
5343 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5344 + release_firmware(util_fw);
5345 +
5346 +err2:
5347 +#endif
5348 + release_firmware(tmu_fw);
5349 +
5350 +err1:
5351 + release_firmware(class_fw);
5352 +
5353 +err0:
5354 + return rc;
5355 +}
5356 +
5357 +/* PFE firmware cleanup
5358 + * Puts PE's in reset
5359 + *
5360 + *
5361 + */
5362 +void pfe_firmware_exit(struct pfe *pfe)
5363 +{
5364 + pr_info("%s\n", __func__);
5365 +
5366 + if (pe_reset_all(&pfe->ctrl) != 0)
5367 + pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5368 +
5369 + class_disable();
5370 + tmu_disable(0xf);
5371 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5372 + util_disable();
5373 +#endif
5374 +}
5375 --- /dev/null
5376 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5377 @@ -0,0 +1,32 @@
5378 +/*
5379 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5380 + * Copyright 2017 NXP
5381 + *
5382 + * This program is free software; you can redistribute it and/or modify
5383 + * it under the terms of the GNU General Public License as published by
5384 + * the Free Software Foundation; either version 2 of the License, or
5385 + * (at your option) any later version.
5386 + *
5387 + * This program is distributed in the hope that it will be useful,
5388 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5389 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5390 + * GNU General Public License for more details.
5391 + *
5392 + * You should have received a copy of the GNU General Public License
5393 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5394 + */
5395 +
5396 +#ifndef _PFE_FIRMWARE_H_
5397 +#define _PFE_FIRMWARE_H_
5398 +
5399 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
5400 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
5401 +
5402 +#define PFE_FW_CHECK_PASS 0
5403 +#define PFE_FW_CHECK_FAIL 1
5404 +#define NUM_PFE_FW 3
5405 +
5406 +int pfe_firmware_init(struct pfe *pfe);
5407 +void pfe_firmware_exit(struct pfe *pfe);
5408 +
5409 +#endif /* _PFE_FIRMWARE_H_ */
5410 --- /dev/null
5411 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5412 @@ -0,0 +1,1528 @@
5413 +/*
5414 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5415 + * Copyright 2017 NXP
5416 + *
5417 + * This program is free software; you can redistribute it and/or modify
5418 + * it under the terms of the GNU General Public License as published by
5419 + * the Free Software Foundation; either version 2 of the License, or
5420 + * (at your option) any later version.
5421 + *
5422 + * This program is distributed in the hope that it will be useful,
5423 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5424 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5425 + * GNU General Public License for more details.
5426 + *
5427 + * You should have received a copy of the GNU General Public License
5428 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5429 + */
5430 +
5431 +#include "pfe_mod.h"
5432 +#include "pfe/pfe.h"
5433 +
5434 +/* A-010897: Jumbo frame is not supported */
5435 +extern bool pfe_errata_a010897;
5436 +
5437 +#define PFE_RCR_MAX_FL_MASK 0xC000FFFF
5438 +
5439 +void *cbus_base_addr;
5440 +void *ddr_base_addr;
5441 +unsigned long ddr_phys_base_addr;
5442 +unsigned int ddr_size;
5443 +
5444 +static struct pe_info pe[MAX_PE];
5445 +
5446 +/* Initializes the PFE library.
5447 + * Must be called before using any of the library functions.
5448 + *
5449 + * @param[in] cbus_base CBUS virtual base address (as mapped in
5450 + * the host CPU address space)
5451 + * @param[in] ddr_base PFE DDR range virtual base address (as
5452 + * mapped in the host CPU address space)
5453 + * @param[in] ddr_phys_base PFE DDR range physical base address (as
5454 + * mapped in platform)
5455 + * @param[in] size PFE DDR range size (as defined by the host
5456 + * software)
5457 + */
5458 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5459 + unsigned int size)
5460 +{
5461 + cbus_base_addr = cbus_base;
5462 + ddr_base_addr = ddr_base;
5463 + ddr_phys_base_addr = ddr_phys_base;
5464 + ddr_size = size;
5465 +
5466 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5467 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5468 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5469 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5470 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5471 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5472 +
5473 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5474 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5475 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5476 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5477 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5478 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5479 +
5480 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5481 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5482 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5483 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5484 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5485 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5486 +
5487 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5488 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5489 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5490 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5491 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5492 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5493 +
5494 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5495 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5496 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5497 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5498 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5499 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5500 +
5501 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5502 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5503 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5504 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5505 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5506 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5507 +
5508 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5509 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5510 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5511 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5512 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5513 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5514 +
5515 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5516 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5517 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5518 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5519 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5520 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5521 +
5522 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5523 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5524 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5525 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5526 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5527 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5528 +
5529 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5530 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5531 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5532 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5533 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5534 +#endif
5535 +}
5536 +
5537 +/* Writes a buffer to PE internal memory from the host
5538 + * through indirect access registers.
5539 + *
5540 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5541 + * ..., UTIL_ID)
5542 + * @param[in] src Buffer source address
5543 + * @param[in] mem_access_addr DMEM destination address (must be 32bit
5544 + * aligned)
5545 + * @param[in] len Number of bytes to copy
5546 + */
5547 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
5548 + unsigned int len)
5549 +{
5550 + u32 offset = 0, val, addr;
5551 + unsigned int len32 = len >> 2;
5552 + int i;
5553 +
5554 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5555 + PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5556 +
5557 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
5558 + val = *(u32 *)src;
5559 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5560 + writel(addr + offset, pe[id].mem_access_addr);
5561 + }
5562 +
5563 + len = (len & 0x3);
5564 + if (len) {
5565 + val = 0;
5566 +
5567 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5568 + PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5569 +
5570 + for (i = 0; i < len; i++, src++)
5571 + val |= (*(u8 *)src) << (8 * i);
5572 +
5573 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5574 + writel(addr, pe[id].mem_access_addr);
5575 + }
5576 +}
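Editorial note: a worked example of the tail handling above, as I read the code; an illustration only, not driver code. Copying 6 bytes b0..b5 issues one full 32-bit transfer followed by a 2-byte transfer:

/* Illustration only:
 *   pe_mem_memcpy_to32(id, addr, src, 6)
 *   - loop iteration 0: *(u32 *)src is swapped by cpu_to_be32() and
 *     written with byte enables covering all 4 bytes
 *   - tail: len = 2, val = b4 | (b5 << 8) (packed LSB-first), written
 *     at offset 4 with byte enables covering only 2 bytes; the
 *     cpu_to_be32() swap puts b4 and b5 where the enables expect them
 */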
5577 +
5578 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5579 + * through indirect access registers.
5580 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5581 + * ..., UTIL_ID)
5582 + * @param[in] src Buffer source address
5583 + * @param[in] dst DMEM destination address (must be 32bit
5584 + * aligned)
5585 + * @param[in] len Number of bytes to copy
5586 + */
5587 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5588 +{
5589 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5590 + PE_MEM_ACCESS_DMEM, src, len);
5591 +}
5592 +
5593 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5594 + * through indirect access registers.
5595 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5596 + * ..., TMU3_ID)
5597 + * @param[in] src Buffer source address
5598 + * @param[in] dst PMEM destination address (must be 32bit
5599 + * aligned)
5600 + * @param[in] len Number of bytes to copy
5601 + */
5602 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5603 +{
5604 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5605 + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5606 +}
5607 +
5608 +/* Reads PE internal program memory (IMEM) from the host
5609 + * through indirect access registers.
5610 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5611 + * ..., TMU3_ID)
5612 + * @param[in] addr PMEM read address (must be aligned on size)
5613 + * @param[in] size Number of bytes to read (maximum 4, must not
5614 + * cross 32bit boundaries)
5615 + * @return the data read (in PE endianness, i.e BE).
5616 + */
5617 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5618 +{
5619 + u32 offset = addr & 0x3;
5620 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5621 + u32 val;
5622 +
5623 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5624 + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5625 +
5626 + writel(addr, pe[id].mem_access_addr);
5627 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5628 +
5629 + return (val >> (offset << 3)) & mask;
5630 +}
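Editorial note: to make the offset and mask arithmetic concrete, a worked example for a 2-byte read at an address that sits at offset 2 inside its 32-bit word:

/* Illustration only:
 *   pe_pmem_read(id, 0x1002, 2)
 *   offset = 0x1002 & 0x3 = 2
 *   mask   = 0xffffffff >> ((4 - 2) << 3) = 0x0000ffff
 *   the access targets the aligned word at 0x1000 with byte enables
 *   for bytes 2..3; the function returns (val >> 16) & 0xffff
 */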
5631 +
5632 +/* Writes PE internal data memory (DMEM) from the host
5633 + * through indirect access registers.
5634 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5635 + * ..., UTIL_ID)
5636 + * @param[in] addr DMEM write address (must be aligned on size)
5637 + * @param[in] val Value to write (in PE endianness, i.e BE)
5638 + * @param[in] size Number of bytes to write (maximum 4, must not
5639 + * cross 32bit boundaries)
5640 + */
5641 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5642 +{
5643 + u32 offset = addr & 0x3;
5644 +
5645 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5646 + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5647 +
5648 + /* Indirect access interface is byte swapping data being written */
5649 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5650 + writel(addr, pe[id].mem_access_addr);
5651 +}
5652 +
5653 +/* Reads PE internal data memory (DMEM) from the host
5654 + * through indirect access registers.
5655 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5656 + * ..., UTIL_ID)
5657 + * @param[in] addr DMEM read address (must be aligned on size)
5658 + * @param[in] size Number of bytes to read (maximum 4, must not
5659 + * cross 32bit boundaries)
5660 + * @return the data read (in PE endianness, i.e BE).
5661 + */
5662 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5663 +{
5664 + u32 offset = addr & 0x3;
5665 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5666 + u32 val;
5667 +
5668 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5669 + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5670 +
5671 + writel(addr, pe[id].mem_access_addr);
5672 +
5673 + /* Indirect access interface is byte swapping data being read */
5674 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5675 +
5676 + return (val >> (offset << 3)) & mask;
5677 +}
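Editorial note: pe_dmem_write() and pe_dmem_read() are symmetric, so a write followed by a read of the same address should round-trip. A hypothetical smoke test, not part of this patch; it assumes DMEM offset 0 of CLASS0 is safe to scribble on, which would need checking against the PE firmware layout:

static int pe_dmem_selftest(void)
{
	u32 val;

	pe_dmem_write(CLASS0_ID, 0xdeadbeef, 0x0, 4);
	val = pe_dmem_read(CLASS0_ID, 0x0, 4);

	return (val == 0xdeadbeef) ? 0 : -EIO;
}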
5678 +
5679 +/* This function is used to write to CLASS internal bus peripherals (ccu,
5680 + * pe-lem) from the host
5681 + * through indirect access registers.
5682 + * @param[in] val value to write
5683 + * @param[in] addr Address to write to (must be aligned on size)
5684 + * @param[in] size Number of bytes to write (1, 2 or 4)
5685 + *
5686 + */
5687 +void class_bus_write(u32 val, u32 addr, u8 size)
5688 +{
5689 + u32 offset = addr & 0x3;
5690 +
5691 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5692 +
5693 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5694 + (size << 24);
5695 +
5696 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5697 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5698 +}
5699 +
5700 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
5701 + * through indirect access registers.
5702 + * @param[in] addr Address to read from (must be aligned on size)
5703 + * @param[in] size Number of bytes to read (1, 2 or 4)
5704 + * @return the read data
5705 + *
5706 + */
5707 +u32 class_bus_read(u32 addr, u8 size)
5708 +{
5709 + u32 offset = addr & 0x3;
5710 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5711 + u32 val;
5712 +
5713 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5714 +
5715 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5716 +
5717 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5718 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5719 +
5720 + return (val >> (offset << 3)) & mask;
5721 +}
5722 +
5723 +/* Writes data to the cluster memory (PE_LMEM)
5724 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5725 + * @param[in] src Buffer source address
5726 + * @param[in] len Number of bytes to copy
5727 + */
5728 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
5729 +{
5730 + u32 len32 = len >> 2;
5731 + int i;
5732 +
5733 + for (i = 0; i < len32; i++, src += 4, dst += 4)
5734 + class_bus_write(*(u32 *)src, dst, 4);
5735 +
5736 + if (len & 0x2) {
5737 + class_bus_write(*(u16 *)src, dst, 2);
5738 + src += 2;
5739 + dst += 2;
5740 + }
5741 +
5742 + if (len & 0x1) {
5743 + class_bus_write(*(u8 *)src, dst, 1);
5744 + src++;
5745 + dst++;
5746 + }
5747 +}
5748 +
5749 +/* Writes value to the cluster memory (PE_LMEM)
5750 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5751 + * @param[in] val Value to write
5752 + * @param[in] len Number of bytes to write
5753 + */
5754 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
5755 +{
5756 + u32 len32 = len >> 2;
5757 + int i;
5758 +
5759 + val = val | (val << 8) | (val << 16) | (val << 24);
5760 +
5761 + for (i = 0; i < len32; i++, dst += 4)
5762 + class_bus_write(val, dst, 4);
5763 +
5764 + if (len & 0x2) {
5765 + class_bus_write(val, dst, 2);
5766 + dst += 2;
5767 + }
5768 +
5769 + if (len & 0x1) {
5770 + class_bus_write(val, dst, 1);
5771 + dst++;
5772 + }
5773 +}
5774 +
5775 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5776 +
5777 +/* Writes UTIL program memory (DDR) from the host.
5778 + *
5779 + * @param[in] addr Address to write (virtual, must be aligned on size)
5780 + * @param[in] val Value to write (in PE endianness, i.e BE)
5781 + * @param[in] size Number of bytes to write (2 or 4)
5782 + */
5783 +static void util_pmem_write(u32 val, void *addr, u8 size)
5784 +{
5785 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
5786 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
5787 +
5788 + /*
5789 + * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
5790 + * location
5791 + */
5792 + if (size == 4)
5793 + writel(be32_to_cpu(val), addr64 + off);
5794 + else
5795 + writew(be16_to_cpu((u16)val), addr64 + off);
5796 +}
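Editorial note: the off computation above places each 16/32-bit write where it will end up after the 64-bit byte swap the UTIL-PE applies when fetching IMEM. A worked example, as far as the arithmetic goes:

/* Illustration only: a 4-byte write at (addr & 0x7) == 0
 *   off = 8 - 0 - 4 = 4
 * so the (32-bit swapped) word is stored in the upper half of the
 * aligned 64-bit location; the PE's 64-bit swap then restores the
 * original byte order at IMEM offsets 0..3.
 */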
5797 +
5798 +/* Writes a buffer to UTIL program memory (DDR) from the host.
5799 + *
5800 + * @param[in] dst Address to write (virtual, must be at least 16bit
5801 + * aligned)
5802 + * @param[in] src Buffer to write (in PE endianness, i.e BE, must have
5803 + * same alignment as dst)
5804 + * @param[in] len Number of bytes to write (must be at least 16bit
5805 + * aligned)
5806 + */
5807 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
5808 +{
5809 + unsigned int len32;
5810 + int i;
5811 +
5812 + if ((unsigned long)src & 0x2) {
5813 + util_pmem_write(*(u16 *)src, dst, 2);
5814 + src += 2;
5815 + dst += 2;
5816 + len -= 2;
5817 + }
5818 +
5819 + len32 = len >> 2;
5820 +
5821 + for (i = 0; i < len32; i++, dst += 4, src += 4)
5822 + util_pmem_write(*(u32 *)src, dst, 4);
5823 +
5824 + if (len & 0x2)
5825 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
5826 +}
5827 +#endif
5828 +
5829 +/* Loads an elf section into pmem
5830 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
5831 + * supported
5832 + *
5833 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
5834 + * TMU3_ID)
5835 + * @param[in] data pointer to the elf firmware
5836 + * @param[in] shdr pointer to the elf section header
5837 + *
5838 + */
5839 +static int pe_load_pmem_section(int id, const void *data,
5840 + struct elf32_shdr *shdr)
5841 +{
5842 + u32 offset = be32_to_cpu(shdr->sh_offset);
5843 + u32 addr = be32_to_cpu(shdr->sh_addr);
5844 + u32 size = be32_to_cpu(shdr->sh_size);
5845 + u32 type = be32_to_cpu(shdr->sh_type);
5846 +
5847 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5848 + if (id == UTIL_ID) {
5849 + pr_err("%s: unsupported pmem section for UTIL\n",
5850 + __func__);
5851 + return -EINVAL;
5852 + }
5853 +#endif
5854 +
5855 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5856 + pr_err(
5857 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5858 + , __func__, addr, (unsigned long)data + offset);
5859 +
5860 + return -EINVAL;
5861 + }
5862 +
5863 + if (addr & 0x1) {
5864 + pr_err("%s: load address(%x) is not 16bit aligned\n",
5865 + __func__, addr);
5866 + return -EINVAL;
5867 + }
5868 +
5869 + if (size & 0x1) {
5870 + pr_err("%s: load size(%x) is not 16bit aligned\n",
5871 + __func__, size);
5872 + return -EINVAL;
5873 + }
5874 +
5875 + switch (type) {
5876 + case SHT_PROGBITS:
5877 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
5878 +
5879 + break;
5880 +
5881 + default:
5882 + pr_err("%s: unsupported section type(%x)\n", __func__,
5883 + type);
5884 + return -EINVAL;
5885 + }
5886 +
5887 + return 0;
5888 +}
5889 +
5890 +/* Loads an elf section into dmem
5891 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5892 + * initialized to 0
5893 + *
5894 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5895 + * ..., UTIL_ID)
5896 + * @param[in] data pointer to the elf firmware
5897 + * @param[in] shdr pointer to the elf section header
5898 + *
5899 + */
5900 +static int pe_load_dmem_section(int id, const void *data,
5901 + struct elf32_shdr *shdr)
5902 +{
5903 + u32 offset = be32_to_cpu(shdr->sh_offset);
5904 + u32 addr = be32_to_cpu(shdr->sh_addr);
5905 + u32 size = be32_to_cpu(shdr->sh_size);
5906 + u32 type = be32_to_cpu(shdr->sh_type);
5907 + u32 size32 = size >> 2;
5908 + int i;
5909 +
5910 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5911 + pr_err(
5912 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5913 + __func__, addr, (unsigned long)data + offset);
5914 +
5915 + return -EINVAL;
5916 + }
5917 +
5918 + if (addr & 0x3) {
5919 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5920 + __func__, addr);
5921 + return -EINVAL;
5922 + }
5923 +
5924 + switch (type) {
5925 + case SHT_PROGBITS:
5926 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
5927 + break;
5928 +
5929 + case SHT_NOBITS:
5930 + for (i = 0; i < size32; i++, addr += 4)
5931 + pe_dmem_write(id, 0, addr, 4);
5932 +
5933 + if (size & 0x3)
5934 + pe_dmem_write(id, 0, addr, size & 0x3);
5935 +
5936 + break;
5937 +
5938 + default:
5939 + pr_err("%s: unsupported section type(%x)\n", __func__,
5940 + type);
5941 + return -EINVAL;
5942 + }
5943 +
5944 + return 0;
5945 +}
5946 +
5947 +/* Loads an elf section into DDR
5948 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5949 + * initialized to 0
5950 + *
5951 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5952 + * ..., UTIL_ID)
5953 + * @param[in] data pointer to the elf firmware
5954 + * @param[in] shdr pointer to the elf section header
5955 + *
5956 + */
5957 +static int pe_load_ddr_section(int id, const void *data,
5958 + struct elf32_shdr *shdr,
5959 + struct device *dev) {
5960 + u32 offset = be32_to_cpu(shdr->sh_offset);
5961 + u32 addr = be32_to_cpu(shdr->sh_addr);
5962 + u32 size = be32_to_cpu(shdr->sh_size);
5963 + u32 type = be32_to_cpu(shdr->sh_type);
5964 + u32 flags = be32_to_cpu(shdr->sh_flags);
5965 +
5966 + switch (type) {
5967 + case SHT_PROGBITS:
5968 + if (flags & SHF_EXECINSTR) {
5969 + if (id <= CLASS_MAX_ID) {
5970 + /* DO the loading only once in DDR */
5971 + if (id == CLASS0_ID) {
5972 + pr_info(
5973 + "%s: load address(%x) and elf file address(%lx) received\n",
5974 + __func__, addr,
5975 + (unsigned long)data + offset);
5976 + if (((unsigned long)(data + offset)
5977 + & 0x3) != (addr & 0x3)) {
5978 + pr_err(
5979 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5980 + , __func__, addr,
5981 + (unsigned long)data + offset);
5982 +
5983 + return -EINVAL;
5984 + }
5985 +
5986 + if (addr & 0x1) {
5987 + pr_err(
5988 + "%s: load address(%x) is not 16bit aligned\n"
5989 + , __func__, addr);
5990 + return -EINVAL;
5991 + }
5992 +
5993 + if (size & 0x1) {
5994 + pr_err(
5995 + "%s: load length(%x) is not 16bit aligned\n"
5996 + , __func__, size);
5997 + return -EINVAL;
5998 + }
5999 + memcpy(DDR_PHYS_TO_VIRT(
6000 + DDR_PFE_TO_PHYS(addr)),
6001 + data + offset, size);
6002 + }
6003 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
6004 + } else if (id == UTIL_ID) {
6005 + if (((unsigned long)(data + offset) & 0x3)
6006 + != (addr & 0x3)) {
6007 + pr_err(
6008 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
6009 + , __func__, addr,
6010 + (unsigned long)data + offset);
6011 +
6012 + return -EINVAL;
6013 + }
6014 +
6015 + if (addr & 0x1) {
6016 + pr_err(
6017 + "%s: load address(%x) is not 16bit aligned\n"
6018 + , __func__, addr);
6019 + return -EINVAL;
6020 + }
6021 +
6022 + if (size & 0x1) {
6023 + pr_err(
6024 + "%s: load length(%x) is not 16bit aligned\n"
6025 + , __func__, size);
6026 + return -EINVAL;
6027 + }
6028 +
6029 + util_pmem_memcpy(DDR_PHYS_TO_VIRT(
6030 + DDR_PFE_TO_PHYS(addr)),
6031 + data + offset, size);
6032 + }
6033 +#endif
6034 + } else {
6035 + pr_err(
6036 + "%s: unsupported ddr section type(%x) for PE(%d)\n"
6037 + , __func__, type, id);
6038 + return -EINVAL;
6039 + }
6040 +
6041 + } else {
6042 + memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
6043 + + offset, size);
6044 + }
6045 +
6046 + break;
6047 +
6048 + case SHT_NOBITS:
6049 + memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
6050 +
6051 + break;
6052 +
6053 + default:
6054 + pr_err("%s: unsupported section type(%x)\n", __func__,
6055 + type);
6056 + return -EINVAL;
6057 + }
6058 +
6059 + return 0;
6060 +}
6061 +
6062 +/* Loads an elf section into pe lmem
6063 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
6064 + * initialized to 0
6065 + *
6066 + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
6067 + * @param[in] data pointer to the elf firmware
6068 + * @param[in] shdr pointer to the elf section header
6069 + *
6070 + */
6071 +static int pe_load_pe_lmem_section(int id, const void *data,
6072 + struct elf32_shdr *shdr)
6073 +{
6074 + u32 offset = be32_to_cpu(shdr->sh_offset);
6075 + u32 addr = be32_to_cpu(shdr->sh_addr);
6076 + u32 size = be32_to_cpu(shdr->sh_size);
6077 + u32 type = be32_to_cpu(shdr->sh_type);
6078 +
6079 + if (id > CLASS_MAX_ID) {
6080 + pr_err(
6081 + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
6082 + __func__, type, id);
6083 + return -EINVAL;
6084 + }
6085 +
6086 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
6087 + pr_err(
6088 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
6089 + __func__, addr, (unsigned long)data + offset);
6090 +
6091 + return -EINVAL;
6092 + }
6093 +
6094 + if (addr & 0x3) {
6095 + pr_err("%s: load address(%x) is not 32bit aligned\n",
6096 + __func__, addr);
6097 + return -EINVAL;
6098 + }
6099 +
6100 + switch (type) {
6101 + case SHT_PROGBITS:
6102 + class_pe_lmem_memcpy_to32(addr, data + offset, size);
6103 + break;
6104 +
6105 + case SHT_NOBITS:
6106 + class_pe_lmem_memset(addr, 0, size);
6107 + break;
6108 +
6109 + default:
6110 + pr_err("%s: unsupported section type(%x)\n", __func__,
6111 + type);
6112 + return -EINVAL;
6113 + }
6114 +
6115 + return 0;
6116 +}
6117 +
6118 +/* Loads an elf section into a PE
6119 + * For now only supports loading a section to dmem (all PE's), pmem (class and
6120 + * tmu PE's),
6121 + * DDR (util PE code)
6122 + *
6123 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
6124 + * ..., UTIL_ID)
6125 + * @param[in] data pointer to the elf firmware
6126 + * @param[in] shdr pointer to the elf section header
6127 + *
6128 + */
6129 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
6130 + struct device *dev) {
6131 + u32 addr = be32_to_cpu(shdr->sh_addr);
6132 + u32 size = be32_to_cpu(shdr->sh_size);
6133 +
6134 + if (IS_DMEM(addr, size))
6135 + return pe_load_dmem_section(id, data, shdr);
6136 + else if (IS_PMEM(addr, size))
6137 + return pe_load_pmem_section(id, data, shdr);
6138 + else if (IS_PFE_LMEM(addr, size))
6139 + return 0;
6140 + else if (IS_PHYS_DDR(addr, size))
6141 + return pe_load_ddr_section(id, data, shdr, dev);
6142 + else if (IS_PE_LMEM(addr, size))
6143 + return pe_load_pe_lmem_section(id, data, shdr);
6144 +
6145 + pr_err("%s: unsupported memory range(%x)\n", __func__,
6146 + addr);
6147 + return 0;
6148 +}
6149 +
6150 +/**************************** BMU ***************************/
6151 +
6152 +/* Initializes a BMU block.
6153 + * @param[in] base BMU block base address
6154 + * @param[in] cfg BMU configuration
6155 + */
6156 +void bmu_init(void *base, struct BMU_CFG *cfg)
6157 +{
6158 + bmu_disable(base);
6159 +
6160 + bmu_set_config(base, cfg);
6161 +
6162 + bmu_reset(base);
6163 +}
6164 +
6165 +/* Resets a BMU block.
6166 + * @param[in] base BMU block base address
6167 + */
6168 +void bmu_reset(void *base)
6169 +{
6170 + writel(CORE_SW_RESET, base + BMU_CTRL);
6171 +
6172 + /* Wait for self clear */
6173 + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
6174 + ;
6175 +}
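Editorial note: the self-clear poll above spins forever if the hardware wedges. If a bounded wait were preferred, the stock iopoll helper would do it; a sketch under the assumption that the caller runs in process context (bmu_init() is called from probe), not part of this patch:

#include <linux/iopoll.h>

/* Sketch: same reset, but give up after 1 ms instead of spinning. */
static int bmu_reset_bounded(void *base)
{
	u32 val;

	writel(CORE_SW_RESET, base + BMU_CTRL);
	return readl_poll_timeout(base + BMU_CTRL, val,
				  !(val & CORE_SW_RESET), 10, 1000);
}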
6176 +
6177 +/* Enables a BMU block.
6178 + * @param[in] base BMU block base address
6179 + */
6180 +void bmu_enable(void *base)
6181 +{
6182 + writel(CORE_ENABLE, base + BMU_CTRL);
6183 +}
6184 +
6185 +/* Disables a BMU block.
6186 + * @param[in] base BMU block base address
6187 + */
6188 +void bmu_disable(void *base)
6189 +{
6190 + writel(CORE_DISABLE, base + BMU_CTRL);
6191 +}
6192 +
6193 +/* Sets the configuration of a BMU block.
6194 + * @param[in] base BMU block base address
6195 + * @param[in] cfg BMU configuration
6196 + */
6197 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
6198 +{
6199 + writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
6200 + writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
6201 + writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
6202 +
6203 + /* Interrupts are never used */
6204 + writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
6205 + writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
6206 + writel(0x0, base + BMU_INT_ENABLE);
6207 +}
6208 +
6209 +/**************************** MTIP GEMAC ***************************/
6210 +
6211 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
6212 + * TCP or UDP checksums are discarded
6213 + *
6214 + * @param[in] base GEMAC base address.
6215 + */
6216 +void gemac_enable_rx_checksum_offload(void *base)
6217 +{
6218 + /* The MTIP GEMAC exposes no configuration for this */
6219 +}
6220 +
6221 +/* Disable Rx Checksum Engine.
6222 + *
6223 + * @param[in] base GEMAC base address.
6224 + */
6225 +void gemac_disable_rx_checksum_offload(void *base)
6226 +{
6227 + /* The MTIP GEMAC exposes no configuration for this */
6228 +}
6229 +
6230 +/* GEMAC set speed.
6231 + * @param[in] base GEMAC base address
6232 + * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
6233 + */
6234 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
6235 +{
6236 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
6237 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
6238 +
6239 + switch (gem_speed) {
6240 + case SPEED_10M:
6241 + rcr |= EMAC_RCNTRL_RMII_10T;
6242 + break;
6243 +
6244 + case SPEED_1000M:
6245 + ecr |= EMAC_ECNTRL_SPEED;
6246 + break;
6247 +
6248 + case SPEED_100M:
6249 + default:
6250 + /*It is in 100M mode */
6251 + break;
6252 + }
6253 + writel(ecr, (base + EMAC_ECNTRL_REG));
6254 + writel(rcr, (base + EMAC_RCNTRL_REG));
6255 +}
6256 +
6257 +/* GEMAC set duplex.
6258 + * @param[in] base GEMAC base address
6259 + * @param[in] duplex GEMAC duplex mode (Full, Half)
6260 + */
6261 +void gemac_set_duplex(void *base, int duplex)
6262 +{
6263 + if (duplex == DUPLEX_HALF) {
6264 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
6265 + + EMAC_TCNTRL_REG);
6266 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
6267 + + EMAC_RCNTRL_REG));
6268 + } else {
6269 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
6270 + + EMAC_TCNTRL_REG);
6271 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
6272 + + EMAC_RCNTRL_REG));
6273 + }
6274 +}
6275 +
6276 +/* GEMAC set mode.
6277 + * @param[in] base GEMAC base address
6278 + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
6279 + */
6280 +void gemac_set_mode(void *base, int mode)
6281 +{
6282 + u32 val = readl(base + EMAC_RCNTRL_REG);
6283 +
6284 + /* Remove loopback */
6285 + val &= ~EMAC_RCNTRL_LOOP;
6286 +
6287 + /* Enable flow control and MII mode and terminate received CRC */
6288 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
6289 +
6290 + writel(val, base + EMAC_RCNTRL_REG);
6291 +}
6292 +
6293 +/* GEMAC enable function.
6294 + * @param[in] base GEMAC base address
6295 + */
6296 +void gemac_enable(void *base)
6297 +{
6298 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6299 + EMAC_ECNTRL_REG);
6300 +}
6301 +
6302 +/* GEMAC disable function.
6303 + * @param[in] base GEMAC base address
6304 + */
6305 +void gemac_disable(void *base)
6306 +{
6307 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6308 + EMAC_ECNTRL_REG);
6309 +}
6310 +
6311 +/* GEMAC TX disable function.
6312 + * @param[in] base GEMAC base address
6313 + */
6314 +void gemac_tx_disable(void *base)
6315 +{
6316 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6317 + EMAC_TCNTRL_REG);
6318 +}
6319 +
6320 +void gemac_tx_enable(void *base)
6321 +{
6322 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6323 + EMAC_TCNTRL_REG);
6324 +}
6325 +
6326 +/* Sets the hash register of the MAC.
6327 + * This register is used for matching unicast and multicast frames.
6328 + *
6329 + * @param[in] base GEMAC base address.
6330 + * @param[in] hash 64-bit hash to be configured.
6331 + */
6332 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6333 +{
6334 + writel(hash->bottom, base + EMAC_GALR);
6335 + writel(hash->top, base + EMAC_GAUR);
6336 +}
6337 +
6338 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6339 + unsigned int entry_index)
6340 +{
6341 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6342 + return;
6343 +
6344 + entry_index = entry_index - 1;
6345 + if (entry_index < 1) {
6346 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
6347 + writel((htonl(address->top) | 0x8808), base +
6348 + EMAC_PHY_ADDR_HIGH);
6349 + } else {
6350 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
6351 + + EMAC_SMAC_0_0);
6352 + writel((htonl(address->top) | 0x8808), base + ((entry_index -
6353 + 1) * 8) + EMAC_SMAC_0_1);
6354 + }
6355 +}
6356 +
6357 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6358 +{
6359 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6360 + return;
6361 +
6362 + entry_index = entry_index - 1;
6363 + if (entry_index < 1) {
6364 + writel(0, base + EMAC_PHY_ADDR_LOW);
6365 + writel(0, base + EMAC_PHY_ADDR_HIGH);
6366 + } else {
6367 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6368 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6369 + }
6370 +}
6371 +
6372 +/* Set the loopback mode of the MAC. This can be either no loopback for
6373 + * normal operation, local loopback through MAC internal loopback module or PHY
6374 + * loopback for external loopback through a PHY. This asserts the external
6375 + * loop pin.
6376 + *
6377 + * @param[in] base GEMAC base address.
6378 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
6379 + * Loopback,
6380 + * LB_EXT - PHY Loopback.
6381 + */
6382 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6383 +{
6384 + pr_info("%s()\n", __func__);
6385 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6386 + EMAC_RCNTRL_REG));
6387 +}
6388 +
6389 +/* GEMAC allow frames
6390 + * @param[in] base GEMAC base address
6391 + */
6392 +void gemac_enable_copy_all(void *base)
6393 +{
6394 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6395 + EMAC_RCNTRL_REG));
6396 +}
6397 +
6398 +/* GEMAC do not allow frames
6399 + * @param[in] base GEMAC base address
6400 + */
6401 +void gemac_disable_copy_all(void *base)
6402 +{
6403 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6404 + EMAC_RCNTRL_REG));
6405 +}
6406 +
6407 +/* GEMAC allow broadcast function.
6408 + * @param[in] base GEMAC base address
6409 + */
6410 +void gemac_allow_broadcast(void *base)
6411 +{
6412 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6413 + EMAC_RCNTRL_REG);
6414 +}
6415 +
6416 +/* GEMAC no broadcast function.
6417 + * @param[in] base GEMAC base address
6418 + */
6419 +void gemac_no_broadcast(void *base)
6420 +{
6421 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6422 + EMAC_RCNTRL_REG);
6423 +}
6424 +
6425 +/* GEMAC enable 1536 rx function.
6426 + * @param[in] base GEMAC base address
6427 + */
6428 +void gemac_enable_1536_rx(void *base)
6429 +{
6430 + /* Set 1536 as Maximum frame length */
6431 + writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
6432 + | (1536 << 16), base + EMAC_RCNTRL_REG);
6433 +}
6434 +
6435 +/* GEMAC set rx Max frame length.
6436 + * @param[in] base GEMAC base address
6437 + * @param[in] mtu new mtu
6438 + */
6439 +void gemac_set_rx_max_fl(void *base, int mtu)
6440 +{
6441 + /* Set mtu as Maximum frame length */
6442 + writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
6443 + | (mtu << 16), base + EMAC_RCNTRL_REG);
6444 +}
6445 +
6446 +/* GEMAC enable stacked vlan function.
6447 + * @param[in] base GEMAC base address
6448 + */
6449 +void gemac_enable_stacked_vlan(void *base)
6450 +{
6451 + /* MTIP doesn't support stacked vlan */
6452 +}
6453 +
6454 +/* GEMAC enable pause rx function.
6455 + * @param[in] base GEMAC base address
6456 + */
6457 +void gemac_enable_pause_rx(void *base)
6458 +{
6459 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6460 + base + EMAC_RCNTRL_REG);
6461 +}
6462 +
6463 +/* GEMAC disable pause rx function.
6464 + * @param[in] base GEMAC base address
6465 + */
6466 +void gemac_disable_pause_rx(void *base)
6467 +{
6468 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6469 + base + EMAC_RCNTRL_REG);
6470 +}
6471 +
6472 +/* GEMAC enable pause tx function.
6473 + * @param[in] base GEMAC base address
6474 + */
6475 +void gemac_enable_pause_tx(void *base)
6476 +{
6477 + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6478 +}
6479 +
6480 +/* GEMAC disable pause tx function.
6481 + * @param[in] base GEMAC base address
6482 + */
6483 +void gemac_disable_pause_tx(void *base)
6484 +{
6485 + writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6486 +}
6487 +
6488 +/* GEMAC wol configuration
6489 + * @param[in] base GEMAC base address
6490 + * @param[in] wol_conf WoL register configuration
6491 + */
6492 +void gemac_set_wol(void *base, u32 wol_conf)
6493 +{
6494 + u32 val = readl(base + EMAC_ECNTRL_REG);
6495 +
6496 + if (wol_conf)
6497 + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6498 + else
6499 + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6500 + writel(val, base + EMAC_ECNTRL_REG);
6501 +}
6502 +
6503 +/* Sets the GEMAC bus width.
6504 + * @param[in] base GEMAC base address
6505 + * @param[in] width GEMAC bus width to be set; possible values are 32/64/128
6506 + */
6507 +void gemac_set_bus_width(void *base, int width)
6508 +{
+ /* No-op on this platform: the MTIP GEMAC bus width is not configurable */
6509 +}
6510 +
6511 +/* Sets Gemac configuration.
6512 + * @param[in] base GEMAC base address
6513 + * @param[in] cfg GEMAC configuration
6514 + */
6515 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6516 +{
6517 + /*GEMAC config taken from VLSI */
6518 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6519 + writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6520 +
6521 + if (pfe_errata_a010897)
6522 + writel(0x0000076c, base + EMAC_TRUNC_FL);
6523 + else
6524 + writel(0x00003fff, base + EMAC_TRUNC_FL);
6525 +
6526 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6527 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6528 +
6529 + gemac_set_mode(base, cfg->mode);
6530 +
6531 + gemac_set_speed(base, cfg->speed);
6532 +
6533 + gemac_set_duplex(base, cfg->duplex);
6534 +}
6535 +
6536 +/**************************** GPI ***************************/
6537 +
6538 +/* Initializes a GPI block.
6539 + * @param[in] base GPI base address
6540 + * @param[in] cfg GPI configuration
6541 + */
6542 +void gpi_init(void *base, struct gpi_cfg *cfg)
6543 +{
6544 + gpi_reset(base);
6545 +
6546 + gpi_disable(base);
6547 +
6548 + gpi_set_config(base, cfg);
6549 +}
6550 +
6551 +/* Resets a GPI block.
6552 + * @param[in] base GPI base address
6553 + */
6554 +void gpi_reset(void *base)
6555 +{
6556 + writel(CORE_SW_RESET, base + GPI_CTRL);
6557 +}
6558 +
6559 +/* Enables a GPI block.
6560 + * @param[in] base GPI base address
6561 + */
6562 +void gpi_enable(void *base)
6563 +{
6564 + writel(CORE_ENABLE, base + GPI_CTRL);
6565 +}
6566 +
6567 +/* Disables a GPI block.
6568 + * @param[in] base GPI base address
6569 + */
6570 +void gpi_disable(void *base)
6571 +{
6572 + writel(CORE_DISABLE, base + GPI_CTRL);
6573 +}
6574 +
6575 +/* Sets the configuration of a GPI block.
6576 + * @param[in] base GPI base address
6577 + * @param[in] cfg GPI configuration
6578 + */
6579 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6580 +{
6581 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
6582 + + GPI_LMEM_ALLOC_ADDR);
6583 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
6584 + + GPI_LMEM_FREE_ADDR);
6585 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
6586 + + GPI_DDR_ALLOC_ADDR);
6587 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
6588 + + GPI_DDR_FREE_ADDR);
6589 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6590 + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6591 + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6592 + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6593 + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6594 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
6595 + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
6596 +
6597 + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6598 + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6599 + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6600 + writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
6601 + writel(1, base + GPI_TOE_CHKSUM_EN);
6602 +
6603 + if (cfg->mtip_pause_reg) {
6604 + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6605 + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6606 + }
6607 +}
6608 +
6609 +/**************************** CLASSIFIER ***************************/
6610 +
6611 +/* Initializes CLASSIFIER block.
6612 + * @param[in] cfg CLASSIFIER configuration
6613 + */
6614 +void class_init(struct class_cfg *cfg)
6615 +{
6616 + class_reset();
6617 +
6618 + class_disable();
6619 +
6620 + class_set_config(cfg);
6621 +}
6622 +
6623 +/* Resets CLASSIFIER block.
6624 + *
6625 + */
6626 +void class_reset(void)
6627 +{
6628 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
6629 +}
6630 +
6631 +/* Enables all CLASS-PE's cores.
6632 + *
6633 + */
6634 +void class_enable(void)
6635 +{
6636 + writel(CORE_ENABLE, CLASS_TX_CTRL);
6637 +}
6638 +
6639 +/* Disables all CLASS-PE's cores.
6640 + *
6641 + */
6642 +void class_disable(void)
6643 +{
6644 + writel(CORE_DISABLE, CLASS_TX_CTRL);
6645 +}
6646 +
6647 +/*
6648 + * Sets the configuration of the CLASSIFIER block.
6649 + * @param[in] cfg CLASSIFIER configuration
6650 + */
6651 +void class_set_config(struct class_cfg *cfg)
6652 +{
6653 + u32 val;
6654 +
6655 + /* Initialize route table */
6656 + if (!cfg->resume)
6657 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6658 + cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6659 +
6660 +#if !defined(LS1012A_PFE_RESET_WA)
6661 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
6662 +#endif
6663 +
6664 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
6665 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
6666 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6667 + CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6668 + CLASS_ROUTE_HASH_ENTRY_SIZE);
6669 + writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6670 + CLASS_HIF_PARSE);
6671 +
6672 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
6673 +
6674 +#if defined(CONFIG_IP_ALIGNED)
6675 + val |= IP_ALIGNED;
6676 +#endif
6677 +
6678 + /*
6679 + * Class PE packet steering will only work if TOE mode, bridge fetch or
6680 + * route fetch are enabled (see class/qb_fet.v). Route fetch would
6681 + * trigger additional memory copies (likely from DDR because of hash
6682 + * table size, which cannot be reduced because PE software still
6683 + * relies on hash value computed in HW), so when not in TOE mode we
6684 + * simply enable HW bridge fetch even though we don't use it.
6685 + */
6686 + if (cfg->toe_mode)
6687 + val |= CLASS_TOE;
6688 + else
6689 + val |= HW_BRIDGE_FETCH;
6690 +
6691 + writel(val, CLASS_ROUTE_MULTI);
6692 +
6693 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6694 + CLASS_ROUTE_TABLE_BASE);
6695 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
6696 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
6697 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
6698 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
6699 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
6700 +
6701 + writel(23, CLASS_AFULL_THRES);
6702 + writel(23, CLASS_TSQ_FIFO_THRES);
6703 +
6704 + writel(24, CLASS_MAX_BUF_CNT);
6705 + writel(24, CLASS_TSQ_MAX_CNT);
6706 +}
6707 +
6708 +/**************************** TMU ***************************/
6709 +
6710 +void tmu_reset(void)
6711 +{
6712 + writel(SW_RESET, TMU_CTRL);
6713 +}
6714 +
6715 +/* Initializes TMU block.
6716 + * @param[in] cfg TMU configuration
6717 + */
6718 +void tmu_init(struct tmu_cfg *cfg)
6719 +{
6720 + int q, phyno;
6721 +
6722 + tmu_disable(0xF);
6723 + mdelay(10);
6724 +
6725 +#if !defined(LS1012A_PFE_RESET_WA)
6726 + /* keep in soft reset */
6727 + writel(SW_RESET, TMU_CTRL);
6728 +#endif
6729 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
6730 + writel(750, TMU_INQ_WATERMARK);
6731 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
6732 + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
6733 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
6734 + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
6735 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
6736 + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
6737 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
6738 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
6739 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
6740 + TMU_BMU_INQ_ADDR);
6741 +
6742 + /* enable all 10 schedulers [9:0] of each TDQ */
6743 + writel(0x3FF, TMU_TDQ0_SCH_CTRL);
6746 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
6747 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
6748 +
6749 +#if !defined(LS1012A_PFE_RESET_WA)
6750 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
6751 +#endif
6752 +
6753 +#if !defined(LS1012A_PFE_RESET_WA)
6754 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
6755 + /* Extra packet pointers will be stored from this address onwards */
6756 +
6757 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
6758 + writel(5, TMU_TDQ_IIFG_CFG);
6759 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
6760 +
6761 + writel(0x0, TMU_CTRL);
6762 +
6763 + /* MEM init */
6764 + pr_info("%s: mem init\n", __func__);
6765 + writel(MEM_INIT, TMU_CTRL);
6766 +
6767 + while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
6768 + ;
6769 +
6770 + /* LLM init */
6771 + pr_info("%s: lmem init\n", __func__);
6772 + writel(LLM_INIT, TMU_CTRL);
6773 +
6774 + while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
6775 + ;
6776 +#endif
6777 + /* set up each queue for tail drop */
6778 + for (phyno = 0; phyno < 4; phyno++) {
6779 + if (phyno == 2)
6780 + continue;
6781 + for (q = 0; q < 16; q++) {
6782 + u32 qdepth;
6783 +
6784 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
6785 + writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
6786 +
6787 + if (phyno == 3)
6788 + qdepth = DEFAULT_TMU3_QDEPTH;
6789 + else
6790 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
6791 + DEFAULT_MAX_QDEPTH;
6792 +
6793 + /* LOG: 68855 */
6794 + /*
6795 + * The following is a workaround for the reordered
6796 + * packet and BMU2 buffer leakage issue.
6797 + */
6798 + if (CHIP_REVISION() == 0)
6799 + qdepth = 31;
6800 +
6801 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
6802 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
6803 + }
6804 + }
6805 +
6806 +#ifdef CFG_LRO
6807 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
6808 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
6809 + writel(0, TMU_TEQ_QCFG);
6810 +#endif
6811 +
6812 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
6813 +
6814 + writel(0x0, TMU_CTRL);
6815 +}
6816 +
6817 +/* Enables TMU-PE cores.
6818 + * @param[in] pe_mask TMU PE mask
6819 + */
6820 +void tmu_enable(u32 pe_mask)
6821 +{
6822 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
6823 +}
6824 +
6825 +/* Disables TMU cores.
6826 + * @param[in] pe_mask TMU PE mask
6827 + */
6828 +void tmu_disable(u32 pe_mask)
6829 +{
6830 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
6831 +}
6832 +
6833 +/* Returns the TMU queue status.
6834 + * @param[in] if_id	gem interface id or TMU index
6835 + * @return		bit mask of busy queues; zero means all
6836 + * queues are empty
6837 + */
6838 +u32 tmu_qstatus(u32 if_id)
6839 +{
6840 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6841 + offsetof(struct pe_status, tmu_qstatus), 4));
6842 +}
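+
+/*
+ * Illustrative use, a sketch rather than code from this patch: a caller
+ * can poll tmu_qstatus() until every queue of an interface has drained
+ * before shutting the port down, e.g.:
+ *
+ *	while (tmu_qstatus(if_id))
+ *		cpu_relax();
+ */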
6843 +
6844 +u32 tmu_pkts_processed(u32 if_id)
6845 +{
6846 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6847 + offsetof(struct pe_status, rx), 4));
6848 +}
6849 +
6850 +/**************************** UTIL ***************************/
6851 +
6852 +/* Resets UTIL block.
6853 + */
6854 +void util_reset(void)
6855 +{
6856 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
6857 +}
6858 +
6859 +/* Initializes UTIL block.
6860 + * @param[in] cfg UTIL configuration
6861 + */
6862 +void util_init(struct util_cfg *cfg)
6863 +{
6864 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
6865 +}
6866 +
6867 +/* Enables UTIL-PE core.
6868 + *
6869 + */
6870 +void util_enable(void)
6871 +{
6872 + writel(CORE_ENABLE, UTIL_TX_CTRL);
6873 +}
6874 +
6875 +/* Disables UTIL-PE core.
6876 + *
6877 + */
6878 +void util_disable(void)
6879 +{
6880 + writel(CORE_DISABLE, UTIL_TX_CTRL);
6881 +}
6882 +
6883 +/**************************** HIF ***************************/
6884 +/* Initializes HIF copy block.
6885 + *
6886 + */
6887 +void hif_init(void)
6888 +{
6889 + /*Initialize HIF registers*/
6890 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
6891 + HIF_POLL_CTRL);
6892 +}
6893 +
6894 +/* Enable hif tx DMA and interrupt
6895 + *
6896 + */
6897 +void hif_tx_enable(void)
6898 +{
6899 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
6900 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
6901 + HIF_INT_ENABLE);
6902 +}
6903 +
6904 +/* Disable hif tx DMA and interrupt
6905 + *
6906 + */
6907 +void hif_tx_disable(void)
6908 +{
6909 + u32 hif_int;
6910 +
6911 + writel(0, HIF_TX_CTRL);
6912 +
6913 + hif_int = readl(HIF_INT_ENABLE);
6914 +	hif_int &= ~HIF_TXPKT_INT_EN;
6915 + writel(hif_int, HIF_INT_ENABLE);
6916 +}
6917 +
6918 +/* Enable hif rx DMA and interrupt
6919 + *
6920 + */
6921 +void hif_rx_enable(void)
6922 +{
6923 + hif_rx_dma_start();
6924 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
6925 + HIF_INT_ENABLE);
6926 +}
6927 +
6928 +/* Disable hif rx DMA and interrupt
6929 + *
6930 + */
6931 +void hif_rx_disable(void)
6932 +{
6933 + u32 hif_int;
6934 +
6935 + writel(0, HIF_RX_CTRL);
6936 +
6937 + hif_int = readl(HIF_INT_ENABLE);
6938 +	hif_int &= ~HIF_RXPKT_INT_EN;
6939 + writel(hif_int, HIF_INT_ENABLE);
6940 +}
6941 --- /dev/null
6942 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
6943 @@ -0,0 +1,1072 @@
6944 +/*
6945 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6946 + * Copyright 2017 NXP
6947 + *
6948 + * This program is free software; you can redistribute it and/or modify
6949 + * it under the terms of the GNU General Public License as published by
6950 + * the Free Software Foundation; either version 2 of the License, or
6951 + * (at your option) any later version.
6952 + *
6953 + * This program is distributed in the hope that it will be useful,
6954 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6955 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6956 + * GNU General Public License for more details.
6957 + *
6958 + * You should have received a copy of the GNU General Public License
6959 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
6960 + */
6961 +
6962 +#include <linux/kernel.h>
6963 +#include <linux/interrupt.h>
6964 +#include <linux/dma-mapping.h>
6965 +#include <linux/dmapool.h>
6966 +#include <linux/sched.h>
6967 +#include <linux/module.h>
6968 +#include <linux/list.h>
6969 +#include <linux/kthread.h>
6970 +#include <linux/slab.h>
6971 +
6972 +#include <linux/io.h>
6973 +#include <asm/irq.h>
6974 +
6975 +#include "pfe_mod.h"
6976 +
6977 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
6978 +
6979 +unsigned char napi_first_batch;
6980 +
6981 +static void pfe_tx_do_cleanup(unsigned long data);
6982 +
6983 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
6984 +{
6985 + void *addr;
6986 + dma_addr_t dma_addr;
6987 + int err = 0;
6988 +
6989 + pr_info("%s\n", __func__);
6990 + addr = dma_alloc_coherent(pfe->dev,
6991 + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
6992 + HIF_TX_DESC_NT * sizeof(struct hif_desc),
6993 + &dma_addr, GFP_KERNEL);
6994 +
6995 + if (!addr) {
6996 + pr_err("%s: Could not allocate buffer descriptors!\n"
6997 + , __func__);
6998 + err = -ENOMEM;
6999 + goto err0;
7000 + }
7001 +
7002 + hif->descr_baseaddr_p = dma_addr;
7003 + hif->descr_baseaddr_v = addr;
7004 + hif->rx_ring_size = HIF_RX_DESC_NT;
7005 + hif->tx_ring_size = HIF_TX_DESC_NT;
7006 +
7007 + return 0;
7008 +
7009 +err0:
7010 + return err;
7011 +}
7012 +
7013 +#if defined(LS1012A_PFE_RESET_WA)
7014 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
7015 +{
7016 + int ii;
7017 + struct hif_desc *desc = hif->rx_base;
7018 +
7019 + /*Mark all descriptors as LAST_BD */
7020 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
7021 + desc->ctrl |= BD_CTRL_LAST_BD;
7022 + desc++;
7023 + }
7024 +}
7025 +
7026 +struct class_rx_hdr_t {
7027 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
7028 + u16 length; /* total packet length */
7029 + u16 phyno; /* input physical port number */
7030 + u32 status; /* gemac status bits */
7031 + u32 status2; /* reserved for software usage */
7032 +};
7033 +
7034 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
7035 + * except overflow
7036 + */
7037 +#define STATUS_BAD_FRAME_ERR BIT(16)
7038 +#define STATUS_LENGTH_ERR BIT(17)
7039 +#define STATUS_CRC_ERR BIT(18)
7040 +#define STATUS_TOO_SHORT_ERR BIT(19)
7041 +#define STATUS_TOO_LONG_ERR BIT(20)
7042 +#define STATUS_CODE_ERR BIT(21)
7043 +#define STATUS_MC_HASH_MATCH BIT(22)
7044 +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
7045 +#define STATUS_UNICAST_HASH_MATCH BIT(24)
7046 +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
7047 +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
7048 +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
7049 +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
7050 +#define MIN_PKT_SIZE 64
7051 +
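+/*
+ * Copy a buffer into PFE LMEM one 32-bit word at a time, byte-swapping
+ * each word with htonl(); the assumption (inferred from the per-word
+ * swap below) is that the PFE memories are big-endian while the host
+ * CPU is little-endian, so a plain memcpy would hand the hardware
+ * byte-swapped data.
+ */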
7052 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
7053 +{
7054 + int i;
7055 +
7056 + for (i = 0; i < len; i += sizeof(u32)) {
7057 + *dst = htonl(*src);
7058 + dst++; src++;
7059 + }
7060 +}
7061 +
7062 +static void send_dummy_pkt_to_hif(void)
7063 +{
7064 + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
7065 + u32 physaddr;
7066 + struct class_rx_hdr_t local_hdr;
7067 + static u32 dummy_pkt[] = {
7068 + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
7069 + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
7070 + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
7071 + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
7072 +
7073 + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
7074 + if (!ddr_ptr)
7075 + return;
7076 +
7077 + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
7078 + if (!lmem_ptr)
7079 + return;
7080 +
7081 + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
7082 + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
7083 +
7084 + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
7085 +
7086 + local_hdr.phyno = htons(0); /* RX_PHY_0 */
7087 + local_hdr.length = htons(MIN_PKT_SIZE);
7088 +
7089 + local_hdr.next_ptr = htonl((u32)physaddr);
7090 + /*Mark checksum is correct */
7091 + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
7092 + STATUS_UDP_CHECKSUM_CORRECT |
7093 + STATUS_TCP_CHECKSUM_CORRECT |
7094 + STATUS_UNICAST_HASH_MATCH |
7095 + STATUS_CUMULATIVE_ARC_HIT));
7096 + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
7097 + sizeof(local_hdr));
7098 +
7099 + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
7100 + 0x40);
7101 +
7102 + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
7103 +}
7104 +
7105 +void pfe_hif_rx_idle(struct pfe_hif *hif)
7106 +{
7107 + int hif_stop_loop = 10;
7108 + u32 rx_status;
7109 +
7110 + pfe_hif_disable_rx_desc(hif);
7111 + pr_info("Bringing hif to idle state...");
7112 + writel(0, HIF_INT_ENABLE);
7113 + /*If HIF Rx BDP is busy send a dummy packet */
7114 + do {
7115 + rx_status = readl(HIF_RX_STATUS);
7116 + if (rx_status & BDP_CSR_RX_DMA_ACTV)
7117 + send_dummy_pkt_to_hif();
7118 +
7119 + usleep_range(100, 150);
7120 + } while (--hif_stop_loop);
7121 +
7122 + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
7123 + pr_info("Failed\n");
7124 + else
7125 + pr_info("Done\n");
7126 +}
7127 +#endif
7128 +
7129 +static void pfe_hif_free_descr(struct pfe_hif *hif)
7130 +{
7131 + pr_info("%s\n", __func__);
7132 +
7133 + dma_free_coherent(pfe->dev,
7134 + hif->rx_ring_size * sizeof(struct hif_desc) +
7135 + hif->tx_ring_size * sizeof(struct hif_desc),
7136 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
7137 +}
7138 +
7139 +void pfe_hif_desc_dump(struct pfe_hif *hif)
7140 +{
7141 + struct hif_desc *desc;
7142 + unsigned long desc_p;
7143 + int ii = 0;
7144 +
7145 + pr_info("%s\n", __func__);
7146 +
7147 + desc = hif->rx_base;
7148 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
7149 + hif->descr_baseaddr_p);
7150 +
7151 + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
7152 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
7153 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
7154 + readl(&desc->status), readl(&desc->ctrl),
7155 + readl(&desc->data), readl(&desc->next));
7156 + desc++;
7157 + }
7158 +
7159 + desc = hif->tx_base;
7160 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
7161 + hif->descr_baseaddr_p);
7162 +
7163 + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
7164 + for (ii = 0; ii < hif->tx_ring_size; ii++) {
7165 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
7166 + readl(&desc->status), readl(&desc->ctrl),
7167 + readl(&desc->data), readl(&desc->next));
7168 + desc++;
7169 + }
7170 +}
7171 +
7172 +/* pfe_hif_release_buffers */
7173 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
7174 +{
7175 + struct hif_desc *desc;
7176 + int i = 0;
7177 +
7178 + hif->rx_base = hif->descr_baseaddr_v;
7179 +
7180 + pr_info("%s\n", __func__);
7181 +
7182 + /*Free Rx buffers */
7183 + desc = hif->rx_base;
7184 + for (i = 0; i < hif->rx_ring_size; i++) {
7185 + if (readl(&desc->data)) {
7186 + if ((i < hif->shm->rx_buf_pool_cnt) &&
7187 + (!hif->shm->rx_buf_pool[i])) {
7192 + dma_unmap_single(hif->dev,
7193 + DDR_PFE_TO_PHYS(
7194 + readl(&desc->data)),
7195 + hif->rx_buf_len[i],
7196 + DMA_FROM_DEVICE);
7197 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
7198 + } else {
7199 + pr_err("%s: buffer pool already full\n"
7200 + , __func__);
7201 + }
7202 + }
7203 +
7204 + writel(0, &desc->data);
7205 + writel(0, &desc->status);
7206 + writel(0, &desc->ctrl);
7207 + desc++;
7208 + }
7209 +}
7210 +
7211 +/*
7212 + * pfe_hif_init_buffers
7213 + * This function initializes the HIF Rx/Tx ring descriptors and
7214 + * initialize Rx queue with buffers.
7215 + */
7216 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
7217 +{
7218 + struct hif_desc *desc, *first_desc_p;
7219 + u32 data;
7220 + int i = 0;
7221 +
7222 + pr_info("%s\n", __func__);
7223 +
7224 + /* Check enough Rx buffers available in the shared memory */
7225 + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
7226 + return -ENOMEM;
7227 +
7228 + hif->rx_base = hif->descr_baseaddr_v;
7229 + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
7230 +
7231 + /*Initialize Rx descriptors */
7232 + desc = hif->rx_base;
7233 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
7234 +
7235 + for (i = 0; i < hif->rx_ring_size; i++) {
7236 + /* Initialize Rx buffers from the shared memory */
7237 +
7238 + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
7239 + pfe_pkt_size, DMA_FROM_DEVICE);
7240 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
7241 + hif->rx_buf_len[i] = pfe_pkt_size;
7242 + hif->shm->rx_buf_pool[i] = NULL;
7243 +
7244 +		if (likely(!dma_mapping_error(hif->dev, data))) {
7245 +			writel(DDR_PHYS_TO_PFE(data), &desc->data);
7246 +		} else {
7247 +			pr_err("%s: dma mapping failed\n", __func__);
7248 +			goto err;
7249 +		}
7251 +
7252 + writel(0, &desc->status);
7253 +
7254 + /*
7255 + * Ensure everything else is written to DDR before
7256 + * writing bd->ctrl
7257 + */
7258 + wmb();
7259 +
7260 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
7261 + | BD_CTRL_DIR | BD_CTRL_DESC_EN
7262 + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
7263 +
7264 + /* Chain descriptors */
7265 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7266 + desc++;
7267 + }
7268 +
7269 + /* Overwrite last descriptor to chain it to first one*/
7270 + desc--;
7271 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7272 +
7273 + hif->rxtoclean_index = 0;
7274 +
7275 + /*Initialize Rx buffer descriptor ring base address */
7276 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
7277 +
7278 + hif->tx_base = hif->rx_base + hif->rx_ring_size;
7279 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
7280 + hif->rx_ring_size;
7281 + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
7282 +
7283 + /*Initialize tx descriptors */
7284 + desc = hif->tx_base;
7285 +
7286 + for (i = 0; i < hif->tx_ring_size; i++) {
7287 + /* Chain descriptors */
7288 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7289 + writel(0, &desc->ctrl);
7290 + desc++;
7291 + }
7292 +
7293 + /* Overwrite last descriptor to chain it to first one */
7294 + desc--;
7295 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7296 + hif->txavail = hif->tx_ring_size;
7297 + hif->txtosend = 0;
7298 + hif->txtoclean = 0;
7299 + hif->txtoflush = 0;
7300 +
7301 + /*Initialize Tx buffer descriptor ring base address */
7302 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7303 +
7304 + return 0;
7305 +
7306 +err:
7307 + pfe_hif_release_buffers(hif);
7308 + return -ENOMEM;
7309 +}
7310 +
7311 +/*
7312 + * pfe_hif_client_register
7313 + *
7314 + * This function used to register a client driver with the HIF driver.
7315 + * This function is used to register a client driver with the HIF driver.
7316 + *
7317 + * Return value:
7318 + * 0 on successful registration
7319 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7320 + struct hif_client_shm *client_shm)
7321 +{
7322 + struct hif_client *client = &hif->client[client_id];
7323 + u32 i, cnt;
7324 + struct rx_queue_desc *rx_qbase;
7325 + struct tx_queue_desc *tx_qbase;
7326 + struct hif_rx_queue *rx_queue;
7327 + struct hif_tx_queue *tx_queue;
7328 + int err = 0;
7329 +
7330 + pr_info("%s\n", __func__);
7331 +
7332 + spin_lock_bh(&hif->tx_lock);
7333 +
7334 + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7335 + pr_err("%s: client %d already registered\n",
7336 + __func__, client_id);
7337 + err = -1;
7338 + goto unlock;
7339 + }
7340 +
7341 + memset(client, 0, sizeof(struct hif_client));
7342 +
7343 + /* Initialize client Rx queues baseaddr, size */
7344 +
7345 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7346 + /* Check if client is requesting for more queues than supported */
7347 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7348 + cnt = HIF_CLIENT_QUEUES_MAX;
7349 +
7350 + client->rx_qn = cnt;
7351 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7352 + for (i = 0; i < cnt; i++) {
7353 + rx_queue = &client->rx_q[i];
7354 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7355 + rx_queue->size = client_shm->rx_qsize;
7356 + rx_queue->write_idx = 0;
7357 + }
7358 +
7359 + /* Initialize client Tx queues baseaddr, size */
7360 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7361 +
7362 + /* Check if client is requesting for more queues than supported */
7363 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7364 + cnt = HIF_CLIENT_QUEUES_MAX;
7365 +
7366 + client->tx_qn = cnt;
7367 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7368 + for (i = 0; i < cnt; i++) {
7369 + tx_queue = &client->tx_q[i];
7370 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7371 + tx_queue->size = client_shm->tx_qsize;
7372 + tx_queue->ack_idx = 0;
7373 + }
7374 +
7375 + set_bit(client_id, &hif->shm->g_client_status[0]);
7376 +
7377 +unlock:
7378 + spin_unlock_bh(&hif->tx_lock);
7379 +
7380 + return err;
7381 +}
7382 +
7383 +/*
7384 + * pfe_hif_client_unregister
7385 + *
7386 + * This function is used to unregister a client from the HIF driver.
7387 + *
7388 + */
7389 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7390 +{
7391 + pr_info("%s\n", __func__);
7392 +
7393 + /*
7394 + * Mark client as no longer available (which prevents further packet
7395 + * receive for this client)
7396 + */
7397 + spin_lock_bh(&hif->tx_lock);
7398 +
7399 + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7400 + pr_err("%s: client %d not registered\n", __func__,
7401 + client_id);
7402 +
7403 + spin_unlock_bh(&hif->tx_lock);
7404 + return;
7405 + }
7406 +
7407 + clear_bit(client_id, &hif->shm->g_client_status[0]);
7408 +
7409 + spin_unlock_bh(&hif->tx_lock);
7410 +}
7411 +
7412 +/*
7413 + * client_put_rxpacket-
7414 + * This function puts the Rx pkt in the given client Rx queue.
7415 + * It swaps the Rx pkt into the client Rx descriptor buffer
7416 + * and returns a free buffer taken from it.
7417 + *
7418 + * A NULL return means the client Rx queue is full and the
7419 + * packet could not be handed to the client queue.
7420 + */
7421 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7422 + u32 flags, u32 client_ctrl, u32 *rem_len)
7423 +{
7424 + void *free_pkt = NULL;
7425 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
7426 +
7427 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7428 + if (page_mode) {
7429 + int rem_page_size = PAGE_SIZE -
7430 + PRESENT_OFST_IN_PAGE(pkt);
7431 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7432 + pfe_pkt_headroom);
7433 + *rem_len = (rem_page_size - cur_pkt_size);
7434 + if (*rem_len) {
7435 + free_pkt = pkt + cur_pkt_size;
7436 + get_page(virt_to_page(free_pkt));
7437 + } else {
7438 + free_pkt = (void
7439 + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7440 + *rem_len = pfe_pkt_size;
7441 + }
7442 + } else {
7443 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7444 + GFP_DMA_PFE);
7445 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7446 + }
7447 +
7448 + if (free_pkt) {
7449 + desc->data = pkt;
7450 + desc->client_ctrl = client_ctrl;
7451 + /*
7452 + * Ensure everything else is written to DDR before
7453 + * writing bd->ctrl
7454 + */
7455 + smp_wmb();
7456 + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7457 + queue->write_idx = (queue->write_idx + 1)
7458 + & (queue->size - 1);
7459 +
7460 + free_pkt += pfe_pkt_headroom;
7461 + }
7462 + }
7463 +
7464 + return free_pkt;
7465 +}
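+
+/*
+ * Worked example for the page_mode path above, assuming 4 KiB pages and
+ * the 2 KiB HIF_RX_PKT_MIN_SIZE granularity from pfe_hif.h: a 1500 byte
+ * frame plus headroom rounds up to 2 KiB, so *rem_len is non-zero, the
+ * second 2 KiB half of the page becomes the refill buffer and
+ * get_page() bumps the page refcount; only when the packet consumed the
+ * whole page is a fresh page allocated instead.
+ */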
7466 +
7467 +/*
7468 + * pfe_hif_rx_process-
7469 + * This function does the PFE HIF Rx queue processing: it dequeues
7470 + * packets from the Rx ring and hands them to the corresponding client queue.
7471 + */
7472 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7473 +{
7474 + struct hif_desc *desc;
7475 + struct hif_hdr *pkt_hdr;
7476 + struct __hif_hdr hif_hdr;
7477 + void *free_buf;
7478 + int rtc, len, rx_processed = 0;
7479 + struct __hif_desc local_desc;
7480 + int flags;
7481 + unsigned int desc_p;
7482 + unsigned int buf_size = 0;
7483 +
7484 + spin_lock_bh(&hif->lock);
7485 +
7486 + rtc = hif->rxtoclean_index;
7487 +
7488 + while (rx_processed < budget) {
7489 + desc = hif->rx_base + rtc;
7490 +
7491 + __memcpy12(&local_desc, desc);
7492 +
7493 + /* ACK pending Rx interrupt */
7494 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7495 + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7496 +
7497 + if (rx_processed == 0) {
7498 + if (napi_first_batch == 1) {
7499 + desc_p = hif->descr_baseaddr_p +
7500 + ((unsigned long int)(desc) -
7501 + (unsigned long
7502 + int)hif->descr_baseaddr_v);
7503 + napi_first_batch = 0;
7504 + }
7505 + }
7506 +
7507 + __memcpy12(&local_desc, desc);
7508 +
7509 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
7510 + break;
7511 + }
7512 +
7513 + napi_first_batch = 0;
7514 +
7515 +#ifdef HIF_NAPI_STATS
7516 + hif->napi_counters[NAPI_DESC_COUNT]++;
7517 +#endif
7518 + len = BD_BUF_LEN(local_desc.ctrl);
7523 + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7524 + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7525 +
7526 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7527 +
7528 +		/* Parse the HIF header on the first buffer of a packet */
7529 + if (!hif->started) {
7530 + hif->started = 1;
7531 +
7532 + __memcpy8(&hif_hdr, pkt_hdr);
7533 +
7534 + hif->qno = hif_hdr.hdr.q_num;
7535 + hif->client_id = hif_hdr.hdr.client_id;
7536 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7537 + hif_hdr.hdr.client_ctrl;
7538 + flags = CL_DESC_FIRST;
7539 +
7540 + } else {
7541 + flags = 0;
7542 + }
7543 +
7544 + if (local_desc.ctrl & BD_CTRL_LIFM)
7545 + flags |= CL_DESC_LAST;
7546 +
7547 + /* Check for valid client id and still registered */
7548 + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7549 + !(test_bit(hif->client_id,
7550 + &hif->shm->g_client_status[0]))) {
7551 + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7552 + __func__,
7553 + hif->client_id,
7554 + hif->qno);
7555 +
7556 + free_buf = pkt_hdr;
7557 +
7558 + goto pkt_drop;
7559 + }
7560 +
7561 +		/* Check for a valid queue number */
7562 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7563 + pr_info("%s: packet with invalid queue: %d\n"
7564 + , __func__, hif->qno);
7565 + hif->qno = 0;
7566 + }
7567 +
7568 + free_buf =
7569 + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7570 + (void *)pkt_hdr, len, flags,
7571 + hif->client_ctrl, &buf_size);
7572 +
7573 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7574 + hif->qno);
7575 +
7576 + if (unlikely(!free_buf)) {
7577 +#ifdef HIF_NAPI_STATS
7578 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7579 +#endif
7580 + /*
7581 +			 * If we want to stay in polling mode to retry later,
7582 +			 * we need to tell NAPI that we consumed the full
7583 +			 * budget, or we hit a livelock: the core code keeps
7584 +			 * this NAPI instance at the head of the list and
7585 +			 * none of the other instances get to run.
7587 + */
7588 + rx_processed = budget;
7589 +
7590 + if (flags & CL_DESC_FIRST)
7591 + hif->started = 0;
7592 +
7593 + break;
7594 + }
7595 +
7596 +pkt_drop:
7597 + /*Fill free buffer in the descriptor */
7598 + hif->rx_buf_addr[rtc] = free_buf;
7599 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7600 + writel((DDR_PHYS_TO_PFE
7601 + ((u32)dma_map_single(hif->dev,
7602 + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7603 + &desc->data);
7604 + /*
7605 + * Ensure everything else is written to DDR before
7606 + * writing bd->ctrl
7607 + */
7608 + wmb();
7609 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7610 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7611 + &desc->ctrl);
7612 +
7613 + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7614 +
7615 + if (local_desc.ctrl & BD_CTRL_LIFM) {
7616 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7617 + rx_processed++;
7618 +
7619 +#ifdef HIF_NAPI_STATS
7620 + hif->napi_counters[NAPI_PACKET_COUNT]++;
7621 +#endif
7622 + }
7623 + hif->started = 0;
7624 + }
7625 + }
7626 +
7627 + hif->rxtoclean_index = rtc;
7628 + spin_unlock_bh(&hif->lock);
7629 +
7630 + /* we made some progress, re-start rx dma in case it stopped */
7631 + hif_rx_dma_start();
7632 +
7633 + return rx_processed;
7634 +}
7635 +
7636 +/*
7637 + * client_ack_txpacket-
7638 + * This function acks the Tx packet in the given client Tx queue by
7639 + * clearing the ownership bit in the descriptor.
7640 + */
7641 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7642 + unsigned int q_no)
7643 +{
7644 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7645 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7646 +
7647 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7648 + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7649 + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7650 +
7651 + return 0;
7652 +
7653 + } else {
7654 + /*This should not happen */
7655 + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7656 + hif->txtosend, hif->txtoclean, hif->txavail,
7657 + client_id, q_no, queue, queue->ack_idx);
7658 + WARN(1, "%s: doesn't own this descriptor", __func__);
7659 + return 1;
7660 + }
7661 +}
7662 +
7663 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7664 +{
7665 + struct hif_desc *desc;
7666 + struct hif_desc_sw *desc_sw;
7667 + int ttc, tx_avl;
7668 + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7669 +
7670 + ttc = hif->txtoclean;
7671 + tx_avl = hif->txavail;
7672 +
7673 + while ((tx_avl < hif->tx_ring_size) && count--) {
7674 + desc = hif->tx_base + ttc;
7675 +
7676 + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7677 + break;
7678 +
7679 + desc_sw = &hif->tx_sw_queue[ttc];
7680 +
7681 + if (desc_sw->data) {
7686 + dma_unmap_single(hif->dev, desc_sw->data,
7687 + desc_sw->len, DMA_TO_DEVICE);
7688 + }
7689 +
7690 +		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7691 + pr_err("Invalid cl id %d\n", desc_sw->client_id);
7692 +
7693 + pkts_done[desc_sw->client_id]++;
7694 +
7695 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7696 +
7697 + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7698 + tx_avl++;
7699 + }
7700 +
7701 + if (pkts_done[0])
7702 + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7703 + if (pkts_done[1])
7704 + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7705 +
7706 + hif->txtoclean = ttc;
7707 + hif->txavail = tx_avl;
7708 +
7709 + if (!count) {
7710 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7711 + } else {
7712 + /*Enable Tx done interrupt */
7713 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7714 + HIF_INT_ENABLE);
7715 + }
7716 +}
7717 +
7718 +static void pfe_tx_do_cleanup(unsigned long data)
7719 +{
7720 + struct pfe_hif *hif = (struct pfe_hif *)data;
7721 +
7722 + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7723 +
7724 + hif_tx_done_process(hif, 64);
7725 +}
7726 +
7727 +/*
7728 + * __hif_xmit_pkt -
7729 + * This function puts one packet in the HIF Tx queue
7730 + */
7731 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7732 + q_no, void *data, u32 len, unsigned int flags)
7733 +{
7734 + struct hif_desc *desc;
7735 + struct hif_desc_sw *desc_sw;
7736 +
7737 + desc = hif->tx_base + hif->txtosend;
7738 + desc_sw = &hif->tx_sw_queue[hif->txtosend];
7739 +
7740 + desc_sw->len = len;
7741 + desc_sw->client_id = client_id;
7742 + desc_sw->q_no = q_no;
7743 + desc_sw->flags = flags;
7744 +
7745 + if (flags & HIF_DONT_DMA_MAP) {
7746 + desc_sw->data = 0;
7747 + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
7748 + } else {
7749 + desc_sw->data = dma_map_single(hif->dev, data, len,
7750 + DMA_TO_DEVICE);
7751 + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
7752 + }
7753 +
7754 + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
7755 + hif->txavail--;
7756 +
7757 +	if (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER)))
7759 + goto skip_tx;
7760 +
7761 + /*
7762 + * Ensure everything else is written to DDR before
7763 + * writing bd->ctrl
7764 + */
7765 + wmb();
7766 +
7767 + do {
7768 + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
7769 + desc = hif->tx_base + hif->txtoflush;
7770 +
7771 + if (desc_sw->flags & HIF_LAST_BUFFER) {
7772 + writel((BD_CTRL_LIFM |
7773 + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
7774 + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
7775 + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
7776 + &desc->ctrl);
7777 + } else {
7778 + writel((BD_CTRL_DESC_EN |
7779 + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
7780 + }
7781 + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
7782 +	} while (hif->txtoflush != hif->txtosend);
7785 +
7786 +skip_tx:
7787 + return;
7788 +}
7789 +
7790 +static irqreturn_t wol_isr(int irq, void *dev_id)
7791 +{
7792 + pr_info("WoL\n");
7793 + gemac_set_wol(EMAC1_BASE_ADDR, 0);
7794 + gemac_set_wol(EMAC2_BASE_ADDR, 0);
7795 + return IRQ_HANDLED;
7796 +}
7797 +
7798 +/*
7799 + * hif_isr-
7800 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
7801 + */
7802 +static irqreturn_t hif_isr(int irq, void *dev_id)
7803 +{
7804 + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
7805 + int int_status;
7806 + int int_enable_mask;
7807 +
7808 + /*Read hif interrupt source register */
7809 + int_status = readl_relaxed(HIF_INT_SRC);
7810 + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
7811 +
7812 + if ((int_status & HIF_INT) == 0)
7813 + return IRQ_NONE;
7814 +
7815 + int_status &= ~(HIF_INT);
7816 +
7817 + if (int_status & HIF_RXPKT_INT) {
7818 + int_status &= ~(HIF_RXPKT_INT);
7819 + int_enable_mask &= ~(HIF_RXPKT_INT);
7820 +
7821 + napi_first_batch = 1;
7822 +
7823 + if (napi_schedule_prep(&hif->napi)) {
7824 +#ifdef HIF_NAPI_STATS
7825 + hif->napi_counters[NAPI_SCHED_COUNT]++;
7826 +#endif
7827 + __napi_schedule(&hif->napi);
7828 + }
7829 + }
7830 +
7831 + if (int_status & HIF_TXPKT_INT) {
7832 + int_status &= ~(HIF_TXPKT_INT);
7833 + int_enable_mask &= ~(HIF_TXPKT_INT);
7834 +		/* Schedule tx cleanup tasklet */
7835 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7836 + }
7837 +
7838 + /*Disable interrupts, they will be enabled after they are serviced */
7839 + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
7840 +
7841 + if (int_status) {
7842 +		pr_info("%s: unexpected interrupt source: 0x%x\n", __func__,
7843 +			int_status);
7844 + writel(int_status, HIF_INT_SRC);
7845 + }
7846 +
7847 + return IRQ_HANDLED;
7848 +}
7849 +
7850 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
7851 +{
7852 + unsigned int client_id = data1;
7853 +
7854 + if (client_id >= HIF_CLIENTS_MAX) {
7855 + pr_err("%s: client id %d out of bounds\n", __func__,
7856 + client_id);
7857 + return;
7858 + }
7859 +
7860 + switch (req) {
7861 + case REQUEST_CL_REGISTER:
7862 + /* Request for register a client */
7863 + pr_info("%s: register client_id %d\n",
7864 + __func__, client_id);
7865 + pfe_hif_client_register(hif, client_id, (struct
7866 + hif_client_shm *)&hif->shm->client[client_id]);
7867 + break;
7868 +
7869 + case REQUEST_CL_UNREGISTER:
7870 + pr_info("%s: unregister client_id %d\n",
7871 + __func__, client_id);
7872 +
7873 + /* Request for unregister a client */
7874 + pfe_hif_client_unregister(hif, client_id);
7875 +
7876 + break;
7877 +
7878 + default:
7879 + pr_err("%s: unsupported request %d\n",
7880 + __func__, req);
7881 + break;
7882 + }
7883 +
7884 + /*
7885 +	 * Process client Tx queues.
7886 +	 * Currently we don't check for pending Tx here.
7887 + */
7888 +}
7889 +
7890 +/*
7891 + * pfe_hif_rx_poll
7892 + * This function is NAPI poll function to process HIF Rx queue.
7893 + */
7894 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
7895 +{
7896 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
7897 + int work_done;
7898 +
7899 +#ifdef HIF_NAPI_STATS
7900 + hif->napi_counters[NAPI_POLL_COUNT]++;
7901 +#endif
7902 +
7903 + work_done = pfe_hif_rx_process(hif, budget);
7904 +
7905 + if (work_done < budget) {
7906 + napi_complete(napi);
7907 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
7908 + HIF_INT_ENABLE);
7909 + }
7910 +#ifdef HIF_NAPI_STATS
7911 + else
7912 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
7913 +#endif
7914 +
7915 + return work_done;
7916 +}
7917 +
7918 +/*
7919 + * pfe_hif_init
7920 + * This function initializes the descriptor rings, base addresses, IRQs, etc.
7921 + */
7922 +int pfe_hif_init(struct pfe *pfe)
7923 +{
7924 + struct pfe_hif *hif = &pfe->hif;
7925 + int err;
7926 +
7927 + pr_info("%s\n", __func__);
7928 +
7929 + hif->dev = pfe->dev;
7930 + hif->irq = pfe->hif_irq;
7931 +
7932 + err = pfe_hif_alloc_descr(hif);
7933 + if (err)
7934 + goto err0;
7935 +
7936 + if (pfe_hif_init_buffers(hif)) {
7937 + pr_err("%s: Could not initialize buffer descriptors\n"
7938 + , __func__);
7939 + err = -ENOMEM;
7940 + goto err1;
7941 + }
7942 +
7943 + /* Initialize NAPI for Rx processing */
7944 + init_dummy_netdev(&hif->dummy_dev);
7945 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
7946 + HIF_RX_POLL_WEIGHT);
7947 + napi_enable(&hif->napi);
7948 +
7949 + spin_lock_init(&hif->tx_lock);
7950 + spin_lock_init(&hif->lock);
7951 +
7952 + hif_init();
7953 + hif_rx_enable();
7954 + hif_tx_enable();
7955 +
7956 + /* Disable tx done interrupt */
7957 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
7958 +
7959 + gpi_enable(HGPI_BASE_ADDR);
7960 +
7961 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
7962 + if (err) {
7963 + pr_err("%s: failed to get the hif IRQ = %d\n",
7964 + __func__, hif->irq);
7965 + goto err1;
7966 + }
7967 +
7968 + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
7969 + if (err) {
7970 + pr_err("%s: failed to get the wol IRQ = %d\n",
7971 + __func__, pfe->wol_irq);
7972 + goto err1;
7973 + }
7974 +
7975 + tasklet_init(&hif->tx_cleanup_tasklet,
7976 + (void(*)(unsigned long))pfe_tx_do_cleanup,
7977 + (unsigned long)hif);
7978 +
7979 + return 0;
7980 +err1:
7981 + pfe_hif_free_descr(hif);
7982 +err0:
7983 + return err;
7984 +}
7985 +
7986 +/* pfe_hif_exit- */
7987 +void pfe_hif_exit(struct pfe *pfe)
7988 +{
7989 + struct pfe_hif *hif = &pfe->hif;
7990 +
7991 + pr_info("%s\n", __func__);
7992 +
7993 + tasklet_kill(&hif->tx_cleanup_tasklet);
7994 +
7995 + spin_lock_bh(&hif->lock);
7996 + hif->shm->g_client_status[0] = 0;
7997 + /* Make sure all clients are disabled*/
7998 + hif->shm->g_client_status[1] = 0;
7999 +
8000 + spin_unlock_bh(&hif->lock);
8001 +
8002 + /*Disable Rx/Tx */
8003 + gpi_disable(HGPI_BASE_ADDR);
8004 + hif_rx_disable();
8005 + hif_tx_disable();
8006 +
8007 + napi_disable(&hif->napi);
8008 + netif_napi_del(&hif->napi);
8009 +
8010 + free_irq(pfe->wol_irq, pfe);
8011 + free_irq(hif->irq, hif);
8012 +
8013 + pfe_hif_release_buffers(hif);
8014 + pfe_hif_free_descr(hif);
8015 +}
8016 --- /dev/null
8017 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
8018 @@ -0,0 +1,212 @@
8019 +/*
8020 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8021 + * Copyright 2017 NXP
8022 + *
8023 + * This program is free software; you can redistribute it and/or modify
8024 + * it under the terms of the GNU General Public License as published by
8025 + * the Free Software Foundation; either version 2 of the License, or
8026 + * (at your option) any later version.
8027 + *
8028 + * This program is distributed in the hope that it will be useful,
8029 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8030 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8031 + * GNU General Public License for more details.
8032 + *
8033 + * You should have received a copy of the GNU General Public License
8034 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8035 + */
8036 +
8037 +#ifndef _PFE_HIF_H_
8038 +#define _PFE_HIF_H_
8039 +
8040 +#include <linux/netdevice.h>
8041 +#include <linux/interrupt.h>
8042 +
8043 +#define HIF_NAPI_STATS
8044 +
8045 +#define HIF_CLIENT_QUEUES_MAX 16
8046 +#define HIF_RX_POLL_WEIGHT 64
8047 +
8048 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
8049 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
8050 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
8051 + & HIF_RX_PKT_MIN_SIZE_MASK)
8052 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
8053 + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
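+
+/*
+ * Example, assuming 4 KiB pages: ROUND_MIN_RX_SIZE(1664) == 2048, and
+ * PRESENT_OFST_IN_PAGE() of a buffer at page offset 0x900 is 0x800,
+ * i.e. both macros work at the 2 KiB HIF_RX_PKT_MIN_SIZE granularity.
+ */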
8054 +
8055 +enum {
8056 + NAPI_SCHED_COUNT = 0,
8057 + NAPI_POLL_COUNT,
8058 + NAPI_PACKET_COUNT,
8059 + NAPI_DESC_COUNT,
8060 + NAPI_FULL_BUDGET_COUNT,
8061 + NAPI_CLIENT_FULL_COUNT,
8062 + NAPI_MAX_COUNT
8063 +};
8064 +
8065 +/*
8066 + * The HIF_TX_DESC_NT value should always be greater than 4,
8067 + * otherwise HIF_TX_POLL_MARK becomes zero.
8068 + */
8069 +#define HIF_RX_DESC_NT 256
8070 +#define HIF_TX_DESC_NT 2048
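+
+/*
+ * Both ring sizes must stay powers of two: the ring indices throughout
+ * pfe_hif.c wrap with a mask instead of a modulo, e.g.:
+ *
+ *	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+ */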
8071 +
8072 +#define HIF_FIRST_BUFFER BIT(0)
8073 +#define HIF_LAST_BUFFER BIT(1)
8074 +#define HIF_DONT_DMA_MAP BIT(2)
8075 +#define HIF_DATA_VALID BIT(3)
8076 +#define HIF_TSO BIT(4)
8077 +
8078 +enum {
8079 + PFE_CL_GEM0 = 0,
8080 + PFE_CL_GEM1,
8081 + HIF_CLIENTS_MAX
8082 +};
8083 +
8084 +/*structure to store client queue info */
8085 +struct hif_rx_queue {
8086 + struct rx_queue_desc *base;
8087 + u32 size;
8088 + u32 write_idx;
8089 +};
8090 +
8091 +struct hif_tx_queue {
8092 + struct tx_queue_desc *base;
8093 + u32 size;
8094 + u32 ack_idx;
8095 +};
8096 +
8097 +/*Structure to store the client info */
8098 +struct hif_client {
8099 + int rx_qn;
8100 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8101 + int tx_qn;
8102 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8103 +};
8104 +
8105 +/*HIF hardware buffer descriptor */
8106 +struct hif_desc {
8107 + u32 ctrl;
8108 + u32 status;
8109 + u32 data;
8110 + u32 next;
8111 +};
8112 +
8113 +struct __hif_desc {
8114 + u32 ctrl;
8115 + u32 status;
8116 + u32 data;
8117 +};
8118 +
8119 +struct hif_desc_sw {
8120 + dma_addr_t data;
8121 + u16 len;
8122 + u8 client_id;
8123 + u8 q_no;
8124 + u16 flags;
8125 +};
8126 +
8127 +struct hif_hdr {
8128 + u8 client_id;
8129 + u8 q_num;
8130 + u16 client_ctrl;
8131 + u16 client_ctrl1;
8132 +};
8133 +
8134 +struct __hif_hdr {
8135 + union {
8136 + struct hif_hdr hdr;
8137 + u32 word[2];
8138 + };
8139 +};
8140 +
8141 +struct hif_ipsec_hdr {
8142 + u16 sa_handle[2];
8143 +} __packed;
8144 +
8145 +/* HIF_CTRL_TX... defines */
8146 +#define HIF_CTRL_TX_CHECKSUM BIT(2)
8147 +
8148 +/* HIF_CTRL_RX... defines */
8149 +#define HIF_CTRL_RX_OFFSET_OFST (24)
8150 +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
8151 +#define HIF_CTRL_RX_CONTINUED BIT(1)
8152 +
8153 +struct pfe_hif {
8154 + /* To store registered clients in hif layer */
8155 + struct hif_client client[HIF_CLIENTS_MAX];
8156 + struct hif_shm *shm;
8157 + int irq;
8158 +
8159 + void *descr_baseaddr_v;
8160 + unsigned long descr_baseaddr_p;
8161 +
8162 + struct hif_desc *rx_base;
8163 + u32 rx_ring_size;
8164 + u32 rxtoclean_index;
8165 + void *rx_buf_addr[HIF_RX_DESC_NT];
8166 + int rx_buf_len[HIF_RX_DESC_NT];
8167 + unsigned int qno;
8168 + unsigned int client_id;
8169 + unsigned int client_ctrl;
8170 + unsigned int started;
8171 +
8172 + struct hif_desc *tx_base;
8173 + u32 tx_ring_size;
8174 + u32 txtosend;
8175 + u32 txtoclean;
8176 + u32 txavail;
8177 + u32 txtoflush;
8178 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
8179 +
8180 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
8181 + spinlock_t tx_lock;
8182 +/* lock synchronizes hif rx queue processing */
8183 + spinlock_t lock;
8184 + struct net_device dummy_dev;
8185 + struct napi_struct napi;
8186 + struct device *dev;
8187 +
8188 +#ifdef HIF_NAPI_STATS
8189 + unsigned int napi_counters[NAPI_MAX_COUNT];
8190 +#endif
8191 + struct tasklet_struct tx_cleanup_tasklet;
8192 +};
8193 +
8194 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
8195 + q_no, void *data, u32 len, unsigned int flags);
8196 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
8197 + void *data, unsigned int len);
8198 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
8199 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
8200 + data2);
8201 +int pfe_hif_init(struct pfe *pfe);
8202 +void pfe_hif_exit(struct pfe *pfe);
8203 +void pfe_hif_rx_idle(struct pfe_hif *hif);
8204 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
8205 +{
8206 + spin_lock_bh(&hif->tx_lock);
8207 + __hif_tx_done_process(hif, count);
8208 + spin_unlock_bh(&hif->tx_lock);
8209 +}
8210 +
8211 +static inline void hif_tx_lock(struct pfe_hif *hif)
8212 +{
8213 + spin_lock_bh(&hif->tx_lock);
8214 +}
8215 +
8216 +static inline void hif_tx_unlock(struct pfe_hif *hif)
8217 +{
8218 + spin_unlock_bh(&hif->tx_lock);
8219 +}
8220 +
8221 +static inline int __hif_tx_avail(struct pfe_hif *hif)
8222 +{
8223 + return hif->txavail;
8224 +}
8225 +
8226 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
8227 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
8228 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
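+
+/*
+ * The fixed-size helpers above match sizeof(struct __hif_hdr) == 8 and
+ * sizeof(struct __hif_desc) == 12; constant-length memcpy() calls let
+ * the compiler expand these small copies inline.
+ */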
8229 +
8230 +#endif /* _PFE_HIF_H_ */
8231 --- /dev/null
8232 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
8233 @@ -0,0 +1,640 @@
8234 +/*
8235 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8236 + * Copyright 2017 NXP
8237 + *
8238 + * This program is free software; you can redistribute it and/or modify
8239 + * it under the terms of the GNU General Public License as published by
8240 + * the Free Software Foundation; either version 2 of the License, or
8241 + * (at your option) any later version.
8242 + *
8243 + * This program is distributed in the hope that it will be useful,
8244 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8245 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8246 + * GNU General Public License for more details.
8247 + *
8248 + * You should have received a copy of the GNU General Public License
8249 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8250 + */
8251 +
8252 +#include <linux/version.h>
8253 +#include <linux/kernel.h>
8254 +#include <linux/slab.h>
8255 +#include <linux/interrupt.h>
8256 +#include <linux/workqueue.h>
8257 +#include <linux/dma-mapping.h>
8258 +#include <linux/dmapool.h>
8259 +#include <linux/sched.h>
8260 +#include <linux/skbuff.h>
8261 +#include <linux/moduleparam.h>
8262 +#include <linux/cpu.h>
8263 +
8264 +#include "pfe_mod.h"
8265 +#include "pfe_hif.h"
8266 +#include "pfe_hif_lib.h"
8267 +
8268 +unsigned int lro_mode;
8269 +unsigned int page_mode;
8270 +unsigned int tx_qos = 1;
8271 +module_param(tx_qos, uint, 0444);
8272 +MODULE_PARM_DESC(tx_qos, "0: disable,\n"
8273 +	"1: enable (default), guarantees no packet drop at TMU level\n");
8274 +unsigned int pfe_pkt_size;
8275 +unsigned int pfe_pkt_headroom;
8276 +unsigned int emac_txq_cnt;
8277 +
8278 +/*
8279 + * pfe_hif_lib.c:
8280 + * Common functions used by HIF client drivers
8281 + */
8282 +
8283 +/*HIF shared memory Global variable */
8284 +struct hif_shm ghif_shm;
8285 +
8286 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
8287 + * This function should be called after pfe_hif_exit
8288 + *
8289 + * @param[in] hif_shm Shared memory address location in DDR
8290 + */
8291 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8292 +{
8293 + int i;
8294 + void *pkt;
8295 +
8296 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8297 + pkt = hif_shm->rx_buf_pool[i];
8298 + if (pkt) {
8299 + hif_shm->rx_buf_pool[i] = NULL;
8300 + pkt -= pfe_pkt_headroom;
8301 +
8302 + if (page_mode)
8303 + put_page(virt_to_page(pkt));
8304 + else
8305 + kfree(pkt);
8306 + }
8307 + }
8308 +}
8309 +
8310 +/* Initialize shared memory used between HIF driver and clients,
8311 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8312 + * This function should be called before initializing HIF driver.
8313 + *
8314 + * @param[in] hif_shm Shared memory address location in DDR
8315 + * @return 0 on success, <0 on failure to initialize
8316 + */
8317 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8318 +{
8319 + int i;
8320 + void *pkt;
8321 +
8322 + memset(hif_shm, 0, sizeof(struct hif_shm));
8323 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8324 +
8325 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8326 + if (page_mode) {
8327 + pkt = (void *)__get_free_page(GFP_KERNEL |
8328 + GFP_DMA_PFE);
8329 + } else {
8330 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8331 + }
8332 +
8333 + if (pkt)
8334 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8335 + else
8336 + goto err0;
8337 + }
8338 +
8339 + return 0;
8340 +
8341 +err0:
8342 + pr_err("%s Low memory\n", __func__);
8343 + pfe_hif_shm_clean(hif_shm);
8344 + return -ENOMEM;
8345 +}
8346 +
8347 +/* This function sends an indication to the HIF driver
8348 + *
8349 + * @param[in] hif hif context
8350 + */
8351 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8352 + data2)
8353 +{
8354 + hif_process_client_req(hif, req, data1, data2);
8355 +}
8356 +
8357 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8358 +{
8359 + struct hif_client_s *client = pfe->hif_client[client_id];
8360 +
8361 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8362 + HIF_CLIENT_QUEUES_MAX))
8363 + return;
8364 +
8365 + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8366 + client->event_handler(client->priv, event_type, qno);
8367 +}
8368 +
8369 +/* This function releases the Rx queue descriptor memory and pre-filled buffers
8370 + *
8371 + * @param[in] client hif_client context
8372 + */
8373 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8374 +{
8375 + struct rx_queue_desc *desc;
8376 + int qno, ii;
8377 + void *buf;
8378 +
8379 + for (qno = 0; qno < client->rx_qn; qno++) {
8380 + desc = client->rx_q[qno].base;
8381 +
8382 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8383 + buf = (void *)desc->data;
8384 + if (buf) {
8385 + buf -= pfe_pkt_headroom;
8386 +
8387 + if (page_mode)
8388 + free_page((unsigned long)buf);
8389 + else
8390 + kfree(buf);
8391 +
8392 + desc->ctrl = 0;
8393 + }
8394 +
8395 + desc++;
8396 + }
8397 + }
8398 +
8399 + kfree(client->rx_qbase);
8400 +}
8401 +
8402 +/* This function allocates memory for the rxq descriptors and pre-fills the
8403 + * rx queues with buffers.
8404 + * @param[in] client client context
8405 + * @param[in] q_size size of the rxQ, all queues are of same size
8406 + */
8407 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8408 + q_size)
8409 +{
8410 + struct rx_queue_desc *desc;
8411 + struct hif_client_rx_queue *queue;
8412 + int ii, qno;
8413 +
8414 + /*Allocate memory for the client queues */
8415 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8416 + rx_queue_desc), GFP_KERNEL);
8417 + if (!client->rx_qbase)
8418 + goto err;
8419 +
8420 + for (qno = 0; qno < client->rx_qn; qno++) {
8421 + queue = &client->rx_q[qno];
8422 +
8423 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8424 + rx_queue_desc);
8425 + queue->size = q_size;
8426 + queue->read_idx = 0;
8427 + queue->write_idx = 0;
8428 +
8429 + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8430 + queue->base, queue->size);
8431 + }
8432 +
8433 + for (qno = 0; qno < client->rx_qn; qno++) {
8434 + queue = &client->rx_q[qno];
8435 + desc = queue->base;
8436 +
8437 + for (ii = 0; ii < queue->size; ii++) {
8438 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8439 + CL_DESC_OWN;
8440 + desc++;
8441 + }
8442 + }
8443 +
8444 + return 0;
8445 +
8446 +err:
8447 + return 1;
8448 +}
8449 +
8451 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8452 +{
8453 + pr_debug("%s\n", __func__);
8454 +
8455 + /*
8456 +	 * Check if there are any pending packets. The client must flush the
8457 +	 * tx queues before unregistering, by calling
8458 +	 * hif_lib_tx_get_next_complete().
8459 +	 *
8460 +	 * HIF no longer calls the event handler since we are no longer registered.
8461 + */
8462 + if (queue->tx_pending)
8463 + pr_err("%s: pending transmit packets\n", __func__);
8464 +}
8465 +
8466 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8467 +{
8468 + int qno;
8469 +
8470 + pr_debug("%s\n", __func__);
8471 +
8472 + for (qno = 0; qno < client->tx_qn; qno++)
8473 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8474 +
8475 + kfree(client->tx_qbase);
8476 +}
8477 +
8478 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8479 + q_size)
8480 +{
8481 + struct hif_client_tx_queue *queue;
8482 + int qno;
8483 +
8484 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8485 + tx_queue_desc), GFP_KERNEL);
8486 + if (!client->tx_qbase)
8487 + return 1;
8488 +
8489 + for (qno = 0; qno < client->tx_qn; qno++) {
8490 + queue = &client->tx_q[qno];
8491 +
8492 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8493 + tx_queue_desc);
8494 + queue->size = q_size;
8495 + queue->read_idx = 0;
8496 + queue->write_idx = 0;
8497 + queue->tx_pending = 0;
8498 + queue->nocpy_flag = 0;
8499 + queue->prev_tmu_tx_pkts = 0;
8500 + queue->done_tmu_tx_pkts = 0;
8501 +
8502 + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8503 + queue->base, queue->size);
8504 + }
8505 +
8506 + return 0;
8507 +}
8508 +
8509 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8510 +{
8511 + return 0;
8512 +}
8513 +
8514 +int hif_lib_client_register(struct hif_client_s *client)
8515 +{
8516 + struct hif_shm *hif_shm;
8517 + struct hif_client_shm *client_shm;
8518 + int err, i;
8520 +
8521 + pr_debug("%s\n", __func__);
8522 +
8523 + /*Allocate memory before spin_lock*/
8524 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8525 + err = -ENOMEM;
8526 + goto err_rx;
8527 + }
8528 +
8529 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8530 + err = -ENOMEM;
8531 + goto err_tx;
8532 + }
8533 +
8534 + spin_lock_bh(&pfe->hif.lock);
8535 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8536 + (pfe->hif_client[client->id])) {
8537 + err = -EINVAL;
8538 + goto err;
8539 + }
8540 +
8541 + hif_shm = client->pfe->hif.shm;
8542 +
8543 + if (!client->event_handler)
8544 + client->event_handler = hif_lib_event_dummy;
8545 +
8546 + /*Initialize client specific shared memory */
8547 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8548 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8549 + client_shm->rx_qsize = client->rx_qsize;
8550 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8551 + client_shm->tx_qsize = client->tx_qsize;
8552 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8553 + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8554 + /* spin_lock_init(&client->rx_lock); */
8555 +
8556 +	/* By default all events are unmasked */
8557 +	for (i = 0; i < HIF_EVENT_MAX; i++)
8558 +		client->queue_mask[i] = 0;
8562 +
8563 + /*Indicate to HIF driver*/
8564 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8565 +
8566 + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8567 + __func__, client, client->id, client->tx_qsize,
8568 + client->rx_qsize);
8569 +
8570 + client->cpu_id = -1;
8571 +
8572 + pfe->hif_client[client->id] = client;
8573 + spin_unlock_bh(&pfe->hif.lock);
8574 +
8575 + return 0;
8576 +
8577 +err:
8578 + spin_unlock_bh(&pfe->hif.lock);
8579 + hif_lib_client_release_tx_buffers(client);
8580 +
8581 +err_tx:
8582 + hif_lib_client_release_rx_buffers(client);
8583 +
8584 +err_rx:
8585 + return err;
8586 +}
8587 +
8588 +int hif_lib_client_unregister(struct hif_client_s *client)
8589 +{
8590 + struct pfe *pfe = client->pfe;
8591 + u32 client_id = client->id;
8592 +
8593 +	pr_info("%s: client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
8594 +		__func__, client, client->id, client->tx_qsize,
8595 +		client->rx_qsize);
8597 +
8598 + spin_lock_bh(&pfe->hif.lock);
8599 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8600 +
8601 + hif_lib_client_release_tx_buffers(client);
8602 + hif_lib_client_release_rx_buffers(client);
8603 + pfe->hif_client[client_id] = NULL;
8604 + spin_unlock_bh(&pfe->hif.lock);
8605 +
8606 + return 0;
8607 +}
8608 +
8609 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8610 + int qno)
8611 +{
8612 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8613 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
8614 +
8615 + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8616 + pr_debug("%s: Unsupported event : %d queue number : %d\n",
8617 + __func__, event, qno);
8618 + return -1;
8619 + }
8620 +
8621 + test_and_clear_bit(qno, &client->queue_mask[event]);
8622 +
8623 + switch (event) {
8624 + case EVENT_RX_PKT_IND:
8625 + if (!(desc->ctrl & CL_DESC_OWN))
8626 + hif_lib_indicate_client(client->id,
8627 + EVENT_RX_PKT_IND, qno);
8628 + break;
8629 +
8630 + case EVENT_HIGH_RX_WM:
8631 + case EVENT_TXDONE_IND:
8632 + default:
8633 + break;
8634 + }
8635 +
8636 + return 0;
8637 +}
8638 +
8639 +/*
8640 + * This function gets one packet from the specified client queue.
8641 + * It also refills the rx buffer.
8642 + */
8643 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8644 + *ofst, unsigned int *rx_ctrl,
8645 + unsigned int *desc_ctrl, void **priv_data)
8646 +{
8647 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8648 + struct rx_queue_desc *desc;
8649 + void *pkt = NULL;
8650 +
8651 + /*
8652 +	 * The following lock protects rx queue access from
8653 +	 * hif_lib_event_handler_start.
8654 +	 * In general the lock is not required, because hif_lib_xmit_pkt and
8655 +	 * hif_lib_event_handler_start are called from the napi poll, which is
8656 +	 * not re-entrant. But if some client uses them differently, the lock
8657 +	 * is required.
8658 + */
8659 + /*spin_lock_irqsave(&client->rx_lock, flags); */
8660 + desc = queue->base + queue->read_idx;
8661 + if (!(desc->ctrl & CL_DESC_OWN)) {
8662 + pkt = desc->data - pfe_pkt_headroom;
8663 +
8664 + *rx_ctrl = desc->client_ctrl;
8665 + *desc_ctrl = desc->ctrl;
8666 +
8667 + if (desc->ctrl & CL_DESC_FIRST) {
8668 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8669 +
8670 + if (size) {
8671 + size += PFE_PARSE_INFO_SIZE;
8672 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8673 + PFE_PKT_HEADER_SZ - size;
8674 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8675 + + size;
8676 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8677 + } else {
8678 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8679 + PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
8680 + *ofst = pfe_pkt_headroom
8681 + + PFE_PKT_HEADER_SZ
8682 + + PFE_PARSE_INFO_SIZE;
8683 + *priv_data = NULL;
8684 + }
8685 +
8686 + } else {
8687 + *len = CL_DESC_BUF_LEN(desc->ctrl);
8688 + *ofst = pfe_pkt_headroom;
8689 + }
8690 +
8691 + /*
8692 + * Needed so we don't free a buffer/page
8693 + * twice on module_exit
8694 + */
8695 + desc->data = NULL;
8696 +
8697 + /*
8698 + * Ensure everything else is written to DDR before
8699 + * writing bd->ctrl
8700 + */
8701 + smp_wmb();
8702 +
8703 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8704 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8705 + }
8706 +
8707 + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8708 + return pkt;
8709 +}
8710 +
8711 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8712 + client_id, unsigned int qno,
8713 + u32 client_ctrl)
8714 +{
8715 +	/* Optimize the write since the destination may be non-cacheable */
8716 + if (!((unsigned long)pkt_hdr & 0x3)) {
8717 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8718 + client_id;
8719 + } else {
8720 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8721 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8722 + }
8723 +}
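+
+/*
+ * Layout check for hif_hdr_write(), assuming a little-endian host as on
+ * LS1012A Linux: the single aligned store of
+ * (client_ctrl << 16) | (qno << 8) | client_id places client_id in
+ * byte 0, q_num in byte 1 and client_ctrl in bytes 2-3, matching the
+ * start of struct hif_hdr in pfe_hif.h.
+ */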
8724 +
8725 +/*This function puts the given packet in the specific client queue */
8726 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8727 + *data, unsigned int len, u32 client_ctrl,
8728 + unsigned int flags, void *client_data)
8729 +{
8730 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8731 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
8732 +
8733 + /* First buffer */
8734 + if (flags & HIF_FIRST_BUFFER) {
8735 + data -= sizeof(struct hif_hdr);
8736 + len += sizeof(struct hif_hdr);
8737 +
8738 + hif_hdr_write(data, client->id, qno, client_ctrl);
8739 + }
8740 +
8741 + desc->data = client_data;
8742 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8743 +
8744 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8745 +
8746 + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8747 + queue->tx_pending++;
8748 + queue->jiffies_last_packet = jiffies;
8749 +}
8750 +
8751 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8752 + unsigned int *flags, int count)
8753 +{
8754 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8755 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
8756 +
8757 + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
8758 + queue->read_idx, queue->tx_pending);
8759 +
8760 + if (!queue->tx_pending)
8761 + return NULL;
8762 +
8763 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
8764 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
8765 + client->id, TMU_DM_TX_TRANS, 4));
8766 +
8767 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
8768 + queue->done_tmu_tx_pkts = UINT_MAX -
8769 + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
8770 + else
8771 + queue->done_tmu_tx_pkts = tmu_tx_pkts -
8772 + queue->prev_tmu_tx_pkts;
8773 +
8774 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
8775 +
8776 + if (!queue->done_tmu_tx_pkts)
8777 + return NULL;
8778 + }
8779 +
8780 + if (desc->ctrl & CL_DESC_OWN)
8781 + return NULL;
8782 +
8783 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8784 + queue->tx_pending--;
8785 +
8786 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
8787 +
8788 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
8789 + queue->done_tmu_tx_pkts--;
8790 +
8791 + return desc->data;
8792 +}
8793 +
8794 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
8795 +{
8796 + int i, q;
8797 +
8798 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
8799 + for (q = 0; q < emac_txq_cnt; q++) {
8800 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
8801 + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
8802 + pfe->tmu_credit.tx_credit[i][q] =
8803 + pfe->tmu_credit.tx_credit_max[i][q];
8804 + }
8805 +}
8806 +
8807 +/* __hif_lib_update_credit
8808 + *
8809 + * @param[in] client	hif client context
8810 + * @param[in] queue	queue number, matching the TMU queue
8811 + */
8812 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
8813 +{
8814 + unsigned int tmu_tx_packets, tmp;
8815 +
8816 + if (tx_qos) {
8817 + tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
8818 + client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
8819 +
8820 + /* tx_packets counter overflowed */
8821 + if (tmu_tx_packets >
8822 + pfe->tmu_credit.tx_packets[client->id][queue]) {
8823 + tmp = UINT_MAX - tmu_tx_packets +
8824 + pfe->tmu_credit.tx_packets[client->id][queue];
8825 +
8826 + pfe->tmu_credit.tx_credit[client->id][queue] =
8827 + pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
8828 + } else {
8829 +			/* TMU tx <= pfe_eth tx: the normal case, or both
8830 +			 * counters have overflowed since last time
8831 +			 */
8832 + pfe->tmu_credit.tx_credit[client->id][queue] =
8833 + pfe->tmu_credit.tx_credit_max[client->id][queue] -
8834 + (pfe->tmu_credit.tx_packets[client->id][queue] -
8835 + tmu_tx_packets);
8836 + }
8837 + }
8838 +}
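
A side note on the overflow handling above: because the counters are 32-bit unsigned values, unsigned subtraction is modulo 2^32, so the outstanding-packet count can also be computed in one expression. A minimal illustration, not part of the driver:

	/* Packets enqueued by the driver minus packets the TMU has
	 * transmitted; correct even when either counter has wrapped.
	 */
	static inline u32 example_tx_outstanding(u32 driver_tx, u32 tmu_tx)
	{
		return driver_tx - tmu_tx;
	}
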
8839 +
8840 +int pfe_hif_lib_init(struct pfe *pfe)
8841 +{
8842 + int rc;
8843 +
8844 + pr_info("%s\n", __func__);
8845 +
8846 + if (lro_mode) {
8847 + page_mode = 1;
8848 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
8849 + pfe_pkt_headroom = 0;
8850 + } else {
8851 + page_mode = 0;
8852 + pfe_pkt_size = PFE_PKT_SIZE;
8853 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
8854 + }
8855 +
8856 + if (tx_qos)
8857 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
8858 + else
8859 + emac_txq_cnt = EMAC_TXQ_CNT;
8860 +
8861 + hif_lib_tmu_credit_init(pfe);
8862 + pfe->hif.shm = &ghif_shm;
8863 + rc = pfe_hif_shm_init(pfe->hif.shm);
8864 +
8865 + return rc;
8866 +}
8867 +
8868 +void pfe_hif_lib_exit(struct pfe *pfe)
8869 +{
8870 + pr_info("%s\n", __func__);
8871 +
8872 + pfe_hif_shm_clean(pfe->hif.shm);
8873 +}
8874 --- /dev/null
8875 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8876 @@ -0,0 +1,241 @@
8877 +/*
8878 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8879 + * Copyright 2017 NXP
8880 + *
8881 + * This program is free software; you can redistribute it and/or modify
8882 + * it under the terms of the GNU General Public License as published by
8883 + * the Free Software Foundation; either version 2 of the License, or
8884 + * (at your option) any later version.
8885 + *
8886 + * This program is distributed in the hope that it will be useful,
8887 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8888 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8889 + * GNU General Public License for more details.
8890 + *
8891 + * You should have received a copy of the GNU General Public License
8892 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8893 + */
8894 +
8895 +#ifndef _PFE_HIF_LIB_H_
8896 +#define _PFE_HIF_LIB_H_
8897 +
8898 +#include "pfe_hif.h"
8899 +
8900 +#define HIF_CL_REQ_TIMEOUT 10
8901 +#define GFP_DMA_PFE 0
8902 +#define PFE_PARSE_INFO_SIZE 16
8903 +
8904 +enum {
8905 + REQUEST_CL_REGISTER = 0,
8906 + REQUEST_CL_UNREGISTER,
8907 + HIF_REQUEST_MAX
8908 +};
8909 +
8910 +enum {
8911 +	/* Event to indicate the client rx queue reached its watermark level */
8912 +	EVENT_HIGH_RX_WM = 0,
8913 +	/* Event to indicate a packet was received for the client */
8914 +	EVENT_RX_PKT_IND,
8915 +	/* Event to indicate packet tx completion for the client */
8916 +	EVENT_TXDONE_IND,
8917 + HIF_EVENT_MAX
8918 +};
8919 +
8920 +/* structure to store client queue info */
8923 +struct hif_client_rx_queue {
8924 + struct rx_queue_desc *base;
8925 + u32 size;
8926 + u32 read_idx;
8927 + u32 write_idx;
8928 +};
8929 +
8930 +struct hif_client_tx_queue {
8931 + struct tx_queue_desc *base;
8932 + u32 size;
8933 + u32 read_idx;
8934 + u32 write_idx;
8935 + u32 tx_pending;
8936 + unsigned long jiffies_last_packet;
8937 + u32 nocpy_flag;
8938 + u32 prev_tmu_tx_pkts;
8939 + u32 done_tmu_tx_pkts;
8940 +};
8941 +
8942 +struct hif_client_s {
8943 + int id;
8944 + int tx_qn;
8945 + int rx_qn;
8946 + void *rx_qbase;
8947 + void *tx_qbase;
8948 + int tx_qsize;
8949 + int rx_qsize;
8950 + int cpu_id;
8951 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8952 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8953 + int (*event_handler)(void *priv, int event, int data);
8954 + unsigned long queue_mask[HIF_EVENT_MAX];
8955 + struct pfe *pfe;
8956 + void *priv;
8957 +};
8958 +
8959 +/*
8960 + * Client specific shared memory
8961 + * It contains the number of Rx/Tx queues, their base addresses and sizes
8962 + */
8963 +struct hif_client_shm {
8964 + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
8965 + unsigned long rx_qbase; /*Rx queue base address */
8966 + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
8967 + unsigned long tx_qbase; /* Tx queue base address */
8968 + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
8969 +};
8970 +
8971 +/*Client shared memory ctrl bit description */
8972 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
8973 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
8974 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
8975 + & 0xFF)
8976 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
8977 + & 0xFF)
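
A worked example of the ctrl encoding, added for illustration:

	/* e.g. ctrl = 0x0302 encodes 3 tx queues and 2 rx queues:
	 *   CLIENT_CTRL_RX_Q_CNT(0x0302) == 2
	 *   CLIENT_CTRL_TX_Q_CNT(0x0302) == 3
	 */
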
8978 +
8979 +/*
8980 + * Shared memory used to communicate between the HIF driver and host/client
8981 + * drivers. Before starting the HIF driver, rx_buf_pool and rx_buf_pool_cnt
8982 + * should be initialized with the host buffers and the buffer count in the
8983 + * pool. rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
8984 + */
8986 +struct hif_shm {
8987 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
8988 + /*Rx buffers required to initialize HIF rx descriptors */
8989 + void *rx_buf_pool[HIF_RX_DESC_NT];
8990 + unsigned long g_client_status[2]; /*Global client status bit mask */
8991 + /* Client specific shared memory */
8992 + struct hif_client_shm client[HIF_CLIENTS_MAX];
8993 +};
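
To make the comment above concrete, here is a hedged sketch of how the host side might seed the pool before starting the HIF driver; kmalloc() stands in for whatever allocator the platform actually uses, and error unwinding is omitted:

	static int example_hif_shm_seed(struct hif_shm *hif_shm)
	{
		int i;

		memset(hif_shm, 0, sizeof(struct hif_shm));
		hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;

		for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
			/* one host buffer per HIF rx descriptor */
			hif_shm->rx_buf_pool[i] = kmalloc(PFE_BUF_SIZE,
							  GFP_KERNEL);
			if (!hif_shm->rx_buf_pool[i])
				return -ENOMEM;
		}

		return 0;
	}
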
8994 +
8995 +#define CL_DESC_OWN	BIT(31)
8996 +/* This sets ownership to the HIF driver */
8997 +#define CL_DESC_LAST	BIT(30)
8998 +/* This indicates the last packet for multi-buffer handling */
8999 +#define CL_DESC_FIRST	BIT(29)
9000 +/* This indicates the first packet for multi-buffer handling */
9001 +
9002 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
9003 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
9004 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
9005 +
9006 +struct rx_queue_desc {
9007 + void *data;
9008 +	u32 ctrl; /* bits 0-15: len, 16-19: flags, 31: ownership */
9009 + u32 client_ctrl;
9010 +};
9011 +
9012 +struct tx_queue_desc {
9013 + void *data;
9014 +	u32 ctrl; /* bits 0-15: len, 16-19: flags, 31: ownership */
9015 +};
9016 +
9017 +/* HIF Rx does not work properly for 2-byte aligned buffers, and the
9018 + * ip_header should be 4-byte aligned for better performance.
9019 + * "ip_header = 64 + 6 (hif_header) + 14 (MAC header)" will be 4-byte aligned.
9020 + */
9021 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
9022 +/* must be big enough for headroom, pkt size and skb shared info */
9023 +#define PFE_BUF_SIZE 2048
9024 +#define PFE_PKT_HEADROOM 128
9025 +
9026 +#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
9027 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
9028 + - SKB_SHARED_INFO_SIZE)
9029 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
9030 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
9031 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
9032 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
9033 + + MAX_L4_HDR_SIZE)
9034 +/* Used in page mode to clamp the packet size to the maximum supported by
9035 + * the hif hw interface (<16KiB)
9036 + */
9037 +#define MAX_PFE_PKT_SIZE 16380UL
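
A worked example of the sizing above, assuming SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) evaluates to 320 bytes (a typical 64-bit figure; the exact value varies by kernel version and architecture):

	/*
	 *   PFE_PKT_SIZE = PFE_BUF_SIZE - PFE_PKT_HEADROOM - SKB_SHARED_INFO_SIZE
	 *                = 2048 - 128 - 320 = 1600 bytes
	 *
	 * which still covers a full 1500-byte MTU frame plus the L2 header.
	 */
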
9038 +
9039 +extern unsigned int pfe_pkt_size;
9040 +extern unsigned int pfe_pkt_headroom;
9041 +extern unsigned int page_mode;
9042 +extern unsigned int lro_mode;
9043 +extern unsigned int tx_qos;
9044 +extern unsigned int emac_txq_cnt;
9045 +
9046 +int pfe_hif_lib_init(struct pfe *pfe);
9047 +void pfe_hif_lib_exit(struct pfe *pfe);
9048 +int hif_lib_client_register(struct hif_client_s *client);
9049 +int hif_lib_client_unregister(struct hif_client_s *client);
9050 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
9051 + *data, unsigned int len, u32 client_ctrl,
9052 + unsigned int flags, void *client_data);
9053 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
9054 + unsigned int len, u32 client_ctrl, void *client_data);
9055 +void hif_lib_indicate_client(int cl_id, int event, int data);
9056 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
9057 + data);
9058 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
9059 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
9060 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
9061 + unsigned int *flags, int count);
9062 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
9063 + *ofst, unsigned int *rx_ctrl,
9064 + unsigned int *desc_ctrl, void **priv_data);
9065 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
9066 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
9067 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
9068 + enable);
9069 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
9070 + qno)
9071 +{
9072 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
9073 +
9074 + return (queue->size - queue->tx_pending);
9075 +}
9076 +
9077 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
9078 + int qno)
9079 +{
9080 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
9081 +
9082 + return queue->write_idx;
9083 +}
9084 +
9085 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
9086 + qno)
9087 +{
9088 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
9089 +
9090 + return queue->tx_pending;
9091 +}
9092 +
9093 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
9094 + ((pfe)->tmu_credit.tx_credit[id][qno])
9095 +
9096 +#define hif_lib_tx_credit_max(pfe, id, qno) \
9097 + ((pfe)->tmu_credit.tx_credit_max[id][qno])
9098 +
9102 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
9103 + ({ typeof(pfe) pfe_ = pfe; \
9104 + typeof(id) id_ = id; \
9105 +	  typeof(qno) qno_ = qno; \
9106 + typeof(credit) credit_ = credit; \
9107 + do { \
9108 + if (tx_qos) { \
9109 + (pfe_)->tmu_credit.tx_credit[id_][qno_]\
9110 + -= credit_; \
9111 + (pfe_)->tmu_credit.tx_packets[id_][qno_]\
9112 + += credit_; \
9113 + } \
9114 + } while (0); \
9115 + })
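
For illustration, the check-then-consume pattern a tx path would typically follow with these credit macros; the wrapper function is hypothetical:

	static int example_tx_take_credit(struct pfe *pfe, int id, int qno)
	{
		/* refuse the packet when no TMU credit remains */
		if (!hif_lib_tx_credit_avail(pfe, id, qno))
			return -EBUSY;

		hif_lib_tx_credit_use(pfe, id, qno, 1);
		return 0;
	}
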
9116 +
9117 +#endif /* _PFE_HIF_LIB_H_ */
9118 --- /dev/null
9119 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
9120 @@ -0,0 +1,176 @@
9121 +/*
9122 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9123 + * Copyright 2017 NXP
9124 + *
9125 + * This program is free software; you can redistribute it and/or modify
9126 + * it under the terms of the GNU General Public License as published by
9127 + * the Free Software Foundation; either version 2 of the License, or
9128 + * (at your option) any later version.
9129 + *
9130 + * This program is distributed in the hope that it will be useful,
9131 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9132 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9133 + * GNU General Public License for more details.
9134 + *
9135 + * You should have received a copy of the GNU General Public License
9136 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9137 + */
9138 +
9139 +#include "pfe_mod.h"
9140 +#include "pfe_hw.h"
9141 +
9142 +/* Functions to handle most of pfe hw register initialization */
9143 +int pfe_hw_init(struct pfe *pfe, int resume)
9144 +{
9145 + struct class_cfg class_cfg = {
9146 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9147 + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
9148 + ROUTE_TABLE_BASEADDR,
9149 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
9150 + };
9151 +
9152 + struct tmu_cfg tmu_cfg = {
9153 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9154 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
9155 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
9156 + };
9157 +
9158 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9159 + struct util_cfg util_cfg = {
9160 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
9161 + };
9162 +#endif
9163 +
9164 + struct BMU_CFG bmu1_cfg = {
9165 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
9166 + BMU1_LMEM_BASEADDR),
9167 + .count = BMU1_BUF_COUNT,
9168 + .size = BMU1_BUF_SIZE,
9169 + .low_watermark = 10,
9170 + .high_watermark = 15,
9171 + };
9172 +
9173 + struct BMU_CFG bmu2_cfg = {
9174 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
9175 + BMU2_DDR_BASEADDR),
9176 + .count = BMU2_BUF_COUNT,
9177 + .size = BMU2_BUF_SIZE,
9178 + .low_watermark = 250,
9179 + .high_watermark = 253,
9180 + };
9181 +
9182 + struct gpi_cfg egpi1_cfg = {
9183 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
9184 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
9185 + .aseq_len = EGPI1_ASEQ_LEN,
9186 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
9187 + EMAC_TCNTRL_REG),
9188 + };
9189 +
9190 + struct gpi_cfg egpi2_cfg = {
9191 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
9192 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
9193 + .aseq_len = EGPI2_ASEQ_LEN,
9194 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
9195 + EMAC_TCNTRL_REG),
9196 + };
9197 +
9198 + struct gpi_cfg hgpi_cfg = {
9199 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
9200 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
9201 + .aseq_len = HGPI_ASEQ_LEN,
9202 + .mtip_pause_reg = 0,
9203 + };
9204 +
9205 + pr_info("%s\n", __func__);
9206 +
9207 +#if !defined(LS1012A_PFE_RESET_WA)
9208 + /* LS1012A needs this to make PE work correctly */
9209 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
9210 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
9211 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
9212 + usleep_range(10, 20);
9213 +#endif
9214 +
9215 + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
9216 + pr_info("TMU version: %x\n", readl(TMU_VERSION));
9217 +
9218 + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
9219 + BMU_VERSION));
9220 + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
9221 + BMU_VERSION));
9222 +
9223 + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
9224 + GPI_VERSION));
9225 + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
9226 + GPI_VERSION));
9227 + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
9228 + GPI_VERSION));
9229 +
9230 + pr_info("HIF version: %x\n", readl(HIF_VERSION));
9231 +	pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
9232 +
9233 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9234 + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
9235 +#endif
9236 + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
9237 + ;
9238 +
9239 + hif_rx_disable();
9240 + hif_tx_disable();
9241 +
9242 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
9243 +
9244 + pr_info("bmu_init(1) done\n");
9245 +
9246 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
9247 +
9248 + pr_info("bmu_init(2) done\n");
9249 +
9250 + class_cfg.resume = resume ? 1 : 0;
9251 +
9252 + class_init(&class_cfg);
9253 +
9254 + pr_info("class_init() done\n");
9255 +
9256 + tmu_init(&tmu_cfg);
9257 +
9258 + pr_info("tmu_init() done\n");
9259 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9260 + util_init(&util_cfg);
9261 +
9262 + pr_info("util_init() done\n");
9263 +#endif
9264 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
9265 +
9266 + pr_info("gpi_init(1) done\n");
9267 +
9268 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
9269 +
9270 + pr_info("gpi_init(2) done\n");
9271 +
9272 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
9273 +
9274 + pr_info("gpi_init(hif) done\n");
9275 +
9276 + bmu_enable(BMU1_BASE_ADDR);
9277 +
9278 + pr_info("bmu_enable(1) done\n");
9279 +
9280 + bmu_enable(BMU2_BASE_ADDR);
9281 +
9282 + pr_info("bmu_enable(2) done\n");
9283 +
9284 + return 0;
9285 +}
9286 +
9287 +void pfe_hw_exit(struct pfe *pfe)
9288 +{
9289 + pr_info("%s\n", __func__);
9290 +
9291 + bmu_disable(BMU1_BASE_ADDR);
9292 + bmu_reset(BMU1_BASE_ADDR);
9293 +
9294 + bmu_disable(BMU2_BASE_ADDR);
9295 + bmu_reset(BMU2_BASE_ADDR);
9296 +}
9297 --- /dev/null
9298 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
9299 @@ -0,0 +1,27 @@
9300 +/*
9301 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9302 + * Copyright 2017 NXP
9303 + *
9304 + * This program is free software; you can redistribute it and/or modify
9305 + * it under the terms of the GNU General Public License as published by
9306 + * the Free Software Foundation; either version 2 of the License, or
9307 + * (at your option) any later version.
9308 + *
9309 + * This program is distributed in the hope that it will be useful,
9310 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9311 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9312 + * GNU General Public License for more details.
9313 + *
9314 + * You should have received a copy of the GNU General Public License
9315 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9316 + */
9317 +
9318 +#ifndef _PFE_HW_H_
9319 +#define _PFE_HW_H_
9320 +
9321 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
9322 +
9323 +int pfe_hw_init(struct pfe *pfe, int resume);
9324 +void pfe_hw_exit(struct pfe *pfe);
9325 +
9326 +#endif /* _PFE_HW_H_ */
9327 --- /dev/null
9328 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9329 @@ -0,0 +1,385 @@
9330 +/*
9331 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9332 + * Copyright 2017 NXP
9333 + *
9334 + * This program is free software; you can redistribute it and/or modify
9335 + * it under the terms of the GNU General Public License as published by
9336 + * the Free Software Foundation; either version 2 of the License, or
9337 + * (at your option) any later version.
9338 + *
9339 + * This program is distributed in the hope that it will be useful,
9340 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9341 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9342 + * GNU General Public License for more details.
9343 + *
9344 + * You should have received a copy of the GNU General Public License
9345 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9346 + */
9347 +
9348 +#include <linux/module.h>
9349 +#include <linux/device.h>
9350 +#include <linux/of_net.h>
9351 +#include <linux/of_address.h>
9352 +#include <linux/platform_device.h>
9353 +#include <linux/slab.h>
9354 +#include <linux/clk.h>
9355 +#include <linux/mfd/syscon.h>
9356 +#include <linux/regmap.h>
9357 +
9358 +#include "pfe_mod.h"
9359 +
9360 +struct ls1012a_pfe_platform_data pfe_platform_data;
9361 +
9362 +static int pfe_get_gemac_if_properties(struct device_node *parent, int port, int
9363 + if_cnt,
9364 + struct ls1012a_pfe_platform_data
9365 + *pdata)
9366 +{
9367 + struct device_node *gem = NULL, *phy = NULL;
9368 + int size;
9369 + int ii = 0, phy_id = 0;
9370 + const u32 *addr;
9371 + const void *mac_addr;
9372 +
9373 + for (ii = 0; ii < if_cnt; ii++) {
9374 + gem = of_get_next_child(parent, gem);
9375 + if (!gem)
9376 + goto err;
9377 + addr = of_get_property(gem, "reg", &size);
9378 + if (addr && (be32_to_cpup(addr) == port))
9379 + break;
9380 + }
9381 +
9382 + if (ii >= if_cnt) {
9383 +		pr_err("%s:%d Failed to find interface = %d\n",
9384 +		       __func__, __LINE__, port);
9385 + goto err;
9386 + }
9387 +
9388 + pdata->ls1012a_eth_pdata[port].gem_id = port;
9389 +
9390 + mac_addr = of_get_mac_address(gem);
9391 +
9392 + if (mac_addr) {
9393 + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9394 + ETH_ALEN);
9395 + }
9396 +
9397 + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9398 +
9399 + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9400 + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9401 + __LINE__);
9402 +
9403 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
9404 + if (!addr)
9405 + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
9406 + __LINE__);
9407 + else
9408 + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
9409 +
9410 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9411 + if (!addr) {
9412 + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9413 + __LINE__);
9414 + } else {
9415 + phy_id = be32_to_cpup(addr);
9416 + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9417 + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9418 + }
9419 +
9420 + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
9421 + if (!addr)
9422 + pr_err("%s: Invalid mdio-mux-val....\n", __func__);
9423 + else
9424 + phy_id = be32_to_cpup(addr);
9425 + pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
9426 +
9427 + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9428 + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9429 + pdata->ls1012a_eth_pdata[port].mdio_muxval;
9430 +
9431 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9432 + if (!addr)
9433 + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
9434 + __func__, __LINE__);
9435 + else
9436 + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9437 +
9438 + /* If PHY is enabled, read mdio properties */
9439 + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9440 + goto done;
9441 +
9442 + phy = of_get_next_child(gem, NULL);
9443 +
9444 + addr = of_get_property(phy, "reg", &size);
9445 +
9446 + if (!addr)
9447 + pr_err("%s:%d Invalid phy enable flag....\n",
9448 + __func__, __LINE__);
9449 + else
9450 + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
9451 +
9452 + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9453 +
9454 +done:
9455 +
9456 + return 0;
9457 +
9458 +err:
9459 + return -1;
9460 +}
9461 +
9462 +/*
9463 + * pfe_platform_probe -
9464 + */
9468 +static int pfe_platform_probe(struct platform_device *pdev)
9469 +{
9470 + struct resource res;
9471 + int ii, rc, interface_count = 0, size = 0;
9472 + const u32 *prop;
9473 + struct device_node *np;
9474 + struct clk *pfe_clk;
9475 +
9476 + np = pdev->dev.of_node;
9477 +
9478 + if (!np) {
9479 + pr_err("Invalid device node\n");
9480 + return -EINVAL;
9481 + }
9482 +
9483 + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9484 + if (!pfe) {
9485 + rc = -ENOMEM;
9486 + goto err_alloc;
9487 + }
9488 +
9489 + platform_set_drvdata(pdev, pfe);
9490 +
9491 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9492 +
9493 + if (of_address_to_resource(np, 1, &res)) {
9494 + rc = -ENOMEM;
9495 + pr_err("failed to get ddr resource\n");
9496 + goto err_ddr;
9497 + }
9498 +
9499 + pfe->ddr_phys_baseaddr = res.start;
9500 + pfe->ddr_size = resource_size(&res);
9501 + pfe->ddr_baseaddr = phys_to_virt(res.start);
9502 +
9503 + pfe->scfg =
9504 + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9505 + "fsl,pfe-scfg");
9506 +	if (IS_ERR(pfe->scfg)) {
9507 +		dev_err(&pdev->dev, "No syscfg phandle specified\n");
9508 +		rc = PTR_ERR(pfe->scfg);
9509 +		goto err_ddr;
9510 +	}
9510 +
9511 + pfe->cbus_baseaddr = of_iomap(np, 0);
9512 + if (!pfe->cbus_baseaddr) {
9513 + rc = -ENOMEM;
9514 + pr_err("failed to get axi resource\n");
9515 + goto err_axi;
9516 + }
9517 +
9518 + pfe->hif_irq = platform_get_irq(pdev, 0);
9519 + if (pfe->hif_irq < 0) {
9520 + pr_err("platform_get_irq for hif failed\n");
9521 + rc = pfe->hif_irq;
9522 + goto err_hif_irq;
9523 + }
9524 +
9525 + pfe->wol_irq = platform_get_irq(pdev, 2);
9526 + if (pfe->wol_irq < 0) {
9527 + pr_err("platform_get_irq for WoL failed\n");
9528 + rc = pfe->wol_irq;
9529 + goto err_hif_irq;
9530 + }
9531 +
9532 + /* Read interface count */
9533 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9534 + if (!prop) {
9535 + pr_err("Failed to read number of interfaces\n");
9536 + rc = -ENXIO;
9537 + goto err_prop;
9538 + }
9539 +
9540 + interface_count = be32_to_cpup(prop);
9541 + if (interface_count <= 0) {
9542 +		pr_err("Invalid ethernet interface count: %d\n",
9543 +		       interface_count);
9544 + rc = -ENXIO;
9545 + goto err_prop;
9546 + }
9547 +
9548 + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9549 +
9550 + for (ii = 0; ii < interface_count; ii++) {
9551 +		pfe_get_gemac_if_properties(np, ii, interface_count,
9552 + &pfe_platform_data);
9553 + }
9554 +
9555 + pfe->dev = &pdev->dev;
9556 +
9557 + pfe->dev->platform_data = &pfe_platform_data;
9558 +
9559 + /* declare WoL capabilities */
9560 + device_init_wakeup(&pdev->dev, true);
9561 +
9562 + /* find the clocks */
9563 + pfe_clk = devm_clk_get(pfe->dev, "pfe");
9564 +	if (IS_ERR(pfe_clk)) {
9565 +		rc = PTR_ERR(pfe_clk);
9566 +		goto err_hif_irq;
9567 +	}
9566 +
9567 + /* PFE clock is (platform clock / 2) */
9568 + /* save sys_clk value as KHz */
9569 + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9570 +
9571 + rc = pfe_probe(pfe);
9572 + if (rc < 0)
9573 + goto err_probe;
9574 +
9575 + return 0;
9576 +
9577 +err_probe:
9578 +err_prop:
9579 +err_hif_irq:
9580 + iounmap(pfe->cbus_baseaddr);
9581 +
9582 +err_axi:
9583 +err_ddr:
9584 + platform_set_drvdata(pdev, NULL);
9585 +
9586 + kfree(pfe);
9587 +
9588 +err_alloc:
9589 + return rc;
9590 +}
9591 +
9592 +/*
9593 + * pfe_platform_remove -
9594 + */
9595 +static int pfe_platform_remove(struct platform_device *pdev)
9596 +{
9597 + struct pfe *pfe = platform_get_drvdata(pdev);
9598 + int rc;
9599 +
9600 + pr_info("%s\n", __func__);
9601 +
9602 + rc = pfe_remove(pfe);
9603 +
9604 + iounmap(pfe->cbus_baseaddr);
9605 +
9606 + platform_set_drvdata(pdev, NULL);
9607 +
9608 + kfree(pfe);
9609 +
9610 + return rc;
9611 +}
9612 +
9613 +#ifdef CONFIG_PM
9614 +#ifdef CONFIG_PM_SLEEP
9615 +static int pfe_platform_suspend(struct device *dev)
9616 +{
9617 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9618 + struct net_device *netdev;
9619 + int i;
9620 +
9621 + pfe->wake = 0;
9622 +
9623 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9624 + netdev = pfe->eth.eth_priv[i]->ndev;
9625 +
9626 + netif_device_detach(netdev);
9627 +
9628 + if (netif_running(netdev))
9629 + if (pfe_eth_suspend(netdev))
9630 + pfe->wake = 1;
9631 + }
9632 +
9633 + /* Shutdown PFE only if we're not waking up the system */
9634 + if (!pfe->wake) {
9635 +#if defined(LS1012A_PFE_RESET_WA)
9636 + pfe_hif_rx_idle(&pfe->hif);
9637 +#endif
9638 + pfe_ctrl_suspend(&pfe->ctrl);
9639 + pfe_firmware_exit(pfe);
9640 +
9641 + pfe_hif_exit(pfe);
9642 + pfe_hif_lib_exit(pfe);
9643 +
9644 + pfe_hw_exit(pfe);
9645 + }
9646 +
9647 + return 0;
9648 +}
9649 +
9650 +static int pfe_platform_resume(struct device *dev)
9651 +{
9652 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9653 + struct net_device *netdev;
9654 + int i;
9655 +
9656 + if (!pfe->wake) {
9657 + pfe_hw_init(pfe, 1);
9658 + pfe_hif_lib_init(pfe);
9659 + pfe_hif_init(pfe);
9660 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9661 + util_enable();
9662 +#endif
9663 + tmu_enable(0xf);
9664 + class_enable();
9665 + pfe_ctrl_resume(&pfe->ctrl);
9666 + }
9667 +
9668 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9669 + netdev = pfe->eth.eth_priv[i]->ndev;
9670 +
9671 + if (pfe->eth.eth_priv[i]->mii_bus)
9672 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
9673 +
9674 + if (netif_running(netdev))
9675 + pfe_eth_resume(netdev);
9676 +
9677 + netif_device_attach(netdev);
9678 + }
9679 + return 0;
9680 +}
9681 +#else
9682 +#define pfe_platform_suspend NULL
9683 +#define pfe_platform_resume NULL
9684 +#endif
9685 +
9686 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9687 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9688 +};
9689 +#endif
9690 +
9691 +static const struct of_device_id pfe_match[] = {
9692 + {
9693 + .compatible = "fsl,pfe",
9694 + },
9695 + {},
9696 +};
9697 +MODULE_DEVICE_TABLE(of, pfe_match);
9698 +
9699 +static struct platform_driver pfe_platform_driver = {
9700 + .probe = pfe_platform_probe,
9701 + .remove = pfe_platform_remove,
9702 + .driver = {
9703 + .name = "pfe",
9704 + .of_match_table = pfe_match,
9705 +#ifdef CONFIG_PM
9706 + .pm = &pfe_platform_pm_ops,
9707 +#endif
9708 + },
9709 +};
9710 +
9711 +module_platform_driver(pfe_platform_driver);
9712 +MODULE_LICENSE("GPL");
9713 +MODULE_DESCRIPTION("PFE Ethernet driver");
9714 +MODULE_AUTHOR("NXP DNCPE");
9715 --- /dev/null
9716 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9717 @@ -0,0 +1,156 @@
9718 +/*
9719 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9720 + * Copyright 2017 NXP
9721 + *
9722 + * This program is free software; you can redistribute it and/or modify
9723 + * it under the terms of the GNU General Public License as published by
9724 + * the Free Software Foundation; either version 2 of the License, or
9725 + * (at your option) any later version.
9726 + *
9727 + * This program is distributed in the hope that it will be useful,
9728 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9729 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9730 + * GNU General Public License for more details.
9731 + *
9732 + * You should have received a copy of the GNU General Public License
9733 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9734 + */
9735 +
9736 +#include <linux/dma-mapping.h>
9737 +#include "pfe_mod.h"
9738 +
9739 +unsigned int us;
9740 +module_param(us, uint, 0444);
9741 +MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
9742 + "1: module enabled for userspace networking\n");
9743 +struct pfe *pfe;
9744 +
9745 +/*
9746 + * pfe_probe -
9747 + */
9748 +int pfe_probe(struct pfe *pfe)
9749 +{
9750 + int rc;
9751 +
9752 + if (pfe->ddr_size < DDR_MAX_SIZE) {
9753 +		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
9754 + __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9755 + rc = -ENOMEM;
9756 + goto err_hw;
9757 + }
9758 +
9759 + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9760 + (8 * SZ_1M - 1)) != 0) {
9761 + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9762 + __func__, (int)pfe->ddr_phys_baseaddr +
9763 + BMU2_DDR_BASEADDR);
9764 + rc = -ENOMEM;
9765 + goto err_hw;
9766 + }
9767 +
9768 + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9769 + (unsigned long)pfe->cbus_baseaddr,
9770 + (unsigned long)pfe->ddr_baseaddr,
9771 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9772 +
9773 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9774 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9775 +
9776 + rc = pfe_hw_init(pfe, 0);
9777 + if (rc < 0)
9778 + goto err_hw;
9779 +
9780 + if (us)
9781 + goto firmware_init;
9782 +
9783 + rc = pfe_hif_lib_init(pfe);
9784 + if (rc < 0)
9785 + goto err_hif_lib;
9786 +
9787 + rc = pfe_hif_init(pfe);
9788 + if (rc < 0)
9789 + goto err_hif;
9790 +
9791 +firmware_init:
9792 + rc = pfe_firmware_init(pfe);
9793 + if (rc < 0)
9794 + goto err_firmware;
9795 +
9796 + rc = pfe_ctrl_init(pfe);
9797 + if (rc < 0)
9798 + goto err_ctrl;
9799 +
9800 + rc = pfe_eth_init(pfe);
9801 + if (rc < 0)
9802 + goto err_eth;
9803 +
9804 + rc = pfe_sysfs_init(pfe);
9805 + if (rc < 0)
9806 + goto err_sysfs;
9807 +
9808 + rc = pfe_debugfs_init(pfe);
9809 + if (rc < 0)
9810 + goto err_debugfs;
9811 +
9812 + return 0;
9813 +
9814 +err_debugfs:
9815 + pfe_sysfs_exit(pfe);
9816 +
9817 +err_sysfs:
9818 + pfe_eth_exit(pfe);
9819 +
9820 +err_eth:
9821 + pfe_ctrl_exit(pfe);
9822 +
9823 +err_ctrl:
9824 + pfe_firmware_exit(pfe);
9825 +
9826 +err_firmware:
9827 + if (us)
9828 + goto err_hif_lib;
9829 +
9830 + pfe_hif_exit(pfe);
9831 +
9832 +err_hif:
9833 + pfe_hif_lib_exit(pfe);
9834 +
9835 +err_hif_lib:
9836 + pfe_hw_exit(pfe);
9837 +
9838 +err_hw:
9839 + return rc;
9840 +}
9841 +
9842 +/*
9843 + * pfe_remove -
9844 + */
9845 +int pfe_remove(struct pfe *pfe)
9846 +{
9847 + pr_info("%s\n", __func__);
9848 +
9849 + pfe_debugfs_exit(pfe);
9850 +
9851 + pfe_sysfs_exit(pfe);
9852 +
9853 + pfe_eth_exit(pfe);
9854 +
9855 + pfe_ctrl_exit(pfe);
9856 +
9857 +#if defined(LS1012A_PFE_RESET_WA)
9858 + pfe_hif_rx_idle(&pfe->hif);
9859 +#endif
9860 + pfe_firmware_exit(pfe);
9861 +
9862 + if (us)
9863 + goto hw_exit;
9864 +
9865 + pfe_hif_exit(pfe);
9866 +
9867 + pfe_hif_lib_exit(pfe);
9868 +
9869 +hw_exit:
9870 + pfe_hw_exit(pfe);
9871 +
9872 + return 0;
9873 +}
9874 --- /dev/null
9875 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
9876 @@ -0,0 +1,114 @@
9877 +/*
9878 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9879 + * Copyright 2017 NXP
9880 + *
9881 + * This program is free software; you can redistribute it and/or modify
9882 + * it under the terms of the GNU General Public License as published by
9883 + * the Free Software Foundation; either version 2 of the License, or
9884 + * (at your option) any later version.
9885 + *
9886 + * This program is distributed in the hope that it will be useful,
9887 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9888 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9889 + * GNU General Public License for more details.
9890 + *
9891 + * You should have received a copy of the GNU General Public License
9892 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9893 + */
9894 +
9895 +#ifndef _PFE_MOD_H_
9896 +#define _PFE_MOD_H_
9897 +
9898 +#include <linux/device.h>
9899 +#include <linux/elf.h>
9900 +
9901 +extern unsigned int us;
9902 +
9903 +struct pfe;
9904 +
9905 +#include "pfe_hw.h"
9906 +#include "pfe_firmware.h"
9907 +#include "pfe_ctrl.h"
9908 +#include "pfe_hif.h"
9909 +#include "pfe_hif_lib.h"
9910 +#include "pfe_eth.h"
9911 +#include "pfe_sysfs.h"
9912 +#include "pfe_perfmon.h"
9913 +#include "pfe_debugfs.h"
9914 +
9915 +#define PHYID_MAX_VAL 32
9916 +
9917 +struct pfe_tmu_credit {
9918 + /* Number of allowed TX packet in-flight, matches TMU queue size */
9919 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9920 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9921 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9922 +};
9923 +
9924 +struct pfe {
9925 + struct regmap *scfg;
9926 + unsigned long ddr_phys_baseaddr;
9927 + void *ddr_baseaddr;
9928 + unsigned int ddr_size;
9929 + void *cbus_baseaddr;
9930 + void *apb_baseaddr;
9931 + unsigned long iram_phys_baseaddr;
9932 + void *iram_baseaddr;
9933 + unsigned long ipsec_phys_baseaddr;
9934 + void *ipsec_baseaddr;
9935 + int hif_irq;
9936 + int wol_irq;
9937 + int hif_client_irq;
9938 + struct device *dev;
9939 + struct dentry *dentry;
9940 + struct pfe_ctrl ctrl;
9941 + struct pfe_hif hif;
9942 + struct pfe_eth eth;
9943 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
9944 +#if defined(CFG_DIAGS)
9945 + struct pfe_diags diags;
9946 +#endif
9947 + struct pfe_tmu_credit tmu_credit;
9948 + struct pfe_cpumon cpumon;
9949 + struct pfe_memmon memmon;
9950 + int wake;
9951 + int mdio_muxval[PHYID_MAX_VAL];
9952 + struct clk *hfe_clock;
9953 +};
9954 +
9955 +extern struct pfe *pfe;
9956 +
9957 +int pfe_probe(struct pfe *pfe);
9958 +int pfe_remove(struct pfe *pfe);
9959 +
9960 +/* DDR Mapping in reserved memory*/
9961 +#define ROUTE_TABLE_BASEADDR 0
9962 +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
9963 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
9964 + * CLASS_ROUTE_SIZE)
9965 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
9966 +#define BMU2_BUF_COUNT (4096 - 256)
9967 +/* This is to get a total DDR size of 12MiB */
9968 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
9969 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
9970 +#define UTIL_CODE_SIZE (128 * SZ_1K)
9971 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
9972 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
9973 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
9974 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
9975 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
9976 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
9977 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
9978 +#define TMU_LLM_QUEUE_LEN (8 * 512)
9979 +/* Must be power of two and at least 16 * 8 = 128 bytes */
9980 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
9981 +/* (4 TMU's x 16 queues x queue_len) */
9982 +
9983 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
9984 +
9985 +/* LMEM Mapping */
9986 +#define BMU1_LMEM_BASEADDR 0
9987 +#define BMU1_BUF_COUNT 256
9988 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
9989 +
9990 +#endif /* _PFE_MOD_H_ */
9991 --- /dev/null
9992 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9993 @@ -0,0 +1,38 @@
9994 +/*
9995 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9996 + * Copyright 2017 NXP
9997 + *
9998 + * This program is free software; you can redistribute it and/or modify
9999 + * it under the terms of the GNU General Public License as published by
10000 + * the Free Software Foundation; either version 2 of the License, or
10001 + * (at your option) any later version.
10002 + *
10003 + * This program is distributed in the hope that it will be useful,
10004 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10005 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10006 + * GNU General Public License for more details.
10007 + *
10008 + * You should have received a copy of the GNU General Public License
10009 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
10010 + */
10011 +
10012 +#ifndef _PFE_PERFMON_H_
10013 +#define _PFE_PERFMON_H_
10014 +
10015 +#include "pfe/pfe.h"
10016 +
10017 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
10018 +
10019 +struct pfe_cpumon {
10020 + u32 cpu_usage_pct[MAX_PE];
10021 + u32 class_usage_pct;
10022 +};
10023 +
10024 +struct pfe_memmon {
10025 + u32 kernel_memory_allocated;
10026 +};
10027 +
10028 +int pfe_perfmon_init(struct pfe *pfe);
10029 +void pfe_perfmon_exit(struct pfe *pfe);
10030 +
10031 +#endif /* _PFE_PERFMON_H_ */
10032 --- /dev/null
10033 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
10034 @@ -0,0 +1,818 @@
10035 +/*
10036 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10037 + * Copyright 2017 NXP
10038 + *
10039 + * This program is free software; you can redistribute it and/or modify
10040 + * it under the terms of the GNU General Public License as published by
10041 + * the Free Software Foundation; either version 2 of the License, or
10042 + * (at your option) any later version.
10043 + *
10044 + * This program is distributed in the hope that it will be useful,
10045 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10046 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10047 + * GNU General Public License for more details.
10048 + *
10049 + * You should have received a copy of the GNU General Public License
10050 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
10051 + */
10052 +
10053 +#include <linux/module.h>
10054 +#include <linux/platform_device.h>
10055 +
10056 +#include "pfe_mod.h"
10057 +
10058 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
10059 +#define NUM_QUEUES 16
10060 +
10061 +static char register_name[20][5] = {
10062 + "EPC", "ECAS", "EID", "ED",
10063 + "r0", "r1", "r2", "r3",
10064 + "r4", "r5", "r6", "r7",
10065 + "r8", "r9", "r10", "r11",
10066 + "r12", "r13", "r14", "r15",
10067 +};
10068 +
10069 +static char exception_name[14][20] = {
10070 + "Reset",
10071 + "HardwareFailure",
10072 + "NMI",
10073 + "InstBreakpoint",
10074 + "DataBreakpoint",
10075 + "Unsupported",
10076 + "PrivilegeViolation",
10077 + "InstBusError",
10078 + "DataBusError",
10079 + "AlignmentError",
10080 + "ArithmeticError",
10081 + "SystemCall",
10082 + "MemoryManagement",
10083 + "Interrupt",
10084 +};
10085 +
10086 +static unsigned long class_do_clear;
10087 +static unsigned long tmu_do_clear;
10088 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10089 +static unsigned long util_do_clear;
10090 +#endif
10091 +
10092 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
10093 + do_clear)
10094 +{
10095 + ssize_t len = 0;
10096 + u32 val;
10097 + char statebuf[5];
10098 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10099 + u32 debug_indicator;
10100 + u32 debug[20];
10101 +
10102 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
10103 + dmem_addr += 4;
10104 +
10105 + statebuf[4] = '\0';
10106 + len += sprintf(buf + len, "state=%4s ", statebuf);
10107 +
10108 + val = pe_dmem_read(id, dmem_addr, 4);
10109 + dmem_addr += 4;
10110 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
10111 +
10112 + val = pe_dmem_read(id, dmem_addr, 4);
10113 + if (do_clear && val)
10114 + pe_dmem_write(id, 0, dmem_addr, 4);
10115 + dmem_addr += 4;
10116 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
10117 +
10118 + val = pe_dmem_read(id, dmem_addr, 4);
10119 + if (do_clear && val)
10120 + pe_dmem_write(id, 0, dmem_addr, 4);
10121 + dmem_addr += 4;
10122 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
10123 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
10124 + else
10125 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
10126 +
10127 + val = pe_dmem_read(id, dmem_addr, 4);
10128 + if (do_clear && val)
10129 + pe_dmem_write(id, 0, dmem_addr, 4);
10130 + dmem_addr += 4;
10131 + if (val)
10132 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
10133 +
10134 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
10135 +
10136 + len += sprintf(buf + len, "\n");
10137 +
10138 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
10139 + dmem_addr += 4;
10140 + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
10141 + int j, last = 0;
10142 +
10143 + for (j = 0; j < 16; j++) {
10144 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
10145 + if (debug[j]) {
10146 + if (do_clear)
10147 + pe_dmem_write(id, 0, dmem_addr, 4);
10148 + last = j + 1;
10149 + }
10150 + dmem_addr += 4;
10151 + }
10152 + for (j = 0; j < last; j++) {
10153 + len += sprintf(buf + len, "%08x%s",
10154 + cpu_to_be32(debug[j]),
10155 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
10156 + }
10157 + }
10158 +
10159 + if (!strncmp(statebuf, "DEAD", 4)) {
10160 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
10161 +
10162 + len += sprintf(buf + len, "Exception details:\n");
10163 + for (i = 0; i < 20; i++) {
10164 + debug[i] = pe_dmem_read(id, dump, 4);
10165 + dump += 4;
10166 + if (i == 2)
10167 + len += sprintf(buf + len, "%4s = %08x (=%s) ",
10168 + register_name[i], cpu_to_be32(debug[i]),
10169 + exception_name[min((u32)
10170 + cpu_to_be32(debug[i]), (u32)13)]);
10171 + else
10172 + len += sprintf(buf + len, "%4s = %08x%s",
10173 + register_name[i], cpu_to_be32(debug[i]),
10174 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
10175 + }
10176 + }
10177 +
10178 + return len;
10179 +}
10180 +
10181 +static ssize_t class_phy_stats(char *buf, int phy)
10182 +{
10183 + ssize_t len = 0;
10184 + int off1 = phy * 0x28;
10185 + int off2 = phy * 0x10;
10186 +
10187 + if (phy == 3)
10188 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
10189 +
10190 + len += sprintf(buf + len, "phy: %d\n", phy);
10191 + len += sprintf(buf + len,
10192 + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
10193 + readl(CLASS_PHY1_RX_PKTS + off1),
10194 + readl(CLASS_PHY1_TX_PKTS + off1),
10195 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
10196 + readl(CLASS_PHY1_V4_PKTS + off1),
10197 + readl(CLASS_PHY1_V6_PKTS + off1));
10198 +
10199 + len += sprintf(buf + len,
10200 + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
10201 + readl(CLASS_PHY1_ICMP_PKTS + off2),
10202 + readl(CLASS_PHY1_IGMP_PKTS + off2),
10203 + readl(CLASS_PHY1_TCP_PKTS + off2),
10204 + readl(CLASS_PHY1_UDP_PKTS + off2));
10205 +
10206 + len += sprintf(buf + len, " err\n");
10207 + len += sprintf(buf + len,
10208 + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
10209 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
10210 + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
10211 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
10212 + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
10213 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
10214 +
10215 + return len;
10216 +}
10217 +
10218 +/* qm_read_drop_stat
10219 + * This function is used to read the drop statistics from the TMU
10220 + * hw drop counter. Since the hw counter is always cleared after
10221 + * reading, this function maintains the previous drop count, and
10222 + * adds the new value to it. That value can be retrieved by
10223 + * passing a pointer to it with the total_drops arg.
10224 + *
10225 + * @param tmu TMU number (0 - 3)
10226 + * @param queue queue number (0 - 15)
10227 + * @param total_drops pointer to location to store total drops (or NULL)
10228 + * @param do_reset if TRUE, clear total drops after updating
10229 + */
10230 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
10231 +{
10232 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
10233 + u32 val;
10234 +
10235 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10236 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10237 + val = readl(TMU_TEQ_DROP_STAT);
10238 + qtotal[tmu][queue] += val;
10239 + if (total_drops)
10240 + *total_drops = qtotal[tmu][queue];
10241 + if (do_reset)
10242 + qtotal[tmu][queue] = 0;
10243 + return val;
10244 +}
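
A short usage sketch of the accumulate-and-reset semantics described above; the loop bounds mirror those used by the sysfs handlers below, and the function is hypothetical:

	static void example_dump_drops(void)
	{
		u32 total;
		int tmu, queue;

		for (tmu = 0; tmu < 4; tmu++)
			for (queue = 0; queue < NUM_QUEUES; queue++) {
				/* fold the hw counter into the running total
				 * and read it back without clearing it
				 */
				qm_read_drop_stat(tmu, queue, &total, 0);
				if (total)
					pr_info("TMU%d-Q%d cumulative drops: %u\n",
						tmu, queue, total);
			}
	}
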
10245 +
10246 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
10247 +{
10248 + ssize_t len = 0;
10249 + u32 drops;
10250 +
10251 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
10252 +
10253 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
10254 +
10255 + /* Select queue */
10256 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10257 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10258 +
10259 + len += sprintf(buf + len,
10260 + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
10261 + drops, readl(TMU_TEQ_TRANS_STAT),
10262 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
10263 + readl(TMU_LLM_QUE_DROPCNT));
10264 +
10265 + return len;
10266 +}
10267 +
10268 +static ssize_t tmu_queues(char *buf, int tmu)
10269 +{
10270 + ssize_t len = 0;
10271 + int queue;
10272 +
10273 + for (queue = 0; queue < 16; queue++)
10274 + len += tmu_queue_stats(buf + len, tmu, queue);
10275 +
10276 + return len;
10277 +}
10278 +
10279 +static ssize_t block_version(char *buf, void *addr)
10280 +{
10281 + ssize_t len = 0;
10282 + u32 val;
10283 +
10284 + val = readl(addr);
10285 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
10286 + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
10287 +
10288 + return len;
10289 +}
10290 +
10291 +static ssize_t bmu(char *buf, int id, void *base)
10292 +{
10293 + ssize_t len = 0;
10294 +
10295 + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
10296 +
10297 + len += block_version(buf + len, base + BMU_VERSION);
10298 +
10299 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
10300 + BMU_BUF_SIZE)));
10301 + len += sprintf(buf + len, " buf count: %x\n", readl(base +
10302 + BMU_BUF_CNT));
10303 + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
10304 + BMU_REM_BUF_CNT));
10305 + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
10306 + BMU_CURR_BUF_CNT));
10307 + len += sprintf(buf + len, " free err: %x\n", readl(base +
10308 + BMU_FREE_ERR_ADDR));
10309 +
10310 + return len;
10311 +}
10312 +
10313 +static ssize_t gpi(char *buf, int id, void *base)
10314 +{
10315 + ssize_t len = 0;
10316 + u32 val;
10317 +
10318 + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
10319 + len += block_version(buf + len, base + GPI_VERSION);
10320 +
10321 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
10322 + GPI_FIFO_STATUS));
10323 + val = readl(base + GPI_FIFO_DEBUG);
10324 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
10325 + 0x3f);
10326 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
10327 + 0x3f);
10328 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
10329 + 0x1ff);
10330 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
10331 + 0x1ff);
10332 + len += sprintf(buf + len, " overrun: %x\n", readl(base +
10333 + GPI_OVERRUN_DROPCNT));
10334 +
10335 + return len;
10336 +}
10337 +
10338 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10339 + const char *buf, size_t count)
10340 +{
10341 +	if (kstrtoul(buf, 0, &class_do_clear))
10342 +		return -EINVAL;
10342 + return count;
10343 +}
10344 +
10345 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10346 + char *buf)
10347 +{
10348 + ssize_t len = 0;
10349 + int id;
10350 + u32 val;
10351 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10352 +
10353 + len += block_version(buf + len, CLASS_VERSION);
10354 +
10355 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10356 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10357 +
10358 + val = readl(CLASS_PE0_DEBUG + id * 4);
10359 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10360 +
10361 + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10362 + class_do_clear);
10363 + }
10364 + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10365 + cpumon->class_usage_pct);
10366 +
10367 + len += sprintf(buf + len, "pe status: 0x%x\n",
10368 + readl(CLASS_PE_STATUS));
10369 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
10370 + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10371 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
10372 + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10373 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
10374 +
10375 + len += class_phy_stats(buf + len, 0);
10376 + len += class_phy_stats(buf + len, 1);
10377 + len += class_phy_stats(buf + len, 2);
10378 + len += class_phy_stats(buf + len, 3);
10379 +
10380 + return len;
10381 +}
10382 +
10383 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10384 + const char *buf, size_t count)
10385 +{
10386 +	if (kstrtoul(buf, 0, &tmu_do_clear))
10387 +		return -EINVAL;
10387 + return count;
10388 +}
10389 +
10390 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10391 + char *buf)
10392 +{
10393 + ssize_t len = 0;
10394 + int id;
10395 + u32 val;
10396 +
10397 + len += block_version(buf + len, TMU_VERSION);
10398 +
10399 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10400 + if (id == TMU2_ID)
10401 + continue;
10402 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10403 +
10404 + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10405 + tmu_do_clear);
10406 + }
10407 +
10408 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
10409 + len += sprintf(buf + len, "inq fifo cnt: %x\n",
10410 + readl(TMU_PHY_INQ_FIFO_CNT));
10411 + val = readl(TMU_INQ_STAT);
10412 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
10413 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
10414 +
10415 + return len;
10416 +}
10417 +
10418 +static unsigned long drops_do_clear;
10419 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10420 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10421 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10422 +#endif
10423 +
10424 +static const char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10425 + "ICC",
10426 + "Host Pkt Error",
10427 + "Rx Error",
10428 + "IPsec Outbound",
10429 + "IPsec Inbound",
10430 + "EXPT IPsec Error",
10431 + "Reassembly",
10432 + "Fragmenter",
10433 + "NAT-T",
10434 + "Socket",
10435 + "Multicast",
10436 + "NAT-PT",
10437 + "Tx Disabled",
10438 +};
10439 +
10440 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10441 +static const char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10442 + "IPsec Outbound",
10443 + "IPsec Inbound",
10444 + "IPsec Rate Limiter",
10445 + "Fragmenter",
10446 + "Socket",
10447 + "Tx Disabled",
10448 + "Rx Error",
10449 +};
10450 +#endif
10451 +
10452 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10453 + const char *buf, size_t count)
10454 +{
10455 +	if (kstrtoul(buf, 0, &drops_do_clear))
10456 +		return -EINVAL;
10456 + return count;
10457 +}
10458 +
10459 +static u32 tmu_drops[4][16];
10460 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10461 + char *buf)
10462 +{
10463 + ssize_t len = 0;
10464 + int id, dropnum;
10465 + int tmu, queue;
10466 + u32 val;
10467 + u32 dmem_addr;
10468 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10469 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10470 +
10471 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
10472 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10473 + if (drops_do_clear)
10474 + pe_sync_stop(ctrl, (1 << id));
10475 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10476 + dropnum++) {
10477 +			dmem_addr = CLASS_DM_DROP_CNTR + (dropnum * 4);
10478 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10479 + class_drop_counter[dropnum] += val;
10480 + num_class_drops += val;
10481 + if (drops_do_clear)
10482 + pe_dmem_write(id, 0, dmem_addr, 4);
10483 + }
10484 + if (drops_do_clear)
10485 + pe_start(ctrl, (1 << id));
10486 + }
10487 +
10488 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10489 + if (drops_do_clear)
10490 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10491 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10492 +		dmem_addr = UTIL_DM_DROP_CNTR + (dropnum * 4);
10493 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10494 + util_drop_counter[dropnum] = val;
10495 + num_util_drops += val;
10496 + if (drops_do_clear)
10497 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10498 + }
10499 + if (drops_do_clear)
10500 + pe_start(ctrl, (1 << UTIL_ID));
10501 +#endif
10502 + for (tmu = 0; tmu < 4; tmu++) {
10503 + for (queue = 0; queue < 16; queue++) {
10504 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10505 + drops_do_clear);
10506 + num_tmu_drops += tmu_drops[tmu][queue];
10507 + }
10508 + }
10509 +
10510 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10511 + len += sprintf(buf + len, "No PE drops\n\n");
10512 +
10513 + if (num_class_drops > 0) {
10514 + len += sprintf(buf + len, "Class PE drops --\n");
10515 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10516 + dropnum++) {
10517 + if (class_drop_counter[dropnum] > 0)
10518 + len += sprintf(buf + len, " %s: %d\n",
10519 + class_drop_description[dropnum],
10520 + class_drop_counter[dropnum]);
10521 + }
10522 + len += sprintf(buf + len, "\n");
10523 + }
10524 +
10525 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10526 + if (num_util_drops > 0) {
10527 + len += sprintf(buf + len, "Util PE drops --\n");
10528 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10529 + if (util_drop_counter[dropnum] > 0)
10530 + len += sprintf(buf + len, " %s: %d\n",
10531 + util_drop_description[dropnum],
10532 + util_drop_counter[dropnum]);
10533 + }
10534 + len += sprintf(buf + len, "\n");
10535 + }
10536 +#endif
10537 + if (num_tmu_drops > 0) {
10538 + len += sprintf(buf + len, "TMU drops --\n");
10539 + for (tmu = 0; tmu < 4; tmu++) {
10540 + for (queue = 0; queue < 16; queue++) {
10541 + if (tmu_drops[tmu][queue] > 0)
+    len += sprintf(buf + len,
+     " TMU%d-Q%d: %d\n",
+     tmu, queue, tmu_drops[tmu][queue]);
10545 + }
10546 + }
10547 + len += sprintf(buf + len, "\n");
10548 + }
10549 +
10550 + return len;
10551 +}
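+/*
+ * Example "drops" output (values illustrative):
+ *
+ *   Class PE drops --
+ *     Rx Error: 12
+ *
+ *   TMU drops --
+ *     TMU0-Q5: 3
+ */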
10552 +
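+/* One read-only sysfs file per TMU, all backed by tmu_queues(). */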
10553 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
10554 + *attr, char *buf)
10555 +{
10556 + return tmu_queues(buf, 0);
10557 +}
10558 +
10559 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
10560 + *attr, char *buf)
10561 +{
10562 + return tmu_queues(buf, 1);
10563 +}
10564 +
10565 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
10566 + *attr, char *buf)
10567 +{
10568 + return tmu_queues(buf, 2);
10569 +}
10570 +
10571 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
10572 + *attr, char *buf)
10573 +{
10574 + return tmu_queues(buf, 3);
10575 +}
10576 +
10577 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10578 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10579 + const char *buf, size_t count)
+{
+ unsigned long val;
+
+ /* Same fix as pfe_set_drops(): kstrtoul() takes a result pointer. */
+ if (kstrtoul(buf, 0, &val))
+  return -EINVAL;
+ util_do_clear = val;
+ return count;
10583 +}
10584 +
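+/* Stop the UTIL PE, dump its status block from DMEM, then restart it. */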
10585 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10586 + char *buf)
10587 +{
10588 + ssize_t len = 0;
10589 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10590 +
10591 + len += block_version(buf + len, UTIL_VERSION);
10592 +
10593 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10594 + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10595 + util_do_clear);
10596 + pe_start(ctrl, (1 << UTIL_ID));
10597 +
10598 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
10599 + len += sprintf(buf + len, "max buf cnt: %x\n",
10600 + readl(UTIL_MAX_BUF_CNT));
10601 + len += sprintf(buf + len, "tsq max cnt: %x\n",
10602 + readl(UTIL_TSQ_MAX_CNT));
10603 +
10604 + return len;
10605 +}
10606 +#endif
10607 +
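+/* Dump both buffer manager units through the common bmu() helper. */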
10608 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10609 + char *buf)
10610 +{
10611 + ssize_t len = 0;
10612 +
10613 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10614 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10615 +
10616 + return len;
10617 +}
10618 +
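+/*
+ * Dump host interface state: current buffer descriptor pointers plus
+ * channel and DMA status, for both the copying HIF and the no-copy
+ * (HIF_NOCPY) variant.
+ */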
10619 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10620 + char *buf)
10621 +{
10622 + ssize_t len = 0;
10623 +
10624 + len += sprintf(buf + len, "hif:\n ");
10625 + len += block_version(buf + len, HIF_VERSION);
10626 +
10627 + len += sprintf(buf + len, " tx curr bd: %x\n",
10628 + readl(HIF_TX_CURR_BD_ADDR));
10629 + len += sprintf(buf + len, " tx status: %x\n",
10630 + readl(HIF_TX_STATUS));
10631 + len += sprintf(buf + len, " tx dma status: %x\n",
10632 + readl(HIF_TX_DMA_STATUS));
10633 +
10634 + len += sprintf(buf + len, " rx curr bd: %x\n",
10635 + readl(HIF_RX_CURR_BD_ADDR));
10636 + len += sprintf(buf + len, " rx status: %x\n",
10637 + readl(HIF_RX_STATUS));
10638 + len += sprintf(buf + len, " rx dma status: %x\n",
10639 + readl(HIF_RX_DMA_STATUS));
10640 +
10641 + len += sprintf(buf + len, "hif nocopy:\n ");
10642 + len += block_version(buf + len, HIF_NOCPY_VERSION);
10643 +
10644 + len += sprintf(buf + len, " tx curr bd: %x\n",
10645 + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10646 + len += sprintf(buf + len, " tx status: %x\n",
10647 + readl(HIF_NOCPY_TX_STATUS));
10648 + len += sprintf(buf + len, " tx dma status: %x\n",
10649 + readl(HIF_NOCPY_TX_DMA_STATUS));
10650 +
10651 + len += sprintf(buf + len, " rx curr bd: %x\n",
10652 + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10653 + len += sprintf(buf + len, " rx status: %x\n",
10654 + readl(HIF_NOCPY_RX_STATUS));
10655 + len += sprintf(buf + len, " rx dma status: %x\n",
10656 + readl(HIF_NOCPY_RX_DMA_STATUS));
10657 +
10658 + return len;
10659 +}
10660 +
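+/* Dump the two Ethernet GPIs and the host GPI (hardware ids 0, 1, 3). */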
10661 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10662 + char *buf)
10663 +{
10664 + ssize_t len = 0;
10665 +
10666 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10667 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10668 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10669 +
10670 + return len;
10671 +}
10672 +
10673 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
10674 + *attr, char *buf)
10675 +{
10676 + ssize_t len = 0;
10677 + struct pfe_memmon *memmon = &pfe->memmon;
10678 +
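+ /* (bytes + 1023) / 1024 rounds up to whole kilobytes. */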
10679 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10680 + memmon->kernel_memory_allocated,
10681 + (memmon->kernel_memory_allocated + 1023) / 1024);
10682 +
10683 + return len;
10684 +}
10685 +
10686 +#ifdef HIF_NAPI_STATS
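+/*
+ * Optional NAPI bookkeeping for the HIF RX path: how often the poll
+ * was scheduled, ran, filled its budget, and so on.  Writing anything
+ * to the file resets the counters.
+ */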
10687 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
10688 + struct device_attribute *attr,
10689 + char *buf)
10690 +{
10691 + struct platform_device *pdev = to_platform_device(dev);
10692 + struct pfe *pfe = platform_get_drvdata(pdev);
10693 + ssize_t len = 0;
10694 +
10695 + len += sprintf(buf + len, "sched: %u\n",
10696 + pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
10697 + len += sprintf(buf + len, "poll: %u\n",
10698 + pfe->hif.napi_counters[NAPI_POLL_COUNT]);
10699 + len += sprintf(buf + len, "packet: %u\n",
10700 + pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
10701 + len += sprintf(buf + len, "budget: %u\n",
10702 + pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
10703 + len += sprintf(buf + len, "desc: %u\n",
10704 + pfe->hif.napi_counters[NAPI_DESC_COUNT]);
10705 + len += sprintf(buf + len, "full: %u\n",
10706 + pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
10707 +
10708 + return len;
10709 +}
10710 +
10711 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
10712 + struct device_attribute *attr,
10713 + const char *buf, size_t count)
10714 +{
10715 + struct platform_device *pdev = to_platform_device(dev);
10716 + struct pfe *pfe = platform_get_drvdata(pdev);
10717 +
10718 + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
10719 +
10720 + return count;
10721 +}
10722 +
10723 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
10724 + pfe_set_hif_napi_stats);
10725 +#endif
10726 +
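+/* 0644 entries accept a write to request a clear; 0444 entries are
+ * read-only dumps.
+ */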
10727 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
10728 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
10729 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10730 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
10731 +#endif
10732 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
10733 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
10734 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
10735 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
10736 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
10737 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
10738 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
10739 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
10740 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
10741 +
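+/*
+ * Create the sysfs files one by one; on any failure, fall through the
+ * ladder below and remove everything created so far in reverse order.
+ */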
10742 +int pfe_sysfs_init(struct pfe *pfe)
10743 +{
10744 + if (device_create_file(pfe->dev, &dev_attr_class))
10745 + goto err_class;
10746 +
10747 + if (device_create_file(pfe->dev, &dev_attr_tmu))
10748 + goto err_tmu;
10749 +
10750 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10751 + if (device_create_file(pfe->dev, &dev_attr_util))
10752 + goto err_util;
10753 +#endif
10754 +
10755 + if (device_create_file(pfe->dev, &dev_attr_bmu))
10756 + goto err_bmu;
10757 +
10758 + if (device_create_file(pfe->dev, &dev_attr_hif))
10759 + goto err_hif;
10760 +
10761 + if (device_create_file(pfe->dev, &dev_attr_gpi))
10762 + goto err_gpi;
10763 +
10764 + if (device_create_file(pfe->dev, &dev_attr_drops))
10765 + goto err_drops;
10766 +
10767 + if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
10768 + goto err_tmu0_queues;
10769 +
10770 + if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
10771 + goto err_tmu1_queues;
10772 +
10773 + if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
10774 + goto err_tmu2_queues;
10775 +
10776 + if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
10777 + goto err_tmu3_queues;
10778 +
10779 + if (device_create_file(pfe->dev, &dev_attr_pfemem))
10780 + goto err_pfemem;
10781 +
10782 +#ifdef HIF_NAPI_STATS
10783 + if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
10784 + goto err_hif_napi_stats;
10785 +#endif
10786 +
10787 + return 0;
10788 +
10789 +#ifdef HIF_NAPI_STATS
10790 +err_hif_napi_stats:
10791 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10792 +#endif
10793 +
10794 +err_pfemem:
10795 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10796 +
10797 +err_tmu3_queues:
10798 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10799 +
10800 +err_tmu2_queues:
10801 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10802 +
10803 +err_tmu1_queues:
10804 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10805 +
10806 +err_tmu0_queues:
10807 + device_remove_file(pfe->dev, &dev_attr_drops);
10808 +
10809 +err_drops:
10810 + device_remove_file(pfe->dev, &dev_attr_gpi);
10811 +
10812 +err_gpi:
10813 + device_remove_file(pfe->dev, &dev_attr_hif);
10814 +
10815 +err_hif:
10816 + device_remove_file(pfe->dev, &dev_attr_bmu);
10817 +
10818 +err_bmu:
10819 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10820 + device_remove_file(pfe->dev, &dev_attr_util);
10821 +
10822 +err_util:
10823 +#endif
10824 + device_remove_file(pfe->dev, &dev_attr_tmu);
10825 +
10826 +err_tmu:
10827 + device_remove_file(pfe->dev, &dev_attr_class);
10828 +
10829 +err_class:
10830 + return -1;
10831 +}
10832 +
10833 +void pfe_sysfs_exit(struct pfe *pfe)
10834 +{
10835 +#ifdef HIF_NAPI_STATS
10836 + device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
10837 +#endif
10838 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10839 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10840 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10841 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10842 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10843 + device_remove_file(pfe->dev, &dev_attr_drops);
10844 + device_remove_file(pfe->dev, &dev_attr_gpi);
10845 + device_remove_file(pfe->dev, &dev_attr_hif);
10846 + device_remove_file(pfe->dev, &dev_attr_bmu);
10847 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10848 + device_remove_file(pfe->dev, &dev_attr_util);
10849 +#endif
10850 + device_remove_file(pfe->dev, &dev_attr_tmu);
10851 + device_remove_file(pfe->dev, &dev_attr_class);
10852 +}
10853 --- /dev/null
10854 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
10855 @@ -0,0 +1,29 @@
10856 +/*
10857 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10858 + * Copyright 2017 NXP
10859 + *
10860 + * This program is free software; you can redistribute it and/or modify
10861 + * it under the terms of the GNU General Public License as published by
10862 + * the Free Software Foundation; either version 2 of the License, or
10863 + * (at your option) any later version.
10864 + *
10865 + * This program is distributed in the hope that it will be useful,
10866 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10867 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10868 + * GNU General Public License for more details.
10869 + *
10870 + * You should have received a copy of the GNU General Public License
10871 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
10872 + */
10873 +
10874 +#ifndef _PFE_SYSFS_H_
10875 +#define _PFE_SYSFS_H_
10876 +
10877 +#include <linux/proc_fs.h>
10878 +
10879 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
10880 +
10881 +int pfe_sysfs_init(struct pfe *pfe);
10882 +void pfe_sysfs_exit(struct pfe *pfe);
10883 +
10884 +#endif /* _PFE_SYSFS_H_ */