From 8b7935a883d42187716fe486c83352f24d01ddcd Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Thu, 19 Oct 2017 12:48:19 +0800
Subject: [PATCH] fsl_ppfe: support layerscape

This is an integrated patch for layerscape pfe support.

Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
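Reviewer note (kept below the "---" marker, where git-am/patch ignore it):
the least obvious macro in the headers added below is
PE_MEM_ACCESS_BYTE_ENABLE(), which packs big-endian byte enables into bits
27:24 of the *_MEM_ACCESS_ADDR CSRs. A minimal sketch of how such a CSR
write is driven follows; the class_dmem_write() name is hypothetical and
illustrative only, not part of this patch:

	/* Write one 1-, 2- or 4-byte value into classifier PE DMEM. */
	static void class_dmem_write(u32 addr, u32 val, u8 size)
	{
		u32 offset = addr & 0x3;	/* byte lane inside the 32-bit word */

		/* Stage the write data, then issue the internal-memory write
		 * command with the DMEM select and byte-enable bits set.
		 */
		writel(val, CLASS_MEM_ACCESS_WDATA);
		writel(PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
		       PE_MEM_ACCESS_BYTE_ENABLE(offset, size) |
		       (addr & ~0x3), CLASS_MEM_ACCESS_ADDR);
	}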
 drivers/staging/fsl_ppfe/Kconfig                  |   20 +
 drivers/staging/fsl_ppfe/Makefile                 |   19 +
 drivers/staging/fsl_ppfe/TODO                     |    2 +
 drivers/staging/fsl_ppfe/include/pfe/cbus.h       |   78 +
 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h   |   55 +
 .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h |  289 +++
 .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h |  242 ++
 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h   |   86 +
 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h   |  100 +
 .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h |   50 +
 .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h   |  168 ++
 .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h  |   61 +
 drivers/staging/fsl_ppfe/include/pfe/pfe.h        |  372 +++
 drivers/staging/fsl_ppfe/pfe_ctrl.c               |  238 ++
 drivers/staging/fsl_ppfe/pfe_ctrl.h               |  112 +
 drivers/staging/fsl_ppfe/pfe_debugfs.c            |  111 +
 drivers/staging/fsl_ppfe/pfe_debugfs.h            |   25 +
 drivers/staging/fsl_ppfe/pfe_eth.c                | 2434 ++++++++++++++++++++
 drivers/staging/fsl_ppfe/pfe_eth.h                |  184 ++
 drivers/staging/fsl_ppfe/pfe_firmware.c           |  314 +++
 drivers/staging/fsl_ppfe/pfe_firmware.h           |   32 +
 drivers/staging/fsl_ppfe/pfe_hal.c                | 1516 ++++++++++++
 drivers/staging/fsl_ppfe/pfe_hif.c                | 1072 +++++++++
 drivers/staging/fsl_ppfe/pfe_hif.h                |  211 ++
 drivers/staging/fsl_ppfe/pfe_hif_lib.c            |  601 +++++
 drivers/staging/fsl_ppfe/pfe_hif_lib.h            |  239 ++
 drivers/staging/fsl_ppfe/pfe_hw.c                 |  176 ++
 drivers/staging/fsl_ppfe/pfe_hw.h                 |   27 +
 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c   |  394 ++++
 drivers/staging/fsl_ppfe/pfe_mod.c                |  141 ++
 drivers/staging/fsl_ppfe/pfe_mod.h                |  112 +
 drivers/staging/fsl_ppfe/pfe_perfmon.h            |   38 +
 drivers/staging/fsl_ppfe/pfe_sysfs.c              |  818 +++++++
 drivers/staging/fsl_ppfe/pfe_sysfs.h              |   29 +
 34 files changed, 10366 insertions(+)
 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
 create mode 100644 drivers/staging/fsl_ppfe/Makefile
 create mode 100644 drivers/staging/fsl_ppfe/TODO
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h

--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Kconfig
@@ -0,0 +1,20 @@
+#
+# Freescale Programmable Packet Forwarding Engine driver
+#
+config FSL_PPFE
+	bool "Freescale PPFE Driver"
+	default n
+	---help---
+	  The Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
+	  It provides two high-performance Ethernet interfaces.
+	  This driver initializes, programs and controls the PPFE.
+	  Use this driver to enable network connectivity on LS1012A platforms.
+
+if FSL_PPFE
+
+config FSL_PPFE_UTIL_DISABLED
+	bool "Disable PPFE UTIL Processor Engine"
+	---help---
+	  Enable the UTIL processor engine only if required.
+
+endif # FSL_PPFE
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the Freescale PPFE driver
+#
+
+ccflags-y += -I$(src)/include -I$(src)
+
+obj-m += pfe.o
+
+pfe-y += pfe_mod.o \
+	pfe_hw.o \
+	pfe_firmware.o \
+	pfe_ctrl.o \
+	pfe_hif.o \
+	pfe_hif_lib.o \
+	pfe_eth.o \
+	pfe_sysfs.o \
+	pfe_debugfs.o \
+	pfe_ls1012a_platform.o \
+	pfe_hal.o
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - provide pfe pe monitoring support
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CBUS_H_
+#define _CBUS_H_
+
+#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
+#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
+#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
+#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
+#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
+#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
+#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
+#define LMEM_SIZE 0x10000
+#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
+#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
+#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
+#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
+#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
+#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
+
+/*
+ * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
+ * XXX_MEM_ACCESS_ADDR register bit definitions.
+ */
+#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
+#define PE_MEM_ACCESS_IMEM BIT(15)
+#define PE_MEM_ACCESS_DMEM BIT(16)
+
+/* Byte enables of the internal memory access. These are interpreted in big-endian */
+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
+ ({ typeof(size) size_ = (size); \
+ (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
+
+#include "cbus/emac_mtip.h"
+#include "cbus/gpi.h"
+#include "cbus/bmu.h"
+#include "cbus/hif.h"
+#include "cbus/tmu_csr.h"
+#include "cbus/class_csr.h"
+#include "cbus/hif_nocpy.h"
+#include "cbus/util_csr.h"
+
+/* PFE core states */
+#define CORE_DISABLE 0x00000000
+#define CORE_ENABLE 0x00000001
+#define CORE_SW_RESET 0x00000002
+
+/* LMEM defines */
+#define LMEM_HDR_SIZE 0x0010
+#define LMEM_BUF_SIZE_LN2 0x7
+#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
+
+/* DDR defines */
+#define DDR_HDR_SIZE 0x0100
+#define DDR_BUF_SIZE_LN2 0xb
+#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
+
+#endif /* _CBUS_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _BMU_H_
+#define _BMU_H_
+
+#define BMU_VERSION 0x000
+#define BMU_CTRL 0x004
+#define BMU_UCAST_CONFIG 0x008
+#define BMU_UCAST_BASE_ADDR 0x00c
+#define BMU_BUF_SIZE 0x010
+#define BMU_BUF_CNT 0x014
+#define BMU_THRES 0x018
+#define BMU_INT_SRC 0x020
+#define BMU_INT_ENABLE 0x024
+#define BMU_ALLOC_CTRL 0x030
+#define BMU_FREE_CTRL 0x034
+#define BMU_FREE_ERR_ADDR 0x038
+#define BMU_CURR_BUF_CNT 0x03c
+#define BMU_MCAST_CNT 0x040
+#define BMU_MCAST_ALLOC_CTRL 0x044
+#define BMU_REM_BUF_CNT 0x048
+#define BMU_LOW_WATERMARK 0x050
+#define BMU_HIGH_WATERMARK 0x054
+#define BMU_INT_MEM_ACCESS 0x100
+
+struct BMU_CFG {
+ unsigned long baseaddr;
+ u32 count;
+ u32 size;
+ u32 low_watermark;
+ u32 high_watermark;
+};
+
+#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
+#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
+
+#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
+
+#endif /* _BMU_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CLASS_CSR_H_
+#define _CLASS_CSR_H_
+
+/* @file class_csr.h.
+ * class_csr - block containing all the classifier control and status registers.
+ * Mapped on CBUS and accessible from all PEs and ARM.
+ */
+#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
+#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
+#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
+
+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
+
+/* LMEM header size for the Classifier block. Data in the LMEM
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
+
+/* DDR header size for the Classifier block. Data in the DDR
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
+
+#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
+#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
+
+/* @name Class PE memory access. Allows external PEs and HOST to
+ * read/write PMEM/DMEM memory ranges for each classifier PE.
+ */
+/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
+ * see XXX_MEM_ACCESS_ADDR for details.
+ */
+#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
+
+/* Internal Memory Access Write Data [31:0] */
+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
+
+/* Internal Memory Access Read Data [31:0] */
+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
+#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
+#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
+
+#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
+#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
+#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
+#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
+#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
+#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
+#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
+#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
+#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
+#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
+#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
+#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
+#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
+#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
+#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
+#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
+#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
+#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
+#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
+#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
+#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
+#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
+#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
+#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
+#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
+#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
+#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
+#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
+#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
+#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
+#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
+#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
+#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
+#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
+#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
+#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
+#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
+#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
+#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
+#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
+#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
+#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
+#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
+#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
+#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
+#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
+#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
+#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
+#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
+#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
+#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
+#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
+#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
+#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
+#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
+#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
+
+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
+#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
+#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
+#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
+#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
+#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
+#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
+#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
+#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
+#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
+
+#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
+
+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
+
+/* (route_entry_size[9:0], route_hash_size[23:16]
+ * (this is actually ln2(size)))
+ */
+#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
+
+#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
+
+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
+
+#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
+#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
+#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
+#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
+#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
+#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
+
+#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
+#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
+
+#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
+
+#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
+#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
+#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
+#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
+#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
+#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
+#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
+#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
+#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
+#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
+#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
+#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
+
+#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
+#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
+
+#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
+
+#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
+
+#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
+#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
+#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
+#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
+#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
+#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
+
+#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
+
+/* CLASS defines */
+#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
+#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
+
+/* Can be configured */
+#define CLASS_PBUF0_BASE_ADDR 0x000
+/* Can be configured */
+#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
+
+#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+
+#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
+ CLASS_PBUF0_BASE_ADDR)
+#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
+ CLASS_PBUF2_BASE_ADDR)
+
+#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF0_HEADER_BASE_ADDR)
+#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF2_HEADER_BASE_ADDR)
+
+#define CLASS_ROUTE_SIZE 128
+#define CLASS_MAX_ROUTE_SIZE 256
+#define CLASS_ROUTE_HASH_BITS 20
+#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
+
+/* Can be configured */
+#define CLASS_ROUTE0_BASE_ADDR 0x400
+/* Can be configured */
+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
+
+#define CLASS_SA_SIZE 128
+#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
+/* not used */
+#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
+
+/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
+ (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
+#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
+ CLASS_SA_SIZE))
+
+#define TWO_LEVEL_ROUTE BIT(0)
+#define PHYNO_IN_HASH BIT(1)
+#define HW_ROUTE_FETCH BIT(3)
+#define HW_BRIDGE_FETCH BIT(5)
+#define IP_ALIGNED BIT(6)
+#define ARC_HIT_CHECK_EN BIT(7)
+#define CLASS_TOE BIT(11)
+#define HASH_NORMAL (0 << 12)
+#define HASH_CRC_PORT BIT(12)
+#define HASH_CRC_IP (2 << 12)
+#define HASH_CRC_PORT_IP (3 << 12)
+#define QB2BUS_LE BIT(15)
+
+#define TCP_CHKSUM_DROP BIT(0)
+#define UDP_CHKSUM_DROP BIT(1)
+#define IPV4_CHKSUM_DROP BIT(9)
+
+/* CLASS_HIF_PARSE bits */
+#define HIF_PKT_CLASS_EN BIT(0)
+#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
+
+struct class_cfg {
+ u32 toe_mode;
+ unsigned long route_table_baseaddr;
+ u32 route_table_hash_bits;
+ u32 pe_sys_clk_ratio;
+ u32 resume;
+};
+
+#endif /* _CLASS_CSR_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/ethtool.h>
+
+#define EMAC_IEVENT_REG 0x004
+#define EMAC_IMASK_REG 0x008
+#define EMAC_R_DES_ACTIVE_REG 0x010
+#define EMAC_X_DES_ACTIVE_REG 0x014
+#define EMAC_ECNTRL_REG 0x024
+#define EMAC_MII_DATA_REG 0x040
+#define EMAC_MII_CTRL_REG 0x044
+#define EMAC_MIB_CTRL_STS_REG 0x064
+#define EMAC_RCNTRL_REG 0x084
+#define EMAC_TCNTRL_REG 0x0C4
+#define EMAC_PHY_ADDR_LOW 0x0E4
+#define EMAC_PHY_ADDR_HIGH 0x0E8
+#define EMAC_GAUR 0x120
+#define EMAC_GALR 0x124
+#define EMAC_TFWR_STR_FWD 0x144
+#define EMAC_RX_SECTION_FULL 0x190
+#define EMAC_RX_SECTION_EMPTY 0x194
+#define EMAC_TX_SECTION_EMPTY 0x1A0
+#define EMAC_TRUNC_FL 0x1B0
+
+#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL 0x224 /* RMON TX collision count */
+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
+#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#define EMAC_SMAC_0_0 0x500 /* Supplemental MAC Address 0 (RW). */
+#define EMAC_SMAC_0_1 0x504 /* Supplemental MAC Address 0 (RW). */
+
+/* GEMAC definitions and settings */
+
+#define EMAC_PORT_0 0
+#define EMAC_PORT_1 1
+
+/* GEMAC Bit definitions */
+#define EMAC_IEVENT_HBERR 0x80000000
+#define EMAC_IEVENT_BABR 0x40000000
+#define EMAC_IEVENT_BABT 0x20000000
+#define EMAC_IEVENT_GRA 0x10000000
+#define EMAC_IEVENT_TXF 0x08000000
+#define EMAC_IEVENT_TXB 0x04000000
+#define EMAC_IEVENT_RXF 0x02000000
+#define EMAC_IEVENT_RXB 0x01000000
+#define EMAC_IEVENT_MII 0x00800000
+#define EMAC_IEVENT_EBERR 0x00400000
+#define EMAC_IEVENT_LC 0x00200000
+#define EMAC_IEVENT_RL 0x00100000
+#define EMAC_IEVENT_UN 0x00080000
+
+#define EMAC_IMASK_HBERR 0x80000000
+#define EMAC_IMASK_BABR 0x40000000
+#define EMAC_IMASKT_BABT 0x20000000
+#define EMAC_IMASK_GRA 0x10000000
+#define EMAC_IMASKT_TXF 0x08000000
+#define EMAC_IMASK_TXB 0x04000000
+#define EMAC_IMASKT_RXF 0x02000000
+#define EMAC_IMASK_RXB 0x01000000
+#define EMAC_IMASK_MII 0x00800000
+#define EMAC_IMASK_EBERR 0x00400000
+#define EMAC_IMASK_LC 0x00200000
+#define EMAC_IMASKT_RL 0x00100000
+#define EMAC_IMASK_UN 0x00080000
+
+#define EMAC_RCNTRL_MAX_FL_SHIFT 16
+#define EMAC_RCNTRL_LOOP 0x00000001
+#define EMAC_RCNTRL_DRT 0x00000002
+#define EMAC_RCNTRL_MII_MODE 0x00000004
+#define EMAC_RCNTRL_PROM 0x00000008
+#define EMAC_RCNTRL_BC_REJ 0x00000010
+#define EMAC_RCNTRL_FCE 0x00000020
+#define EMAC_RCNTRL_RGMII 0x00000040
+#define EMAC_RCNTRL_SGMII 0x00000080
+#define EMAC_RCNTRL_RMII 0x00000100
+#define EMAC_RCNTRL_RMII_10T 0x00000200
+#define EMAC_RCNTRL_CRC_FWD 0x00004000
+
+#define EMAC_TCNTRL_GTS 0x00000001
+#define EMAC_TCNTRL_HBC 0x00000002
+#define EMAC_TCNTRL_FDEN 0x00000004
+#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
+#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
+#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
+#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
+#define EMAC_ECNTRL_SLEEP 0x00000008
+#define EMAC_ECNTRL_SPEED 0x00000020
+#define EMAC_ECNTRL_DBSWAP 0x00000100
+
+#define EMAC_X_WMRK_STRFWD 0x00000100
+
+#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
+#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
+
+#define EMAC_RX_SECTION_EMPTY_V 0x00010006
+/*
+ * The possible operating speeds of the MAC, currently supporting 10, 100 and
+ * 1000Mb modes.
+ */
+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
+
+/* MII-related definitions */
+#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
+#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
+#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
+#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
+#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
+#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
+
+#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
+#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
+#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
+#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
+
+#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
+ EMAC_MII_DATA_RA_SHIFT)
+#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
+ EMAC_MII_DATA_PA_SHIFT)
+#define EMAC_MII_DATA(v) ((v) & 0xffff)
+
+#define EMAC_MII_SPEED_SHIFT 1
+#define EMAC_HOLDTIME_SHIFT 8
+#define EMAC_HOLDTIME_MASK 0x7
+#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
+ EMAC_HOLDTIME_SHIFT)
+
+/*
+ * The address organisation for the MAC device. All addresses are split into
+ * two 32-bit register fields. The first one (bottom) holds the lower 32 bits
+ * of the address and the other field holds the high-order bits - this may be
+ * 16 bits in the case of MAC addresses, or 32 bits for the hash address.
+ * In terms of memory storage, the first item (bottom) is assumed to be at a
+ * lower address location than 'top', i.e. top should be at the address of
+ * 'bottom' + 4 bytes.
+ */
+struct pfe_mac_addr {
+ u32 bottom; /* Lower 32-bits of address. */
+ u32 top; /* Upper 32-bits of address. */
+};
+
+/*
+ * The following is the organisation of the address filters section of the MAC
+ * registers. The Cadence MAC contains four possible specific address match
+ * addresses; if an incoming frame corresponds to any one of these four
+ * addresses, then the frame will be copied to memory.
+ * It is not necessary for all four of the address match registers to be
+ * programmed; this is application dependent.
+ */
+struct spec_addr {
+ struct pfe_mac_addr one; /* Specific address register 1. */
+ struct pfe_mac_addr two; /* Specific address register 2. */
+ struct pfe_mac_addr three; /* Specific address register 3. */
+ struct pfe_mac_addr four; /* Specific address register 4. */
+};
+
+struct gemac_cfg {
+ u32 mode;
+ u32 speed;
+ u32 duplex;
+};
+
+/* EMAC Hash size */
+#define EMAC_HASH_REG_BITS 64
+
+#define EMAC_SPEC_ADDR_MAX 4
+
+#endif /* _EMAC_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GPI_H_
+#define _GPI_H_
+
+#define GPI_VERSION 0x00
+#define GPI_CTRL 0x04
+#define GPI_RX_CONFIG 0x08
+#define GPI_HDR_SIZE 0x0c
+#define GPI_BUF_SIZE 0x10
+#define GPI_LMEM_ALLOC_ADDR 0x14
+#define GPI_LMEM_FREE_ADDR 0x18
+#define GPI_DDR_ALLOC_ADDR 0x1c
+#define GPI_DDR_FREE_ADDR 0x20
+#define GPI_CLASS_ADDR 0x24
+#define GPI_DRX_FIFO 0x28
+#define GPI_TRX_FIFO 0x2c
+#define GPI_INQ_PKTPTR 0x30
+#define GPI_DDR_DATA_OFFSET 0x34
+#define GPI_LMEM_DATA_OFFSET 0x38
+#define GPI_TMLF_TX 0x4c
+#define GPI_DTX_ASEQ 0x50
+#define GPI_FIFO_STATUS 0x54
+#define GPI_FIFO_DEBUG 0x58
+#define GPI_TX_PAUSE_TIME 0x5c
+#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
+#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
+#define GPI_TOE_CHKSUM_EN 0x68
+#define GPI_OVERRUN_DROPCNT 0x6c
+#define GPI_CSR_MTIP_PAUSE_REG 0x74
+#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
+#define GPI_CSR_RX_CNT 0x7c
+#define GPI_CSR_TX_CNT 0x80
+#define GPI_CSR_DEBUG1 0x84
+#define GPI_CSR_DEBUG2 0x88
+
+struct gpi_cfg {
+ u32 lmem_rtry_cnt;
+ u32 tmlf_txthres;
+ u32 aseq_len;
+ u32 mtip_pause_reg;
+};
+
+/* GPI common defines */
+#define GPI_LMEM_BUF_EN 0x1
+#define GPI_DDR_BUF_EN 0x1
+
+/* EGPI 1 defines */
+#define EGPI1_LMEM_RTRY_CNT 0x40
+#define EGPI1_TMLF_TXTHRES 0xBC
+#define EGPI1_ASEQ_LEN 0x50
+
+/* EGPI 2 defines */
+#define EGPI2_LMEM_RTRY_CNT 0x40
+#define EGPI2_TMLF_TXTHRES 0xBC
+#define EGPI2_ASEQ_LEN 0x40
+
+/* EGPI 3 defines */
+#define EGPI3_LMEM_RTRY_CNT 0x40
+#define EGPI3_TMLF_TXTHRES 0xBC
+#define EGPI3_ASEQ_LEN 0x40
+
+/* HGPI defines */
+#define HGPI_LMEM_RTRY_CNT 0x40
+#define HGPI_TMLF_TXTHRES 0xBC
+#define HGPI_ASEQ_LEN 0x40
+
+#define EGPI_PAUSE_TIME 0x000007D0
+#define EGPI_PAUSE_ENABLE 0x40000000
+#endif /* _GPI_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+/* @file hif.h.
+ * hif - PFE hif block control and status registers.
+ * Mapped on CBUS and accessible from all PEs and ARM.
+ */
+#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
+#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
+#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
+#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
+#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
+#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
+#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
+#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
+#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
+#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
+#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
+#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
+#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
+#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
+#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
+#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
+
+/* HIF_INT_SRC / HIF_INT_ENABLE control bits */
+#define HIF_INT BIT(0)
+#define HIF_RXBD_INT BIT(1)
+#define HIF_RXPKT_INT BIT(2)
+#define HIF_TXBD_INT BIT(3)
+#define HIF_TXPKT_INT BIT(4)
+
+/* HIF_TX_CTRL bits */
+#define HIF_CTRL_DMA_EN BIT(0)
+#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
+#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
+
+/* HIF_RX_STATUS bits */
+#define BDP_CSR_RX_DMA_ACTV BIT(16)
+
+/* HIF_INT_ENABLE bits */
+#define HIF_INT_EN BIT(0)
+#define HIF_RXBD_INT_EN BIT(1)
+#define HIF_RXPKT_INT_EN BIT(2)
+#define HIF_TXBD_INT_EN BIT(3)
+#define HIF_TXPKT_INT_EN BIT(4)
+
+/* HIF_POLL_CTRL bits */
+#define HIF_RX_POLL_CTRL_CYCLE 0x0400
+#define HIF_TX_POLL_CTRL_CYCLE 0x0400
+
+/* HIF_INT_COAL bits */
+#define HIF_INT_COAL_ENABLE BIT(31)
+
+/* Buffer descriptor control bits */
+#define BD_CTRL_BUFLEN_MASK 0x3fff
+#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
+#define BD_CTRL_CBD_INT_EN BIT(16)
+#define BD_CTRL_PKT_INT_EN BIT(17)
+#define BD_CTRL_LIFM BIT(18)
+#define BD_CTRL_LAST_BD BIT(19)
+#define BD_CTRL_DIR BIT(20)
+#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
+#define BD_CTRL_PKT_XFER BIT(24)
+#define BD_CTRL_DESC_EN BIT(31)
+#define BD_CTRL_PARSE_DISABLE BIT(25)
+#define BD_CTRL_BRFETCH_DISABLE BIT(26)
+#define BD_CTRL_RTFETCH_DISABLE BIT(27)
+
+/* Buffer descriptor status bits */
+#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
+#define BD_STATUS_DIR_PROC_ID BIT(16)
+#define BD_STATUS_CONN_ID_EN BIT(17)
+#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
+#define BD_STATUS_LE_DATA BIT(21)
+#define BD_STATUS_CHKSUM_EN BIT(22)
+
+/* HIF Buffer descriptor status bits */
+#define DIR_PROC_ID BIT(16)
+#define PROC_ID(id) ((id) << 18)
+
+#endif /* _HIF_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_NOCPY_H_
+#define _HIF_NOCPY_H_
+
+#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
+#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
+#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
+#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
+#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
+#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
+#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
+#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
+#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
+#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
+#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
+#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
+#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
+#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
+#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
+#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
+#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
+#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
+#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
+#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
+#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
+#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
+#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
+#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
+#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
+#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
+#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
+
+#endif /* _HIF_NOCPY_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TMU_CSR_H_
+#define _TMU_CSR_H_
+
+#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
+#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
+#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
+#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
+#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
+#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
+#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
+#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
+#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
+#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
+#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
+#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
+#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
+#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
+#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
+#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
+#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
+#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
+#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
+#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
+#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
+#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
+#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
+#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
+#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
+#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
+#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
+#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
+#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
+#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
+#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
+#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
+#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
+#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
+#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
+#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
+#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
+#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
+#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
+#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
+#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
+#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
+#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
+#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
+#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
+#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY0
+ */
+#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
+
+#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
+#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
+#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
+#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
+#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
+#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
+#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
+#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
+#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
+#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
+
+/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
+ * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
+ * the internal memory. This address is used to access both the PM and DM of
+ * all the PEs
+ */
+#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
+
+/* Internal Memory Access Write Data */
+#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
+/* Internal Memory Access Read Data. The commands are blocked
+ * at the mem_access only
+ */
+#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
+
+/* [31:0] PHY0 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
+/* [31:0] PHY1 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
+/* [31:0] PHY2 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
+/* [31:0] PHY3 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
+#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
+#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
+
+#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
+#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
+#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
+
+#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
+#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
+#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
+/* [31:0] PHY4 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY1
+ */
+#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY2
+ */
+#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
+/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
+ * This is a global Enable for all schedulers in PHY3
+ */
+#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
+#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
+/* [31:0] PHY5 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
+
+#define SW_RESET BIT(0) /* Global software reset */
+#define INQ_RESET BIT(2)
+#define TEQ_RESET BIT(3)
+#define TDQ_RESET BIT(4)
+#define PE_RESET BIT(5)
+#define MEM_INIT BIT(6)
+#define MEM_INIT_DONE BIT(7)
+#define LLM_INIT BIT(8)
+#define LLM_INIT_DONE BIT(9)
+#define ECC_MEM_INIT_DONE BIT(10)
+
+struct tmu_cfg {
+ u32 pe_sys_clk_ratio;
+ unsigned long llm_base_addr;
+ u32 llm_queue_len;
+};
+
+/* Not HW related for pfe_ctrl / pfe common defines */
+#define DEFAULT_MAX_QDEPTH 80
+#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host tx qos */
+#define DEFAULT_TMU3_QDEPTH 127
+
+#endif /* _TMU_CSR_H_ */
1223 --- /dev/null
1224 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1225 @@ -0,0 +1,61 @@
1226 +/*
1227 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1228 + * Copyright 2017 NXP
1229 + *
1230 + * This program is free software; you can redistribute it and/or modify
1231 + * it under the terms of the GNU General Public License as published by
1232 + * the Free Software Foundation; either version 2 of the License, or
1233 + * (at your option) any later version.
1234 + *
1235 + * This program is distributed in the hope that it will be useful,
1236 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1237 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1238 + * GNU General Public License for more details.
1239 + *
1240 + * You should have received a copy of the GNU General Public License
1241 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1242 + */
1243 +
1244 +#ifndef _UTIL_CSR_H_
1245 +#define _UTIL_CSR_H_
1246 +
1247 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1248 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1249 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1250 +
1251 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1252 +
1253 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1254 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1255 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1256 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1257 +
1258 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1259 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1260 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1261 +
1262 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1263 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1264 +
1265 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1266 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1267 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1268 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1269 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1270 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1271 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1272 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1273 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1274 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1275 +
1276 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1277 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1278 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1279 +
1280 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1281 +
1282 +struct util_cfg {
1283 + u32 pe_sys_clk_ratio;
1284 +};
1285 +
1286 +#endif /* _UTIL_CSR_H_ */
1287 --- /dev/null
1288 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1289 @@ -0,0 +1,372 @@
1290 +/*
1291 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1292 + * Copyright 2017 NXP
1293 + *
1294 + * This program is free software; you can redistribute it and/or modify
1295 + * it under the terms of the GNU General Public License as published by
1296 + * the Free Software Foundation; either version 2 of the License, or
1297 + * (at your option) any later version.
1298 + *
1299 + * This program is distributed in the hope that it will be useful,
1300 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1301 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1302 + * GNU General Public License for more details.
1303 + *
1304 + * You should have received a copy of the GNU General Public License
1305 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1306 + */
1307 +
1308 +#ifndef _PFE_H_
1309 +#define _PFE_H_
1310 +
1311 +#include "cbus.h"
1312 +
1313 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1314 +/*
1315 + * Only valid for mem access register interface
1316 + */
1317 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1318 +#define CLASS_DMEM_SIZE 0x00002000
1319 +#define CLASS_IMEM_SIZE 0x00008000
1320 +
1321 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1322 +/*
1323 + * Only valid for mem access register interface
1324 + */
1325 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1326 +#define TMU_DMEM_SIZE 0x00000800
1327 +#define TMU_IMEM_SIZE 0x00002000
1328 +
1329 +#define UTIL_DMEM_BASE_ADDR 0x00000000
1330 +#define UTIL_DMEM_SIZE 0x00002000
1331 +
1332 +#define PE_LMEM_BASE_ADDR 0xc3010000
1333 +#define PE_LMEM_SIZE 0x8000
1334 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1335 +
1336 +#define DMEM_BASE_ADDR 0x00000000
1337 +#define DMEM_SIZE 0x2000 /* TMU has less... */
1338 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
1339 +
1340 +#define PMEM_BASE_ADDR 0x00010000
1341 +#define PMEM_SIZE 0x8000 /* TMU has less... */
1342 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
1343 +
1344 +/* These check memory ranges from the PE's point of view / memory map */
1345 +#define IS_DMEM(addr, len) \
1346 + ({ typeof(addr) addr_ = (addr); \
1347 + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
1348 + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1349 +
1350 +#define IS_PMEM(addr, len) \
1351 + ({ typeof(addr) addr_ = (addr); \
1352 + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
1353 + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1354 +
1355 +#define IS_PE_LMEM(addr, len) \
1356 + ({ typeof(addr) addr_ = (addr); \
1357 + ((unsigned long)(addr_) >= \
1358 + PE_LMEM_BASE_ADDR) && \
1359 + (((unsigned long)(addr_) + \
1360 + (len)) <= PE_LMEM_END); })
1361 +
1362 +#define IS_PFE_LMEM(addr, len) \
1363 + ({ typeof(addr) addr_ = (addr); \
1364 + ((unsigned long)(addr_) >= \
1365 + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
1366 + (((unsigned long)(addr_) + (len)) <= \
1367 + CBUS_VIRT_TO_PFE(LMEM_END)); })
1368 +
1369 +#define __IS_PHYS_DDR(addr, len) \
1370 + ({ typeof(addr) addr_ = (addr); \
1371 + ((unsigned long)(addr_) >= \
1372 + DDR_PHYS_BASE_ADDR) && \
1373 + (((unsigned long)(addr_) + (len)) <= \
1374 + DDR_PHYS_END); })
1375 +
1376 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
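
A minimal usage sketch for the range checks above (hedged: dmem_dst, length
and src_buf are hypothetical, and pe_dmem_memcpy_to32() is declared further
down this header):

    /* Validate a PE-visible destination before copying into DMEM. */
    u32 dmem_dst = DMEM_BASE_ADDR + 0x100;      /* illustrative offset */

    if (!IS_DMEM(dmem_dst, length))
            return -EINVAL;                     /* outside the DMEM window */
    pe_dmem_memcpy_to32(CLASS0_ID, dmem_dst, src_buf, length);
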
1377 +
1378 +/*
1379 + * Use these definitions when the cbus base address is a run-time virtual address
1380 + */
1381 +extern void *cbus_base_addr;
1382 +extern void *ddr_base_addr;
1383 +extern unsigned long ddr_phys_base_addr;
1384 +extern unsigned int ddr_size;
1385 +
1386 +#define CBUS_BASE_ADDR cbus_base_addr
1387 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
1388 +#define DDR_BASE_ADDR ddr_base_addr
1389 +#define DDR_SIZE ddr_size
1390 +
1391 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1392 +
1393 +#define LS1012A_PFE_RESET_WA	/*
1394 +				 * PFE has no global reset; re-init
1395 +				 * must take care of a few things to
1396 +				 * make PFE functional after reset
1397 +				 */
1398 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
1399 + * as seen by PE's.
1400 + */
1401 +/* CBUS physical base address as seen by PE's. */
1402 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
1403 +
1404 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
1405 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
1406 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1407 + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1408 +/* Translates to PFE address map */
1409 +
1410 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1411 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1412 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1413 +
1414 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
1415 + PFE_CBUS_PHYS_BASE_ADDR)
1416 +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
1417 + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
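
A worked example of the DDR translations above (the physical address
0x83400000 is illustrative only):

    /*
     * DDR_PHYS_TO_PFE(0x83400000) = 0x83400000 & 0x7fffffff = 0x03400000
     * DDR_PFE_TO_PHYS(0x03400000) = 0x03400000 | 0x80000000 = 0x83400000
     *
     * i.e. the PEs see DDR through a window with address bit 31 stripped,
     * and the host recovers the physical address by setting it again.
     */
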
1418 +
1419 +/* The definitions below are used by the host-side QoS control driver */
1420 +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
1421 + * pe's
1422 + */
1423 +
1424 +enum {
1425 + CLASS0_ID = 0,
1426 + CLASS1_ID,
1427 + CLASS2_ID,
1428 + CLASS3_ID,
1429 + CLASS4_ID,
1430 + CLASS5_ID,
1431 + TMU0_ID,
1432 + TMU1_ID,
1433 + TMU2_ID,
1434 + TMU3_ID,
1435 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1436 + UTIL_ID,
1437 +#endif
1438 + MAX_PE
1439 +};
1440 +
1441 +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1442 + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1443 + BIT(CLASS4_ID) | BIT(CLASS5_ID))
1444 +#define CLASS_MAX_ID CLASS5_ID
1445 +
1446 +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1447 + BIT(TMU3_ID))
1448 +
1449 +#define TMU_MAX_ID TMU3_ID
1450 +
1451 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1452 +#define UTIL_MASK BIT(UTIL_ID)
1453 +#endif
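
For reference, with the PE ids enumerated above these masks expand as follows
(assuming a build with the util PE enabled):

    /*
     * CLASS_MASK = 0x03f  (bits 0-5, CLASS0..CLASS5)
     * TMU_MASK   = 0x2c0  (bits 6, 7 and 9; TMU2 at bit 8 is skipped)
     * UTIL_MASK  = 0x400  (bit 10)
     *
     * pe_sync_stop() in pfe_ctrl.c further masks with 0x2ff, which clears
     * bit 8 (TMU2) and bit 10 (UTIL).
     */
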
1454 +
1455 +struct pe_status {
1456 + u32 cpu_state;
1457 + u32 activity_counter;
1458 + u32 rx;
1459 + union {
1460 + u32 tx;
1461 + u32 tmu_qstatus;
1462 + };
1463 + u32 drop;
1464 +#if defined(CFG_PE_DEBUG)
1465 + u32 debug_indicator;
1466 + u32 debug[16];
1467 +#endif
1468 +} __aligned(16);
1469 +
1470 +struct pe_sync_mailbox {
1471 + u32 stop;
1472 + u32 stopped;
1473 +};
1474 +
1475 +/* Drop counter definitions */
1476 +
1477 +#define CLASS_NUM_DROP_COUNTERS 13
1478 +#define UTIL_NUM_DROP_COUNTERS 8
1479 +
1480 +/* PE information.
1481 + * Structure containing PE-specific information. It is used to create
1482 + * generic C functions common to all PEs.
1483 + * Before using the library functions this structure needs to be
1484 + * initialized with the virtual addresses of the different registers
1485 + * (according to the ARM MMU mapping). The default initialization supports
1486 + * a virtual == physical mapping.
1487 + */
1488 +struct pe_info {
1489 + u32 dmem_base_addr; /* PE's dmem base address */
1490 + u32 pmem_base_addr; /* PE's pmem base address */
1491 + u32 pmem_size; /* PE's pmem size */
1492 +
1493 + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1494 + * address
1495 + */
1496 + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
1497 + * address
1498 + */
1499 + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1500 + * address
1501 + */
1502 +};
1503 +
1504 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1505 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1506 +
1507 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1508 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1509 +
1510 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1511 +
1512 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1513 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1514 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1515 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1516 +void class_bus_write(u32 val, u32 addr, u8 size);
1517 +u32 class_bus_read(u32 addr, u8 size);
1518 +
1519 +#define class_bus_readl(addr) class_bus_read(addr, 4)
1520 +#define class_bus_readw(addr) class_bus_read(addr, 2)
1521 +#define class_bus_readb(addr) class_bus_read(addr, 1)
1522 +
1523 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
1524 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
1525 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
1526 +
1527 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
1528 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
1529 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
1530 +
1531 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
1532 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
1533 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
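
A short sketch of the sized-access wrappers above; the DMEM offset and value
are illustrative, and the cpu_to_be32() conversion mirrors the convention
pfe_ctrl.c uses when writing PE mailboxes:

    pe_dmem_writel(CLASS0_ID, cpu_to_be32(0x1), 0x100);  /* illustrative */
    val = pe_dmem_readl(CLASS0_ID, 0x100);               /* same word back */
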
1534 +
1535 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1536 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1537 + struct device *dev);
1538 +
1539 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1540 + unsigned int ddr_size);
1541 +void bmu_init(void *base, struct BMU_CFG *cfg);
1542 +void bmu_reset(void *base);
1543 +void bmu_enable(void *base);
1544 +void bmu_disable(void *base);
1545 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1546 +
1547 +/*
1548 + * An enumerated type for loopback values. It can take one of three values:
1549 + * no loopback (normal operation), local loopback (through the MAC's
1550 + * internal loopback module) or PHY loopback (through the external PHY).
1551 + */
1552 +#ifndef __MAC_LOOP_ENUM__
1553 +#define __MAC_LOOP_ENUM__
1554 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1555 +#endif
1556 +
1557 +void gemac_init(void *base, void *config);
1558 +void gemac_disable_rx_checksum_offload(void *base);
1559 +void gemac_enable_rx_checksum_offload(void *base);
1560 +void gemac_set_mdc_div(void *base, int mdc_div);
1561 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1562 +void gemac_set_duplex(void *base, int duplex);
1563 +void gemac_set_mode(void *base, int mode);
1564 +void gemac_enable(void *base);
1565 +void gemac_tx_disable(void *base);
1566 +void gemac_tx_enable(void *base);
1567 +void gemac_disable(void *base);
1568 +void gemac_reset(void *base);
1569 +void gemac_set_address(void *base, struct spec_addr *addr);
1570 +struct spec_addr gemac_get_address(void *base);
1571 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1572 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1573 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1574 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1575 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1576 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1577 + unsigned int entry_index);
1578 +void gemac_clear_laddr1(void *base);
1579 +void gemac_clear_laddr2(void *base);
1580 +void gemac_clear_laddr3(void *base);
1581 +void gemac_clear_laddr4(void *base);
1582 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1583 +struct pfe_mac_addr gemac_get_hash(void *base);
1584 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1585 +struct pfe_mac_addr gem_get_laddr1(void *base);
1586 +struct pfe_mac_addr gem_get_laddr2(void *base);
1587 +struct pfe_mac_addr gem_get_laddr3(void *base);
1588 +struct pfe_mac_addr gem_get_laddr4(void *base);
1589 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1590 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1591 +void gemac_allow_broadcast(void *base);
1592 +void gemac_no_broadcast(void *base);
1593 +void gemac_enable_1536_rx(void *base);
1594 +void gemac_disable_1536_rx(void *base);
1595 +void gemac_enable_rx_jmb(void *base);
1596 +void gemac_disable_rx_jmb(void *base);
1597 +void gemac_enable_stacked_vlan(void *base);
1598 +void gemac_disable_stacked_vlan(void *base);
1599 +void gemac_enable_pause_rx(void *base);
1600 +void gemac_disable_pause_rx(void *base);
1601 +void gemac_enable_copy_all(void *base);
1602 +void gemac_disable_copy_all(void *base);
1603 +void gemac_set_bus_width(void *base, int width);
1604 +void gemac_set_wol(void *base, u32 wol_conf);
1605 +
1606 +void gpi_init(void *base, struct gpi_cfg *cfg);
1607 +void gpi_reset(void *base);
1608 +void gpi_enable(void *base);
1609 +void gpi_disable(void *base);
1610 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1611 +
1612 +void class_init(struct class_cfg *cfg);
1613 +void class_reset(void);
1614 +void class_enable(void);
1615 +void class_disable(void);
1616 +void class_set_config(struct class_cfg *cfg);
1617 +
1618 +void tmu_reset(void);
1619 +void tmu_init(struct tmu_cfg *cfg);
1620 +void tmu_enable(u32 pe_mask);
1621 +void tmu_disable(u32 pe_mask);
1622 +u32 tmu_qstatus(u32 if_id);
1623 +u32 tmu_pkts_processed(u32 if_id);
1624 +
1625 +void util_init(struct util_cfg *cfg);
1626 +void util_reset(void);
1627 +void util_enable(void);
1628 +void util_disable(void);
1629 +
1630 +void hif_init(void);
1631 +void hif_tx_enable(void);
1632 +void hif_tx_disable(void);
1633 +void hif_rx_enable(void);
1634 +void hif_rx_disable(void);
1635 +
1636 +/* Get Chip Revision level
1637 + *
1638 + */
1639 +static inline unsigned int CHIP_REVISION(void)
1640 +{
1641 +	/* For LS1012A, always return 1 */
1642 + return 1;
1643 +}
1644 +
1645 +/* Start HIF rx DMA
1646 + *
1647 + */
1648 +static inline void hif_rx_dma_start(void)
1649 +{
1650 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1651 +}
1652 +
1653 +/* Start HIF tx DMA
1654 + *
1655 + */
1656 +static inline void hif_tx_dma_start(void)
1657 +{
1658 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1659 +}
1660 +
1661 +#endif /* _PFE_H_ */
1662 --- /dev/null
1663 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1664 @@ -0,0 +1,238 @@
1665 +/*
1666 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1667 + * Copyright 2017 NXP
1668 + *
1669 + * This program is free software; you can redistribute it and/or modify
1670 + * it under the terms of the GNU General Public License as published by
1671 + * the Free Software Foundation; either version 2 of the License, or
1672 + * (at your option) any later version.
1673 + *
1674 + * This program is distributed in the hope that it will be useful,
1675 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1676 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1677 + * GNU General Public License for more details.
1678 + *
1679 + * You should have received a copy of the GNU General Public License
1680 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1681 + */
1682 +
1683 +#include <linux/kernel.h>
1684 +#include <linux/sched.h>
1685 +#include <linux/module.h>
1686 +#include <linux/list.h>
1687 +#include <linux/kthread.h>
1688 +
1689 +#include "pfe_mod.h"
1690 +#include "pfe_ctrl.h"
1691 +
1692 +#define TIMEOUT_MS 1000
1693 +
1694 +int relax(unsigned long end)
1695 +{
1696 + if (time_after(jiffies, end)) {
1697 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
1698 + return -1;
1699 +
1700 + if (need_resched())
1701 + schedule();
1702 + }
1703 +
1704 + return 0;
1705 +}
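
relax() implements the cooperative timeout pattern used by the poll loops in
this file: the caller computes an end time up front, then calls relax() on
every pass to yield the CPU and to detect expiry. A hedged sketch
(condition_met() is hypothetical):

    unsigned long end = jiffies + 2;  /* small head start, as in pe_sync_stop() */

    while (!condition_met()) {
            if (relax(end) < 0)
                    return -EIO;      /* ~TIMEOUT_MS elapsed past 'end' */
    }
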
1706 +
1707 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
1708 +{
1709 + int id;
1710 +
1711 + mutex_lock(&ctrl->mutex);
1712 +
1713 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
1714 + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
1715 +
1716 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1717 + if (id == TMU2_ID)
1718 + continue;
1719 + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
1720 + }
1721 +
1722 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1723 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
1724 +#endif
1725 + mutex_unlock(&ctrl->mutex);
1726 +}
1727 +
1728 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
1729 +{
1730 + int pe_mask = CLASS_MASK | TMU_MASK;
1731 +
1732 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1733 + pe_mask |= UTIL_MASK;
1734 +#endif
1735 + mutex_lock(&ctrl->mutex);
1736 + pe_start(&pfe->ctrl, pe_mask);
1737 + mutex_unlock(&ctrl->mutex);
1738 +}
1739 +
1740 +/* PE sync stop.
1741 + * Stops packet processing for a list of PE's (specified using a bitmask).
1742 + * The caller must hold ctrl->mutex.
1743 + *
1744 + * @param ctrl Control context
1745 + * @param pe_mask Mask of PE id's to stop
1746 + *
1747 + */
1748 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
1749 +{
1750 + struct pe_sync_mailbox *mbox;
1751 + int pe_stopped = 0;
1752 + unsigned long end = jiffies + 2;
1753 + int i;
1754 +
1755 +	pe_mask &= 0x2FF; /* Exclude UTIL + TMU2 */
1756 +
1757 + for (i = 0; i < MAX_PE; i++)
1758 + if (pe_mask & (1 << i)) {
1759 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1760 +
1761 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
1762 + long)&mbox->stop, 4);
1763 + }
1764 +
1765 + while (pe_stopped != pe_mask) {
1766 + for (i = 0; i < MAX_PE; i++)
1767 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1768 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1769 +
1770 + if (pe_dmem_read(i, (unsigned
1771 + long)&mbox->stopped, 4) &
1772 + cpu_to_be32(0x1))
1773 + pe_stopped |= (1 << i);
1774 + }
1775 +
1776 + if (relax(end) < 0)
1777 + goto err;
1778 + }
1779 +
1780 + return 0;
1781 +
1782 +err:
1783 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1784 +
1785 + for (i = 0; i < MAX_PE; i++)
1786 + if (pe_mask & (1 << i)) {
1787 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1788 +
1789 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1790 + long)&mbox->stop, 4);
1791 + }
1792 +
1793 + return -EIO;
1794 +}
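
A hedged sketch tying pe_sync_stop() and pe_start() together, per the locking
rule in the comment above (the DMEM update in the middle is a placeholder):

    mutex_lock(&ctrl->mutex);
    if (!pe_sync_stop(ctrl, CLASS_MASK)) {
            /* ... rewrite DMEM/shared tables while the class PEs are idle ... */
            pe_start(ctrl, CLASS_MASK);
    }
    mutex_unlock(&ctrl->mutex);
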
1795 +
1796 +/* PE start.
1797 + * Starts packet processing for a list of PE's (specified using a bitmask).
1798 + * The caller must hold ctrl->mutex.
1799 + *
1800 + * @param ctrl Control context
1801 + * @param pe_mask Mask of PE id's to start
1802 + *
1803 + */
1804 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
1805 +{
1806 + struct pe_sync_mailbox *mbox;
1807 + int i;
1808 +
1809 + for (i = 0; i < MAX_PE; i++)
1810 + if (pe_mask & (1 << i)) {
1811 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1812 +
1813 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1814 + long)&mbox->stop, 4);
1815 + }
1816 +}
1817 +
1818 +/* This function will ensure all PEs are put into the idle state */
1819 +int pe_reset_all(struct pfe_ctrl *ctrl)
1820 +{
1821 + struct pe_sync_mailbox *mbox;
1822 + int pe_stopped = 0;
1823 + unsigned long end = jiffies + 2;
1824 + int i;
1825 + int pe_mask = CLASS_MASK | TMU_MASK;
1826 +
1827 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1828 + pe_mask |= UTIL_MASK;
1829 +#endif
1830 +
1831 + for (i = 0; i < MAX_PE; i++)
1832 + if (pe_mask & (1 << i)) {
1833 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1834 +
1835 + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
1836 + long)&mbox->stop, 4);
1837 + }
1838 +
1839 + while (pe_stopped != pe_mask) {
1840 + for (i = 0; i < MAX_PE; i++)
1841 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1842 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1843 +
1844 + if (pe_dmem_read(i, (unsigned long)
1845 + &mbox->stopped, 4) &
1846 + cpu_to_be32(0x1))
1847 + pe_stopped |= (1 << i);
1848 + }
1849 +
1850 + if (relax(end) < 0)
1851 + goto err;
1852 + }
1853 +
1854 + return 0;
1855 +
1856 +err:
1857 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1858 + return -EIO;
1859 +}
1860 +
1861 +int pfe_ctrl_init(struct pfe *pfe)
1862 +{
1863 + struct pfe_ctrl *ctrl = &pfe->ctrl;
1864 + int id;
1865 +
1866 + pr_info("%s\n", __func__);
1867 +
1868 + mutex_init(&ctrl->mutex);
1869 + spin_lock_init(&ctrl->lock);
1870 +
1871 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
1872 + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
1873 + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
1874 + }
1875 +
1876 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1877 + if (id == TMU2_ID)
1878 + continue;
1879 + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
1880 + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
1881 + }
1882 +
1883 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1884 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
1885 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
1886 +#endif
1887 +
1888 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
1889 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
1890 + ROUTE_TABLE_BASEADDR;
1891 +
1892 + ctrl->dev = pfe->dev;
1893 +
1894 + pr_info("%s finished\n", __func__);
1895 +
1896 + return 0;
1897 +}
1898 +
1899 +void pfe_ctrl_exit(struct pfe *pfe)
1900 +{
1901 + pr_info("%s\n", __func__);
1902 +}
1903 --- /dev/null
1904 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
1905 @@ -0,0 +1,112 @@
1906 +/*
1907 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1908 + * Copyright 2017 NXP
1909 + *
1910 + * This program is free software; you can redistribute it and/or modify
1911 + * it under the terms of the GNU General Public License as published by
1912 + * the Free Software Foundation; either version 2 of the License, or
1913 + * (at your option) any later version.
1914 + *
1915 + * This program is distributed in the hope that it will be useful,
1916 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1917 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1918 + * GNU General Public License for more details.
1919 + *
1920 + * You should have received a copy of the GNU General Public License
1921 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1922 + */
1923 +
1924 +#ifndef _PFE_CTRL_H_
1925 +#define _PFE_CTRL_H_
1926 +
1927 +#include <linux/dmapool.h>
1928 +
1929 +#include "pfe_mod.h"
1930 +#include "pfe/pfe.h"
1931 +
1932 +#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
1933 +#define DMA_BUF_SIZE_256	0x100
1934 +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
1935 +#define DMA_BUF_SIZE_512	0x200
1936 +/* 512-byte DMA buffers used by the RTP relay feature */
1937 +#define DMA_BUF_MIN_ALIGNMENT	8
1938 +#define DMA_BUF_BOUNDARY	(4 * 1024)
1939 +/* bursts cannot cross a 4 KiB boundary */
1940 +
1941 +#define CMD_TX_ENABLE 0x0501
1942 +#define CMD_TX_DISABLE 0x0502
1943 +
1944 +#define CMD_RX_LRO 0x0011
1945 +#define CMD_PKTCAP_ENABLE 0x0d01
1946 +#define CMD_QM_EXPT_RATE 0x020c
1947 +
1948 +#define CLASS_DM_SH_STATIC (0x800)
1949 +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
1950 +#define CLASS_DM_SYNC_MBOX (0x808)
1951 +#define CLASS_DM_MSG_MBOX (0x810)
1952 +#define CLASS_DM_DROP_CNTR (0x820)
1953 +#define CLASS_DM_RESUME (0x854)
1954 +#define CLASS_DM_PESTATUS (0x860)
1955 +
1956 +#define TMU_DM_SH_STATIC (0x80)
1957 +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
1958 +#define TMU_DM_SYNC_MBOX (0x88)
1959 +#define TMU_DM_MSG_MBOX (0x90)
1960 +#define TMU_DM_RESUME (0xA0)
1961 +#define TMU_DM_PESTATUS (0xB0)
1962 +#define TMU_DM_CONTEXT (0x300)
1963 +#define TMU_DM_TX_TRANS (0x480)
1964 +
1965 +#define UTIL_DM_SH_STATIC (0x0)
1966 +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
1967 +#define UTIL_DM_SYNC_MBOX (0x8)
1968 +#define UTIL_DM_MSG_MBOX (0x10)
1969 +#define UTIL_DM_DROP_CNTR (0x20)
1970 +#define UTIL_DM_RESUME (0x40)
1971 +#define UTIL_DM_PESTATUS (0x50)
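
The *_DM_SYNC_MBOX offsets locate a struct pe_sync_mailbox (see pfe/pfe.h) in
each PE's data memory; its 'stopped' acknowledgment word sits 4 bytes past the
base. A sketch of polling one PE, mirroring what pe_sync_stop() does:

    u32 ack = pe_dmem_readl(CLASS0_ID, CLASS_DM_SYNC_MBOX + 4);

    if (ack & cpu_to_be32(0x1))
            ; /* the PE has acknowledged the stop request and is idle */
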
1972 +
1973 +struct pfe_ctrl {
1974 + struct mutex mutex; /* to serialize pfe control access */
1975 + spinlock_t lock;
1976 +
1977 + void *dma_pool;
1978 + void *dma_pool_512;
1979 + void *dma_pool_128;
1980 +
1981 + struct device *dev;
1982 +
1983 + void *hash_array_baseaddr; /*
1984 + * Virtual base address of
1985 + * the conntrack hash array
1986 + */
1987 + unsigned long hash_array_phys_baseaddr; /*
1988 + * Physical base address of
1989 + * the conntrack hash array
1990 + */
1991 +
1992 + int (*event_cb)(u16, u16, u16*);
1993 +
1994 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
1995 + * Sync mailbox PFE
1996 + * internal address,
1997 + * initialized
1998 + * when parsing elf images
1999 + */
2000 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2001 + * Msg mailbox PFE internal
2002 + * address, initialized
2003 + * when parsing elf images
2004 + */
2005 + unsigned int sys_clk; /* AXI clock value, in KHz */
2006 +};
2007 +
2008 +int pfe_ctrl_init(struct pfe *pfe);
2009 +void pfe_ctrl_exit(struct pfe *pfe);
2010 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2011 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2012 +int pe_reset_all(struct pfe_ctrl *ctrl);
2013 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2014 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2015 +int relax(unsigned long end);
2016 +
2017 +#endif /* _PFE_CTRL_H_ */
2018 --- /dev/null
2019 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2020 @@ -0,0 +1,111 @@
2021 +/*
2022 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2023 + * Copyright 2017 NXP
2024 + *
2025 + * This program is free software; you can redistribute it and/or modify
2026 + * it under the terms of the GNU General Public License as published by
2027 + * the Free Software Foundation; either version 2 of the License, or
2028 + * (at your option) any later version.
2029 + *
2030 + * This program is distributed in the hope that it will be useful,
2031 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2032 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2033 + * GNU General Public License for more details.
2034 + *
2035 + * You should have received a copy of the GNU General Public License
2036 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2037 + */
2038 +
2039 +#include <linux/module.h>
2040 +#include <linux/debugfs.h>
2041 +#include <linux/platform_device.h>
2042 +
2043 +#include "pfe_mod.h"
2044 +
2045 +static int dmem_show(struct seq_file *s, void *unused)
2046 +{
2047 + u32 dmem_addr, val;
2048 + int id = (long int)s->private;
2049 + int i;
2050 +
2051 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2052 + seq_printf(s, "%04x:", dmem_addr);
2053 +
2054 + for (i = 0; i < 8; i++) {
2055 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2056 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2057 + (val >> 8) & 0xff, (val >> 16) & 0xff,
2058 + (val >> 24) & 0xff);
2059 + }
2060 +
2061 + seq_puts(s, "\n");
2062 + }
2063 +
2064 + return 0;
2065 +}
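
dmem_show() renders each 32-byte row of PE data memory as an offset followed
by a byte-wise hex dump. The resulting format looks like the sketch below
(values illustrative); once the files are registered further down, a PE can be
inspected from userspace with cat /sys/kernel/debug/pfe/pe0_dmem (path
assuming the default debugfs mount point):

    /*
     * 0000: 00 01 02 03 04 05 06 07 ... (8 words, 32 bytes per row)
     * 0020: ...
     */
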
2066 +
2067 +static int dmem_open(struct inode *inode, struct file *file)
2068 +{
2069 + return single_open(file, dmem_show, inode->i_private);
2070 +}
2071 +
2072 +static const struct file_operations dmem_fops = {
2073 + .open = dmem_open,
2074 + .read = seq_read,
2075 + .llseek = seq_lseek,
2076 + .release = single_release,
2077 +};
2078 +
2079 +int pfe_debugfs_init(struct pfe *pfe)
2080 +{
2081 + struct dentry *d;
2082 +
2083 + pr_info("%s\n", __func__);
2084 +
2085 + pfe->dentry = debugfs_create_dir("pfe", NULL);
2086 + if (IS_ERR_OR_NULL(pfe->dentry))
2087 + goto err_dir;
2088 +
2089 + d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
2090 + &dmem_fops);
2091 + if (IS_ERR_OR_NULL(d))
2092 + goto err_pe;
2093 +
2094 + d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
2095 + &dmem_fops);
2096 + if (IS_ERR_OR_NULL(d))
2097 + goto err_pe;
2098 +
2099 + d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
2100 + &dmem_fops);
2101 + if (IS_ERR_OR_NULL(d))
2102 + goto err_pe;
2103 +
2104 + d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
2105 + &dmem_fops);
2106 + if (IS_ERR_OR_NULL(d))
2107 + goto err_pe;
2108 +
2109 + d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
2110 + &dmem_fops);
2111 + if (IS_ERR_OR_NULL(d))
2112 + goto err_pe;
2113 +
2114 + d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
2115 + &dmem_fops);
2116 + if (IS_ERR_OR_NULL(d))
2117 + goto err_pe;
2118 +
2119 + return 0;
2120 +
2121 +err_pe:
2122 + debugfs_remove_recursive(pfe->dentry);
2123 +
2124 +err_dir:
2125 + return -1;
2126 +}
2127 +
2128 +void pfe_debugfs_exit(struct pfe *pfe)
2129 +{
2130 + debugfs_remove_recursive(pfe->dentry);
2131 +}
2132 --- /dev/null
2133 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2134 @@ -0,0 +1,25 @@
2135 +/*
2136 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2137 + * Copyright 2017 NXP
2138 + *
2139 + * This program is free software; you can redistribute it and/or modify
2140 + * it under the terms of the GNU General Public License as published by
2141 + * the Free Software Foundation; either version 2 of the License, or
2142 + * (at your option) any later version.
2143 + *
2144 + * This program is distributed in the hope that it will be useful,
2145 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2146 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2147 + * GNU General Public License for more details.
2148 + *
2149 + * You should have received a copy of the GNU General Public License
2150 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2151 + */
2152 +
2153 +#ifndef _PFE_DEBUGFS_H_
2154 +#define _PFE_DEBUGFS_H_
2155 +
2156 +int pfe_debugfs_init(struct pfe *pfe);
2157 +void pfe_debugfs_exit(struct pfe *pfe);
2158 +
2159 +#endif /* _PFE_DEBUGFS_H_ */
2160 --- /dev/null
2161 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2162 @@ -0,0 +1,2434 @@
2163 +/*
2164 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2165 + * Copyright 2017 NXP
2166 + *
2167 + * This program is free software; you can redistribute it and/or modify
2168 + * it under the terms of the GNU General Public License as published by
2169 + * the Free Software Foundation; either version 2 of the License, or
2170 + * (at your option) any later version.
2171 + *
2172 + * This program is distributed in the hope that it will be useful,
2173 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2174 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2175 + * GNU General Public License for more details.
2176 + *
2177 + * You should have received a copy of the GNU General Public License
2178 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2179 + */
2180 +
2181 +/* pfe_eth.c
2182 + * Ethernet driver to handle the exception path for PFE.
2183 + * - uses HIF functions to send/receive packets.
2184 + * - uses ctrl functions to start/stop interfaces.
2185 + * - uses direct register accesses to control phy operation.
2186 + */
2187 +#include <linux/version.h>
2188 +#include <linux/kernel.h>
2189 +#include <linux/interrupt.h>
2190 +#include <linux/dma-mapping.h>
2191 +#include <linux/dmapool.h>
2192 +#include <linux/netdevice.h>
2193 +#include <linux/etherdevice.h>
2194 +#include <linux/ethtool.h>
2195 +#include <linux/mii.h>
2196 +#include <linux/phy.h>
2197 +#include <linux/timer.h>
2198 +#include <linux/hrtimer.h>
2199 +#include <linux/platform_device.h>
2200 +
2201 +#include <net/ip.h>
2202 +#include <net/sock.h>
2203 +
2204 +#include <linux/io.h>
2205 +#include <asm/irq.h>
2206 +#include <linux/delay.h>
2207 +#include <linux/regmap.h>
2208 +#include <linux/i2c.h>
2209 +
2210 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2211 +#include <net/netfilter/nf_conntrack.h>
2212 +#endif
2213 +
2214 +#include "pfe_mod.h"
2215 +#include "pfe_eth.h"
2216 +
2217 +static void *cbus_emac_base[3];
2218 +static void *cbus_gpi_base[3];
2219 +
2220 +/* Forward Declaration */
2221 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2222 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2223 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2224 + from_tx, int n_desc);
2225 +
2226 +unsigned int gemac_regs[] = {
2227 + 0x0004, /* Interrupt event */
2228 + 0x0008, /* Interrupt mask */
2229 + 0x0024, /* Ethernet control */
2230 + 0x0064, /* MIB Control/Status */
2231 + 0x0084, /* Receive control/status */
2232 + 0x00C4, /* Transmit control */
2233 + 0x00E4, /* Physical address low */
2234 + 0x00E8, /* Physical address high */
2235 +	0x0144, /* Transmit FIFO Watermark and Store and Forward Control */
2236 + 0x0190, /* Receive FIFO Section Full Threshold */
2237 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
2238 + 0x01B0, /* Frame Truncation Length */
2239 +};
2240 +
2241 +/********************************************************************/
2242 +/* SYSFS INTERFACE */
2243 +/********************************************************************/
2244 +
2245 +#ifdef PFE_ETH_NAPI_STATS
2246 +/*
2247 + * pfe_eth_show_napi_stats
2248 + */
2249 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2250 + struct device_attribute *attr,
2251 + char *buf)
2252 +{
2253 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2254 + ssize_t len = 0;
2255 +
2256 + len += sprintf(buf + len, "sched: %u\n",
2257 + priv->napi_counters[NAPI_SCHED_COUNT]);
2258 + len += sprintf(buf + len, "poll: %u\n",
2259 + priv->napi_counters[NAPI_POLL_COUNT]);
2260 + len += sprintf(buf + len, "packet: %u\n",
2261 + priv->napi_counters[NAPI_PACKET_COUNT]);
2262 + len += sprintf(buf + len, "budget: %u\n",
2263 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2264 + len += sprintf(buf + len, "desc: %u\n",
2265 + priv->napi_counters[NAPI_DESC_COUNT]);
2266 +
2267 + return len;
2268 +}
2269 +
2270 +/*
2271 + * pfe_eth_set_napi_stats
2272 + */
2273 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2274 + struct device_attribute *attr,
2275 + const char *buf, size_t count)
2276 +{
2277 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2278 +
2279 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2280 +
2281 + return count;
2282 +}
2283 +#endif
2284 +#ifdef PFE_ETH_TX_STATS
2285 +/* pfe_eth_show_tx_stats
2286 + *
2287 + */
2288 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2289 + struct device_attribute *attr,
2290 + char *buf)
2291 +{
2292 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2293 + ssize_t len = 0;
2294 + int i;
2295 +
2296 + len += sprintf(buf + len, "TX queues stats:\n");
2297 +
2298 + for (i = 0; i < emac_txq_cnt; i++) {
2299 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2300 + i);
2301 +
2302 + len += sprintf(buf + len, "\n");
2303 + __netif_tx_lock_bh(tx_queue);
2304 +
2305 + hif_tx_lock(&pfe->hif);
2306 + len += sprintf(buf + len,
2307 + "Queue %2d : credits = %10d\n"
2308 + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2309 + len += sprintf(buf + len,
2310 + " tx packets = %10d\n"
2311 + , pfe->tmu_credit.tx_packets[priv->id][i]);
2312 + hif_tx_unlock(&pfe->hif);
2313 +
2314 +		/* Don't output additional stats if the queue was never used */
2315 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
2316 + goto skip;
2317 +
2318 + len += sprintf(buf + len,
2319 + " clean_fail = %10d\n"
2320 + , priv->clean_fail[i]);
2321 + len += sprintf(buf + len,
2322 + " stop_queue = %10d\n"
2323 + , priv->stop_queue_total[i]);
2324 + len += sprintf(buf + len,
2325 + " stop_queue_hif = %10d\n"
2326 + , priv->stop_queue_hif[i]);
2327 + len += sprintf(buf + len,
2328 + " stop_queue_hif_client = %10d\n"
2329 + , priv->stop_queue_hif_client[i]);
2330 + len += sprintf(buf + len,
2331 + " stop_queue_credit = %10d\n"
2332 + , priv->stop_queue_credit[i]);
2333 +skip:
2334 + __netif_tx_unlock_bh(tx_queue);
2335 + }
2336 + return len;
2337 +}
2338 +
2339 +/* pfe_eth_set_tx_stats
2340 + *
2341 + */
2342 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2343 + struct device_attribute *attr,
2344 + const char *buf, size_t count)
2345 +{
2346 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2347 + int i;
2348 +
2349 + for (i = 0; i < emac_txq_cnt; i++) {
2350 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2351 + i);
2352 +
2353 + __netif_tx_lock_bh(tx_queue);
2354 + priv->clean_fail[i] = 0;
2355 + priv->stop_queue_total[i] = 0;
2356 + priv->stop_queue_hif[i] = 0;
2357 + priv->stop_queue_hif_client[i] = 0;
2358 + priv->stop_queue_credit[i] = 0;
2359 + __netif_tx_unlock_bh(tx_queue);
2360 + }
2361 +
2362 + return count;
2363 +}
2364 +#endif
2365 +/* pfe_eth_show_txavail
2366 + *
2367 + */
2368 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2369 + struct device_attribute *attr,
2370 + char *buf)
2371 +{
2372 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2373 + ssize_t len = 0;
2374 + int i;
2375 +
2376 + for (i = 0; i < emac_txq_cnt; i++) {
2377 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2378 + i);
2379 +
2380 + __netif_tx_lock_bh(tx_queue);
2381 +
2382 + len += sprintf(buf + len, "%d",
2383 + hif_lib_tx_avail(&priv->client, i));
2384 +
2385 + __netif_tx_unlock_bh(tx_queue);
2386 +
2387 + if (i == (emac_txq_cnt - 1))
2388 + len += sprintf(buf + len, "\n");
2389 + else
2390 + len += sprintf(buf + len, " ");
2391 + }
2392 +
2393 + return len;
2394 +}
2395 +
2396 +/* pfe_eth_show_default_priority
2397 + *
2398 + */
2399 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2400 + struct device_attribute *attr,
2401 + char *buf)
2402 +{
2403 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2404 + unsigned long flags;
2405 + int rc;
2406 +
2407 + spin_lock_irqsave(&priv->lock, flags);
2408 + rc = sprintf(buf, "%d\n", priv->default_priority);
2409 + spin_unlock_irqrestore(&priv->lock, flags);
2410 +
2411 + return rc;
2412 +}
2413 +
2414 +/* pfe_eth_set_default_priority
2415 + *
2416 + */
2417 +
2418 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2419 + struct device_attribute *attr,
2420 + const char *buf, size_t count)
2421 +{
2422 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2423 +	unsigned long flags, prio;
2424 +	spin_lock_irqsave(&priv->lock, flags);
2425 +	if (!kstrtoul(buf, 0, &prio)) /* returns 0 on success, not the value */
2426 +		priv->default_priority = prio;
2427 +	spin_unlock_irqrestore(&priv->lock, flags);
2428 +
2429 + return count;
2430 +}
2431 +
2432 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2433 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2434 + pfe_eth_set_default_priority);
2435 +
2436 +#ifdef PFE_ETH_NAPI_STATS
2437 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2438 + pfe_eth_set_napi_stats);
2439 +#endif
2440 +
2441 +#ifdef PFE_ETH_TX_STATS
2442 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2443 + pfe_eth_set_tx_stats);
2444 +#endif
2445 +
2446 +/*
2447 + * pfe_eth_sysfs_init
2448 + *
2449 + */
2450 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2451 +{
2452 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2453 + int err;
2454 +
2455 + /* Initialize the default values */
2456 +
2457 + /*
2458 + * By default, packets without conntrack will use this default high
2459 + * priority queue
2460 + */
2461 + priv->default_priority = 15;
2462 +
2463 + /* Create our sysfs files */
2464 + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2465 + if (err) {
2466 + netdev_err(ndev,
2467 + "failed to create default_priority sysfs files\n");
2468 + goto err_priority;
2469 + }
2470 +
2471 + err = device_create_file(&ndev->dev, &dev_attr_txavail);
2472 + if (err) {
2473 + netdev_err(ndev,
2474 + "failed to create default_priority sysfs files\n");
2475 + goto err_txavail;
2476 + }
2477 +
2478 +#ifdef PFE_ETH_NAPI_STATS
2479 + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2480 + if (err) {
2481 + netdev_err(ndev, "failed to create napi stats sysfs files\n");
2482 + goto err_napi;
2483 + }
2484 +#endif
2485 +
2486 +#ifdef PFE_ETH_TX_STATS
2487 + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2488 + if (err) {
2489 + netdev_err(ndev, "failed to create tx stats sysfs files\n");
2490 + goto err_tx;
2491 + }
2492 +#endif
2493 +
2494 + return 0;
2495 +
2496 +#ifdef PFE_ETH_TX_STATS
2497 +err_tx:
2498 +#endif
2499 +#ifdef PFE_ETH_NAPI_STATS
2500 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2501 +
2502 +err_napi:
2503 +#endif
2504 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2505 +
2506 +err_txavail:
2507 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2508 +
2509 +err_priority:
2510 + return -1;
2511 +}
2512 +
2513 +/* pfe_eth_sysfs_exit
2514 + *
2515 + */
2516 +void pfe_eth_sysfs_exit(struct net_device *ndev)
2517 +{
2518 +#ifdef PFE_ETH_TX_STATS
2519 + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
2520 +#endif
2521 +
2522 +#ifdef PFE_ETH_NAPI_STATS
2523 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2524 +#endif
2525 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2526 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2527 +}
2528 +
2529 +/*************************************************************************/
2530 +/* ETHTOOL INTERFACE */
2531 +/*************************************************************************/
2532 +
2533 +/* MTIP GEMAC */
2534 +static const struct fec_stat {
2535 + char name[ETH_GSTRING_LEN];
2536 + u16 offset;
2537 +} fec_stats[] = {
2538 + /* RMON TX */
2539 + { "tx_dropped", RMON_T_DROP },
2540 + { "tx_packets", RMON_T_PACKETS },
2541 + { "tx_broadcast", RMON_T_BC_PKT },
2542 + { "tx_multicast", RMON_T_MC_PKT },
2543 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
2544 + { "tx_undersize", RMON_T_UNDERSIZE },
2545 + { "tx_oversize", RMON_T_OVERSIZE },
2546 + { "tx_fragment", RMON_T_FRAG },
2547 + { "tx_jabber", RMON_T_JAB },
2548 + { "tx_collision", RMON_T_COL },
2549 + { "tx_64byte", RMON_T_P64 },
2550 + { "tx_65to127byte", RMON_T_P65TO127 },
2551 + { "tx_128to255byte", RMON_T_P128TO255 },
2552 + { "tx_256to511byte", RMON_T_P256TO511 },
2553 + { "tx_512to1023byte", RMON_T_P512TO1023 },
2554 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2555 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2556 + { "tx_octets", RMON_T_OCTETS },
2557 +
2558 + /* IEEE TX */
2559 + { "IEEE_tx_drop", IEEE_T_DROP },
2560 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2561 + { "IEEE_tx_1col", IEEE_T_1COL },
2562 + { "IEEE_tx_mcol", IEEE_T_MCOL },
2563 + { "IEEE_tx_def", IEEE_T_DEF },
2564 + { "IEEE_tx_lcol", IEEE_T_LCOL },
2565 + { "IEEE_tx_excol", IEEE_T_EXCOL },
2566 + { "IEEE_tx_macerr", IEEE_T_MACERR },
2567 + { "IEEE_tx_cserr", IEEE_T_CSERR },
2568 + { "IEEE_tx_sqe", IEEE_T_SQE },
2569 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2570 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2571 +
2572 + /* RMON RX */
2573 + { "rx_packets", RMON_R_PACKETS },
2574 + { "rx_broadcast", RMON_R_BC_PKT },
2575 + { "rx_multicast", RMON_R_MC_PKT },
2576 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
2577 + { "rx_undersize", RMON_R_UNDERSIZE },
2578 + { "rx_oversize", RMON_R_OVERSIZE },
2579 + { "rx_fragment", RMON_R_FRAG },
2580 + { "rx_jabber", RMON_R_JAB },
2581 + { "rx_64byte", RMON_R_P64 },
2582 + { "rx_65to127byte", RMON_R_P65TO127 },
2583 + { "rx_128to255byte", RMON_R_P128TO255 },
2584 + { "rx_256to511byte", RMON_R_P256TO511 },
2585 + { "rx_512to1023byte", RMON_R_P512TO1023 },
2586 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2587 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2588 + { "rx_octets", RMON_R_OCTETS },
2589 +
2590 + /* IEEE RX */
2591 + { "IEEE_rx_drop", IEEE_R_DROP },
2592 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2593 + { "IEEE_rx_crc", IEEE_R_CRC },
2594 + { "IEEE_rx_align", IEEE_R_ALIGN },
2595 + { "IEEE_rx_macerr", IEEE_R_MACERR },
2596 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2597 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2598 +};
2599 +
2600 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
2601 + *stats, u64 *data)
2602 +{
2603 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2604 + int i;
2605 +
2606 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2607 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
2608 +}
2609 +
2610 +static void pfe_eth_gstrings(struct net_device *netdev,
2611 + u32 stringset, u8 *data)
2612 +{
2613 + int i;
2614 +
2615 + switch (stringset) {
2616 + case ETH_SS_STATS:
2617 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2618 + memcpy(data + i * ETH_GSTRING_LEN,
2619 + fec_stats[i].name, ETH_GSTRING_LEN);
2620 + break;
2621 + }
2622 +}
2623 +
2624 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
2625 +{
2626 + switch (sset) {
2627 + case ETH_SS_STATS:
2628 + return ARRAY_SIZE(fec_stats);
2629 + default:
2630 + return -EOPNOTSUPP;
2631 + }
2632 +}
2633 +
2634 +/*
2635 + * pfe_eth_gemac_reglen - Return the length of the register structure.
2636 + *
2637 + */
2638 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
2639 +{
2640 + pr_info("%s()\n", __func__);
2641 +	return sizeof(gemac_regs); /* length in bytes, one u32 per register */
2642 +}
2643 +
2644 +/*
2645 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
2646 + *
2647 + */
2648 +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
2649 + *regs, void *regbuf)
2650 +{
2651 + int i;
2652 +
2653 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2654 + u32 *buf = (u32 *)regbuf;
2655 +
2656 + pr_info("%s()\n", __func__);
2657 + for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
2658 + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
2659 +}
2660 +
2661 +/*
2662 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
2663 + *
2664 + */
2665 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2666 +{
2667 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2668 +
2669 + if (wol->wolopts & ~WAKE_MAGIC)
2670 + return -EOPNOTSUPP;
2671 +
2672 + /* for MTIP we store wol->wolopts */
2673 + priv->wol = wol->wolopts;
2674 +
2675 + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2676 +
2677 + return 0;
2678 +}
2679 +
2680 +/*
2681 + *
2682 + * pfe_eth_get_wol - Get the WoL options.
2683 + *
2684 + */
2685 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
2686 + *wol)
2687 +{
2688 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2689 +
2690 + wol->supported = WAKE_MAGIC;
2691 + wol->wolopts = 0;
2692 +
2693 + if (priv->wol & WAKE_MAGIC)
2694 + wol->wolopts = WAKE_MAGIC;
2695 +
2696 + memset(&wol->sopass, 0, sizeof(wol->sopass));
2697 +}
2698 +
2699 +/*
2700 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
2701 + *
2702 + */
2703 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
2704 + *drvinfo)
2705 +{
2706 + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2707 + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
2708 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2709 + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
2710 +}
2711 +
2712 +/*
2713 + * pfe_eth_set_settings - Used to send commands to PHY.
2714 + *
2715 + */
2716 +static int pfe_eth_set_settings(struct net_device *ndev,
2717 + const struct ethtool_link_ksettings *cmd)
2718 +{
2719 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2720 + struct phy_device *phydev = priv->phydev;
2721 +
2722 + if (!phydev)
2723 + return -ENODEV;
2724 +
2725 + return phy_ethtool_ksettings_set(phydev, cmd);
2726 +}
2727 +
2728 +/*
2729 + * pfe_eth_get_settings - Return the current link settings in the
2730 + * ethtool_link_ksettings structure.
2731 + *
2732 + */
2733 +static int pfe_eth_get_settings(struct net_device *ndev,
2734 + struct ethtool_link_ksettings *cmd)
2735 +{
2736 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2737 + struct phy_device *phydev = priv->phydev;
2738 +
2739 + if (!phydev)
2740 + return -ENODEV;
2741 +
2742 + return phy_ethtool_ksettings_get(phydev, cmd);
2743 +}
2744 +
2745 +/*
2746 + * pfe_eth_get_msglevel - Gets the debug message mask.
2747 + *
2748 + */
2749 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
2750 +{
2751 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2752 +
2753 + return priv->msg_enable;
2754 +}
2755 +
2756 +/*
2757 + * pfe_eth_set_msglevel - Sets the debug message mask.
2758 + *
2759 + */
2760 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
2761 +{
2762 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2763 +
2764 + priv->msg_enable = data;
2765 +}
2766 +
2767 +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
2768 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
2769 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
2770 + HIF_RX_COAL_CLKS_PER_USEC)
2771 +
2772 +/*
2773 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
2774 + *
2775 + */
2776 +static int pfe_eth_set_coalesce(struct net_device *ndev,
2777 + struct ethtool_coalesce *ec)
2778 +{
2779 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
2780 + return -EINVAL;
2781 +
2782 + if (!ec->rx_coalesce_usecs) {
2783 + writel(0, HIF_INT_COAL);
2784 + return 0;
2785 + }
2786 +
2787 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
2788 + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2789 +
2790 + return 0;
2791 +}
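
A worked pass through the conversion above, assuming an illustrative sys_clk
of 250000 (kHz, i.e. a 250 MHz AXI clock):

    /*
     * HIF_RX_COAL_CLKS_PER_USEC = 250000 / 1000       = 250 clks per us
     * rx_coalesce_usecs = 100  ->  100 * 250          = 25000 clks
     * HIF_INT_COAL is then written with 25000 | HIF_INT_COAL_ENABLE
     */
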
2792 +
2793 +/*
2794 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
2795 + *
2796 + */
2797 +static int pfe_eth_get_coalesce(struct net_device *ndev,
2798 + struct ethtool_coalesce *ec)
2799 +{
2800 + int reg_val = readl(HIF_INT_COAL);
2801 +
2802 + if (reg_val & HIF_INT_COAL_ENABLE)
2803 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
2804 + HIF_RX_COAL_CLKS_PER_USEC;
2805 + else
2806 + ec->rx_coalesce_usecs = 0;
2807 +
2808 + return 0;
2809 +}
2810 +
2811 +/*
2812 + * pfe_eth_set_pauseparam - Sets pause parameters
2813 + *
2814 + */
2815 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
2816 + struct ethtool_pauseparam *epause)
2817 +{
2818 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2819 +
2820 + if (epause->tx_pause != epause->rx_pause) {
2821 + netdev_info(ndev,
2822 + "hardware only support enable/disable both tx and rx\n");
2823 + return -EINVAL;
2824 + }
2825 +
2826 + priv->pause_flag = 0;
2827 + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
2828 + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
2829 +
2830 + if (epause->rx_pause || epause->autoneg) {
2831 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
2832 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
2833 + EGPI_PAUSE_ENABLE),
2834 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2835 + if (priv->phydev) {
2836 + priv->phydev->supported |= ADVERTISED_Pause |
2837 + ADVERTISED_Asym_Pause;
2838 + priv->phydev->advertising |= ADVERTISED_Pause |
2839 + ADVERTISED_Asym_Pause;
2840 + }
2841 + } else {
2842 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
2843 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
2844 + ~EGPI_PAUSE_ENABLE),
2845 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2846 + if (priv->phydev) {
2847 + priv->phydev->supported &= ~(ADVERTISED_Pause |
2848 + ADVERTISED_Asym_Pause);
2849 + priv->phydev->advertising &= ~(ADVERTISED_Pause |
2850 + ADVERTISED_Asym_Pause);
2851 + }
2852 + }
2853 +
2854 + return 0;
2855 +}
2856 +
2857 +/*
2858 + * pfe_eth_get_pauseparam - Gets pause parameters
2859 + *
2860 + */
2861 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
2862 + struct ethtool_pauseparam *epause)
2863 +{
2864 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2865 +
2866 + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
2867 + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
2868 + epause->rx_pause = epause->tx_pause;
2869 +}
2870 +
2871 +/*
2872 + * pfe_eth_get_hash
2873 + */
2874 +#define PFE_HASH_BITS 6 /* #bits in hash */
2875 +#define CRC32_POLY 0xEDB88320
2876 +
2877 +static int pfe_eth_get_hash(u8 *addr)
2878 +{
2879 + unsigned int i, bit, data, crc, hash;
2880 +
2881 + /* calculate crc32 value of mac address */
2882 + crc = 0xffffffff;
2883 +
2884 + for (i = 0; i < 6; i++) {
2885 + data = addr[i];
2886 + for (bit = 0; bit < 8; bit++, data >>= 1) {
2887 + crc = (crc >> 1) ^
2888 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
2889 + }
2890 + }
2891 +
2892 + /*
2893 + * only upper 6 bits (PFE_HASH_BITS) are used
2894 +	 * which point to a specific bit in the hash registers
2895 + */
2896 + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
2897 +
2898 + return hash;
2899 +}
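
A sketch of how the 6-bit result is typically consumed: bit 5 selects the
upper or lower 32-bit hash register and the low five bits select the bit
within it (the MAC address below is illustrative):

    u8 mac[ETH_ALEN] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 }; /* example */
    int hash = pfe_eth_get_hash(mac);   /* 0..63 */
    u32 bit = 1U << (hash & 0x1f);      /* bit within the register */
    bool upper = !!(hash & 0x20);       /* which hash register */
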
2900 +
2901 +const struct ethtool_ops pfe_ethtool_ops = {
2902 + .get_drvinfo = pfe_eth_get_drvinfo,
2903 + .get_regs_len = pfe_eth_gemac_reglen,
2904 + .get_regs = pfe_eth_gemac_get_regs,
2905 + .get_link = ethtool_op_get_link,
2906 + .get_wol = pfe_eth_get_wol,
2907 + .set_wol = pfe_eth_set_wol,
2908 + .set_pauseparam = pfe_eth_set_pauseparam,
2909 + .get_pauseparam = pfe_eth_get_pauseparam,
2910 + .get_strings = pfe_eth_gstrings,
2911 + .get_sset_count = pfe_eth_stats_count,
2912 + .get_ethtool_stats = pfe_eth_fill_stats,
2913 + .get_msglevel = pfe_eth_get_msglevel,
2914 + .set_msglevel = pfe_eth_set_msglevel,
2915 + .set_coalesce = pfe_eth_set_coalesce,
2916 + .get_coalesce = pfe_eth_get_coalesce,
2917 + .get_link_ksettings = pfe_eth_get_settings,
2918 + .set_link_ksettings = pfe_eth_set_settings,
2919 +};
2920 +
2921 +/* pfe_eth_mdio_reset
2922 + */
2923 +int pfe_eth_mdio_reset(struct mii_bus *bus)
2924 +{
2925 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2926 + u32 phy_speed;
2927 +
2928 + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
2929 +
2930 + mutex_lock(&bus->mdio_lock);
2931 +
2932 + /*
2933 +	 * Set the MII clock divider so that MDC stays at or below 2.5 MHz.
2934 + *
2935 + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2936 + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
2937 + */
2938 + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
2939 + << EMAC_MII_SPEED_SHIFT);
2940 + phy_speed |= EMAC_HOLDTIME(0x5);
2941 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
2942 +
2943 + mutex_unlock(&bus->mdio_lock);
2944 +
2945 + return 0;
2946 +}
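
Worked numbers for the divider computed above, assuming ctrl.sys_clk = 250000
(kHz):

    /*
     * MII_SPEED = DIV_ROUND_UP(250000 * 1000, 4000000) = 63
     * MDC       = 250 MHz / (63 * 2)                   ~= 1.98 MHz
     * which keeps MDC within the 2.5 MHz IEEE 802.3 limit.
     */
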
2947 +
2948 +/* pfe_eth_gemac_phy_timeout
2949 + *
2950 + */
2951 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
2952 +{
2953 + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
2954 + EMAC_IEVENT_MII)) {
2955 + if (timeout-- <= 0)
2956 + return -1;
2957 + usleep_range(10, 20);
2958 + }
2959 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
2960 + return 0;
2961 +}
2962 +
2963 +static int pfe_eth_mdio_mux(u8 muxval)
2964 +{
2965 + struct i2c_adapter *a;
2966 + struct i2c_msg msg;
2967 + unsigned char buf[2];
2968 + int ret;
2969 +
2970 + a = i2c_get_adapter(0);
2971 + if (!a)
2972 + return -ENODEV;
2973 +
2974 +	/* program the mux select bits in register 0x54 of the device at 0x66 */
2975 + buf[0] = 0x54; /* reg number */
2976 + buf[1] = (muxval << 6) | 0x3; /* data */
2977 + msg.addr = 0x66;
2978 + msg.buf = buf;
2979 + msg.len = 2;
2980 + msg.flags = 0;
2981 + ret = i2c_transfer(a, &msg, 1);
2982 + i2c_put_adapter(a);
2983 + if (ret != 1)
2984 + return -ENODEV;
2985 + return 0;
2986 +}
2987 +
2988 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
2989 + int dev_addr, int regnum)
2990 +{
2991 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2992 +
2993 + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
2994 + EMAC_MII_DATA_RA(dev_addr) |
2995 + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
2996 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
2997 +
2998 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
2999 + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
3000 + __func__);
3001 + return -1;
3002 + }
3003 +
3004 + return 0;
3005 +}
3006 +
3007 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3008 + u16 value)
3009 +{
3010 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3011 +
3012 +	/* To access external PHYs on the QDS board, the mux needs to be configured */
3013 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3014 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3015 +
3016 + if (regnum & MII_ADDR_C45) {
3017 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3018 + regnum & 0xffff);
3019 + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3020 + EMAC_MII_DATA_PA(mii_id) |
3021 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3022 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3023 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3024 + } else {
3025 + /* start a write op */
3026 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3027 + EMAC_MII_DATA_PA(mii_id) |
3028 + EMAC_MII_DATA_RA(regnum) |
3029 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3030 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3031 + }
3032 +
3033 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3034 + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
3035 + __func__);
3036 + return -1;
3037 + }
3038 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3039 + mii_id, regnum, value);
3040 +
3041 + return 0;
3042 +}
3043 +
3044 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3045 +{
3046 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3047 + u16 value = 0;
3048 +
3049 +	/* To access external PHYs on the QDS board, the mux needs to be configured */
3050 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3051 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3052 +
3053 + if (regnum & MII_ADDR_C45) {
3054 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3055 + regnum & 0xffff);
3056 + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3057 + EMAC_MII_DATA_PA(mii_id) |
3058 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3059 + EMAC_MII_DATA_TA,
3060 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3061 + } else {
3062 + /* start a read op */
3063 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3064 + EMAC_MII_DATA_PA(mii_id) |
3065 + EMAC_MII_DATA_RA(regnum) |
3066 + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
3067 + EMAC_MII_DATA_REG);
3068 + }
3069 +
3070 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3071 + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
3072 + return -1;
3073 + }
3074 +
3075 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
3076 + EMAC_MII_DATA_REG));
3077 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3078 + mii_id, regnum, value);
3079 + return value;
3080 +}
3081 +
3082 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
3083 + struct ls1012a_mdio_platform_data *minfo)
3084 +{
3085 + struct mii_bus *bus;
3086 + int rc;
3087 +
3088 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3089 + pr_info("%s\n", __func__);
3090 +
3091 + bus = mdiobus_alloc();
3092 + if (!bus) {
3093 + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
3094 + rc = -ENOMEM;
3095 + goto err0;
3096 + }
3097 +
3098 + bus->name = "ls1012a MDIO Bus";
3099 + bus->read = &pfe_eth_mdio_read;
3100 + bus->write = &pfe_eth_mdio_write;
3101 + bus->reset = &pfe_eth_mdio_reset;
3102 + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
3103 + bus->priv = priv;
3104 +
3105 + bus->phy_mask = minfo->phy_mask;
3106 + priv->mdc_div = minfo->mdc_div;
3107 +
3108 + if (!priv->mdc_div)
3109 + priv->mdc_div = 64;
3110 +
3111 + bus->irq[0] = minfo->irq[0];
3112 +
3113 + bus->parent = priv->pfe->dev;
3114 +
3115 + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
3116 + __func__, priv->mdc_div, bus->phy_mask);
3117 + rc = mdiobus_register(bus);
3118 + if (rc) {
3119 + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
3120 + bus->name);
3121 + goto err1;
3122 + }
3123 +
3124 + priv->mii_bus = bus;
3125 + pfe_eth_mdio_reset(bus);
3126 +
3127 + return 0;
3128 +
3129 +err1:
3130 + mdiobus_free(bus);
3131 +err0:
3132 + return rc;
3133 +}
3134 +
3135 +/* pfe_eth_mdio_exit
3136 + */
3137 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
3138 +{
3139 + if (!bus)
3140 + return;
3141 +
3142 +	netif_info((struct pfe_eth_priv_s *)bus->priv, drv,
3143 +		   ((struct pfe_eth_priv_s *)bus->priv)->ndev, "%s\n", __func__);
3144 +
3145 + mdiobus_unregister(bus);
3146 + mdiobus_free(bus);
3147 +}
3148 +
3149 +/* pfe_get_phydev_speed
3150 + */
3151 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3152 +{
3153 + switch (phydev->speed) {
3154 + case 10:
3155 + return SPEED_10M;
3156 + case 100:
3157 + return SPEED_100M;
3158 + case 1000:
3159 + default:
3160 + return SPEED_1000M;
3161 + }
3162 +}
3163 +
3164 +/* pfe_set_rgmii_speed
3165 + */
3166 +#define RGMIIPCR 0x434
3167 +/* RGMIIPCR bit definitions*/
3168 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
3169 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
3170 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
3171 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
3172 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
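+/*
+ * RGMIIPCR is a register in the LS1012A SCFG block; pfe_set_rgmii_speed()
+ * below reprograms its SETSP bits so the RGMII port tracks the
+ * PHY-negotiated speed.
+ */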
3173 +
3174 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3175 +{
3176 + u32 rgmii_pcr;
3177 +
3178 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3179 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3180 +
3181 + switch (phydev->speed) {
3182 + case 10:
3183 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3184 + break;
3185 + case 1000:
3186 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3187 + break;
3188 + case 100:
3189 + default:
3190 + /* Default is 100M */
3191 + break;
3192 + }
3193 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3194 +}
3195 +
3196 +/* pfe_get_phydev_duplex
3197 + */
3198 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3199 +{
3200 +	/* Half duplex is not supported; always report full duplex */
3201 + return DUPLEX_FULL;
3202 +}
3203 +
3204 +/* pfe_eth_adjust_link
3205 + */
3206 +static void pfe_eth_adjust_link(struct net_device *ndev)
3207 +{
3208 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3209 + unsigned long flags;
3210 + struct phy_device *phydev = priv->phydev;
3211 + int new_state = 0;
3212 +
3213 + netif_info(priv, drv, ndev, "%s\n", __func__);
3214 +
3215 + spin_lock_irqsave(&priv->lock, flags);
3216 +
3217 + if (phydev->link) {
3218 + /*
3219 + * Now we make sure that we can be in full duplex mode.
3220 + * If not, we operate in half-duplex mode.
3221 + */
3222 + if (phydev->duplex != priv->oldduplex) {
3223 + new_state = 1;
3224 + gemac_set_duplex(priv->EMAC_baseaddr,
3225 + pfe_get_phydev_duplex(phydev));
3226 + priv->oldduplex = phydev->duplex;
3227 + }
3228 +
3229 + if (phydev->speed != priv->oldspeed) {
3230 + new_state = 1;
3231 + gemac_set_speed(priv->EMAC_baseaddr,
3232 + pfe_get_phydev_speed(phydev));
3233 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
3234 + pfe_set_rgmii_speed(phydev);
3235 + priv->oldspeed = phydev->speed;
3236 + }
3237 +
3238 + if (!priv->oldlink) {
3239 + new_state = 1;
3240 + priv->oldlink = 1;
3241 + }
3242 +
3243 + } else if (priv->oldlink) {
3244 + new_state = 1;
3245 + priv->oldlink = 0;
3246 + priv->oldspeed = 0;
3247 + priv->oldduplex = -1;
3248 + }
3249 +
3250 + if (new_state && netif_msg_link(priv))
3251 + phy_print_status(phydev);
3252 +
3253 + spin_unlock_irqrestore(&priv->lock, flags);
3254 +}
3255 +
3256 +/* pfe_phy_exit
3257 + */
3258 +static void pfe_phy_exit(struct net_device *ndev)
3259 +{
3260 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3261 +
3262 + netif_info(priv, drv, ndev, "%s\n", __func__);
3263 +
3264 + phy_disconnect(priv->phydev);
3265 + priv->phydev = NULL;
3266 +}
3267 +
3268 +/* pfe_eth_stop
3269 + */
3270 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3271 +{
3272 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3273 +
3274 + netif_info(priv, drv, ndev, "%s\n", __func__);
3275 +
3276 + if (wake) {
3277 + gemac_tx_disable(priv->EMAC_baseaddr);
3278 + } else {
3279 + gemac_disable(priv->EMAC_baseaddr);
3280 + gpi_disable(priv->GPI_baseaddr);
3281 +
3282 + if (priv->phydev)
3283 + phy_stop(priv->phydev);
3284 + }
3285 +}
3286 +
3287 +/* pfe_eth_start
3288 + */
3289 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3290 +{
3291 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3292 +
3293 + if (priv->phydev)
3294 + phy_start(priv->phydev);
3295 +
3296 + gpi_enable(priv->GPI_baseaddr);
3297 + gemac_enable(priv->EMAC_baseaddr);
3298 +
3299 + return 0;
3300 +}
3301 +
3302 +/*
3303 + * Configure the on-chip SerDes through MDIO
3304 + */
3305 +static void ls1012a_configure_serdes(struct net_device *ndev)
3306 +{
3307 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
3308 + int sgmii_2500 = 0;
3309 + struct mii_bus *bus = priv->mii_bus;
3310 +
3311 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500)
3312 + sgmii_2500 = 1;
3313 +
3314 + netif_info(priv, drv, ndev, "%s\n", __func__);
3315 + /* PCS configuration done with corresponding GEMAC */
3316 +
3317 + pfe_eth_mdio_read(bus, 0, 0);
3318 + pfe_eth_mdio_read(bus, 0, 1);
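+	/* The two reads above appear to be dummy accesses, presumably to
+	 * confirm the PCS responds on MDIO before it is reprogrammed below.
+	 */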
3319 +
3320 +	/* These settings were provided by the validation team */
3321 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
3322 + if (sgmii_2500) {
3323 + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
3324 + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
3325 + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
3326 + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
3327 + } else {
3328 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
3329 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
3330 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
3331 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
3332 + }
3333 +
3334 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
3335 +}
3336 +
3337 +/*
3338 + * pfe_phy_init
3339 + *
3340 + */
3341 +static int pfe_phy_init(struct net_device *ndev)
3342 +{
3343 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3344 + struct phy_device *phydev;
3345 + char phy_id[MII_BUS_ID_SIZE + 3];
3346 + char bus_id[MII_BUS_ID_SIZE];
3347 + phy_interface_t interface;
3348 +
3349 + priv->oldlink = 0;
3350 + priv->oldspeed = 0;
3351 + priv->oldduplex = -1;
3352 +
3353 + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3354 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3355 + priv->einfo->phy_id);
3356 +
3357 + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3358 + interface = priv->einfo->mii_config;
3359 + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3360 + (interface == PHY_INTERFACE_MODE_SGMII_2500)) {
3361 + /*Configure SGMII PCS */
3362 + if (pfe->scfg) {
3363 + /*Config MDIO from serdes */
3364 + regmap_write(pfe->scfg, 0x484, 0x00000000);
3365 + }
3366 + ls1012a_configure_serdes(ndev);
3367 + }
3368 +
3369 + if (pfe->scfg) {
3370 + /*Config MDIO from PAD */
3371 + regmap_write(pfe->scfg, 0x484, 0x80000000);
3372 + }
3373 +
3374 + priv->oldlink = 0;
3375 + priv->oldspeed = 0;
3376 + priv->oldduplex = -1;
3377 + pr_info("%s interface %x\n", __func__, interface);
3378 + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
3379 +
3380 + if (IS_ERR(phydev)) {
3381 + netdev_err(ndev, "phy_connect() failed\n");
3382 + return PTR_ERR(phydev);
3383 + }
3384 +
3385 + priv->phydev = phydev;
3386 + phydev->irq = PHY_POLL;
3387 +
3388 + return 0;
3389 +}
3390 +
3391 +/* pfe_gemac_init
3392 + */
3393 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3394 +{
3395 + struct gemac_cfg cfg;
3396 +
3397 + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3398 +
3399 + cfg.speed = SPEED_1000M;
3400 + cfg.duplex = DUPLEX_FULL;
3401 +
3402 + gemac_set_config(priv->EMAC_baseaddr, &cfg);
3403 + gemac_allow_broadcast(priv->EMAC_baseaddr);
3404 + gemac_enable_1536_rx(priv->EMAC_baseaddr);
3405 + gemac_enable_rx_jmb(priv->EMAC_baseaddr);
3406 + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3407 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3408 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3409 +
3410 + /*GEM will perform checksum verifications*/
3411 + if (priv->ndev->features & NETIF_F_RXCSUM)
3412 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3413 + else
3414 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3415 +
3416 + return 0;
3417 +}
3418 +
3419 +/* pfe_eth_event_handler
3420 + */
3421 +static int pfe_eth_event_handler(void *data, int event, int qno)
3422 +{
3423 + struct pfe_eth_priv_s *priv = data;
3424 +
3425 + switch (event) {
3426 + case EVENT_RX_PKT_IND:
3427 +
3428 + if (qno == 0) {
3429 + if (napi_schedule_prep(&priv->high_napi)) {
3430 + netif_info(priv, intr, priv->ndev,
3431 + "%s: schedule high prio poll\n"
3432 + , __func__);
3433 +
3434 +#ifdef PFE_ETH_NAPI_STATS
3435 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3436 +#endif
3437 +
3438 + __napi_schedule(&priv->high_napi);
3439 + }
3440 + } else if (qno == 1) {
3441 + if (napi_schedule_prep(&priv->low_napi)) {
3442 + netif_info(priv, intr, priv->ndev,
3443 + "%s: schedule low prio poll\n"
3444 + , __func__);
3445 +
3446 +#ifdef PFE_ETH_NAPI_STATS
3447 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3448 +#endif
3449 + __napi_schedule(&priv->low_napi);
3450 + }
3451 + } else if (qno == 2) {
3452 + if (napi_schedule_prep(&priv->lro_napi)) {
3453 + netif_info(priv, intr, priv->ndev,
3454 + "%s: schedule lro prio poll\n"
3455 + , __func__);
3456 +
3457 +#ifdef PFE_ETH_NAPI_STATS
3458 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3459 +#endif
3460 + __napi_schedule(&priv->lro_napi);
3461 + }
3462 + }
3463 +
3464 + break;
3465 +
3466 + case EVENT_TXDONE_IND:
3467 + pfe_eth_flush_tx(priv);
3468 + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
3469 + break;
3470 + case EVENT_HIGH_RX_WM:
3471 + default:
3472 + break;
3473 + }
3474 +
3475 + return 0;
3476 +}
3477 +
3478 +/* pfe_eth_open
3479 + */
3480 +static int pfe_eth_open(struct net_device *ndev)
3481 +{
3482 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3483 + struct hif_client_s *client;
3484 + int rc;
3485 +
3486 + netif_info(priv, ifup, ndev, "%s\n", __func__);
3487 +
3488 + /* Register client driver with HIF */
3489 + client = &priv->client;
3490 + memset(client, 0, sizeof(*client));
3491 + client->id = PFE_CL_GEM0 + priv->id;
3492 + client->tx_qn = emac_txq_cnt;
3493 + client->rx_qn = EMAC_RXQ_CNT;
3494 + client->priv = priv;
3495 + client->pfe = priv->pfe;
3496 + client->event_handler = pfe_eth_event_handler;
3497 +
3498 + client->tx_qsize = EMAC_TXQ_DEPTH;
3499 + client->rx_qsize = EMAC_RXQ_DEPTH;
3500 +
3501 + rc = hif_lib_client_register(client);
3502 + if (rc) {
3503 + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
3504 + __func__, client->id);
3505 + goto err0;
3506 + }
3507 +
3508 + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
3509 + client);
3510 +
3511 + pfe_gemac_init(priv);
3512 +
3513 + if (!is_valid_ether_addr(ndev->dev_addr)) {
3514 + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
3515 + rc = -EADDRNOTAVAIL;
3516 + goto err1;
3517 + }
3518 +
3519 + gemac_set_laddrN(priv->EMAC_baseaddr,
3520 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3521 +
3522 + napi_enable(&priv->high_napi);
3523 + napi_enable(&priv->low_napi);
3524 + napi_enable(&priv->lro_napi);
3525 +
3526 + rc = pfe_eth_start(priv);
3527 +
3528 + netif_tx_wake_all_queues(ndev);
3529 +
3530 + return rc;
3531 +
3532 +err1:
3533 + hif_lib_client_unregister(&priv->client);
3534 +
3535 +err0:
3536 + return rc;
3537 +}
3538 +
3539 +/*
3540 + * pfe_eth_shutdown
3541 + */
3542 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
3543 +{
3544 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3545 + int i, qstatus;
3546 + unsigned long next_poll = jiffies + 1, end = jiffies +
3547 + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3548 + int tx_pkts, prv_tx_pkts;
3549 +
3550 + netif_info(priv, ifdown, ndev, "%s\n", __func__);
3551 +
3552 + for (i = 0; i < emac_txq_cnt; i++)
3553 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
3554 +
3555 + netif_tx_stop_all_queues(ndev);
3556 +
3557 + do {
3558 + tx_pkts = 0;
3559 + pfe_eth_flush_tx(priv);
3560 +
3561 + for (i = 0; i < emac_txq_cnt; i++)
3562 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
3563 +
3564 + if (tx_pkts) {
3565 + /*Don't wait forever, break if we cross max timeout */
3566 + if (time_after(jiffies, end)) {
3567 + pr_err(
3568 + "(%s)Tx is not complete after %dmsec\n",
3569 + ndev->name, TX_POLL_TIMEOUT_MS);
3570 + break;
3571 + }
3572 +
3573 + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
3574 + , __func__, ndev->name, tx_pkts);
3575 + if (need_resched())
3576 + schedule();
3577 + }
3578 +
3579 + } while (tx_pkts);
3580 +
3581 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3582 +
3583 + prv_tx_pkts = tmu_pkts_processed(priv->id);
3584 + /*
3585 +	 * Wait until the TMU has transmitted all pending packets.
3586 +	 * Poll tmu_qstatus and the TMU processed-packet count every 10ms.
3587 +	 * Consider the TMU busy if its queue is pending or if it has
3588 +	 * processed any packets since the last poll.
3589 + */
3590 + while (1) {
3591 + if (time_after(jiffies, next_poll)) {
3592 + tx_pkts = tmu_pkts_processed(priv->id);
3593 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
3594 +
3595 + if (!qstatus && (tx_pkts == prv_tx_pkts))
3596 + break;
3597 + /* Don't wait forever, break if we cross max
3598 + * timeout(TX_POLL_TIMEOUT_MS)
3599 + */
3600 + if (time_after(jiffies, end)) {
3601 + pr_err("TMU%d is busy after %dmsec\n",
3602 + priv->id, TX_POLL_TIMEOUT_MS);
3603 + break;
3604 + }
3605 + prv_tx_pkts = tx_pkts;
3606 + next_poll++;
3607 + }
3608 + if (need_resched())
3609 + schedule();
3610 + }
3611 +	/* Wait up to one more jiffy for any in-flight frame to finish transmitting */
3612 + next_poll = jiffies + 1;
3613 + while (1) {
3614 + if (time_after(jiffies, next_poll))
3615 + break;
3616 + if (need_resched())
3617 + schedule();
3618 + }
3619 +
3620 + pfe_eth_stop(ndev, wake);
3621 +
3622 + napi_disable(&priv->lro_napi);
3623 + napi_disable(&priv->low_napi);
3624 + napi_disable(&priv->high_napi);
3625 +
3626 + hif_lib_client_unregister(&priv->client);
3627 +
3628 + return 0;
3629 +}
3630 +
3631 +/* pfe_eth_close
3632 + *
3633 + */
3634 +static int pfe_eth_close(struct net_device *ndev)
3635 +{
3636 + pfe_eth_shutdown(ndev, 0);
3637 +
3638 + return 0;
3639 +}
3640 +
3641 +/* pfe_eth_suspend
3642 + *
3643 + * return value : 1 if netdevice is configured to wakeup system
3644 + * 0 otherwise
3645 + */
3646 +int pfe_eth_suspend(struct net_device *ndev)
3647 +{
3648 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3649 + int retval = 0;
3650 +
3651 + if (priv->wol) {
3652 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
3653 + retval = 1;
3654 + }
3655 + pfe_eth_shutdown(ndev, priv->wol);
3656 +
3657 + return retval;
3658 +}
3659 +
3660 +/* pfe_eth_resume
3661 + *
3662 + */
3663 +int pfe_eth_resume(struct net_device *ndev)
3664 +{
3665 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3666 +
3667 + if (priv->wol)
3668 + gemac_set_wol(priv->EMAC_baseaddr, 0);
3669 + gemac_tx_enable(priv->EMAC_baseaddr);
3670 +
3671 + return pfe_eth_open(ndev);
3672 +}
3673 +
3674 +/* pfe_eth_get_queuenum
3675 + */
3676 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
3677 + *skb)
3678 +{
3679 + int queuenum = 0;
3680 + unsigned long flags;
3681 +
3682 + /* Get the Fast Path queue number */
3683 + /*
3684 + * Use conntrack mark (if conntrack exists), then packet mark (if any),
3685 + * then fallback to default
3686 + */
3687 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3688 + if (skb->nfct) {
3689 + enum ip_conntrack_info cinfo;
3690 + struct nf_conn *ct;
3691 +
3692 + ct = nf_ct_get(skb, &cinfo);
3693 +
3694 + if (ct) {
3695 + u32 connmark;
3696 +
3697 + connmark = ct->mark;
3698 +
3699 + if ((connmark & 0x80000000) && priv->id != 0)
3700 + connmark >>= 16;
3701 +
3702 + queuenum = connmark & EMAC_QUEUENUM_MASK;
3703 + }
3704 + } else {/* continued after #endif ... */
3705 +#endif
3706 + if (skb->mark) {
3707 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
3708 + } else {
3709 + spin_lock_irqsave(&priv->lock, flags);
3710 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
3711 + spin_unlock_irqrestore(&priv->lock, flags);
3712 + }
3713 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3714 + }
3715 +#endif
3716 + return queuenum;
3717 +}
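+/*
+ * Example: assuming emac_txq_cnt is the default EMAC_TXQ_CNT (16),
+ * EMAC_QUEUENUM_MASK is 0xf, so a conntrack or skb mark of 0x12
+ * selects Tx queue 2.
+ */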
3718 +
3719 +/* pfe_eth_might_stop_tx
3720 + *
3721 + */
3722 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
3723 + struct netdev_queue *tx_queue,
3724 + unsigned int n_desc,
3725 + unsigned int n_segs)
3726 +{
3727 + ktime_t kt;
3728 +
3729 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
3730 + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
3731 + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
3732 +#ifdef PFE_ETH_TX_STATS
3733 + if (__hif_tx_avail(&pfe->hif) < n_desc) {
3734 + priv->stop_queue_hif[queuenum]++;
3735 + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
3736 + priv->stop_queue_hif_client[queuenum]++;
3737 + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
3738 + n_segs) {
3739 + priv->stop_queue_credit[queuenum]++;
3740 + }
3741 + priv->stop_queue_total[queuenum]++;
3742 +#endif
3743 + netif_tx_stop_queue(tx_queue);
3744 +
3745 + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
3746 + NSEC_PER_MSEC);
3747 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
3748 + HRTIMER_MODE_REL);
3749 + return -1;
3750 + } else {
3751 + return 0;
3752 + }
3753 +}
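+/*
+ * The queue is stopped when any of three resources is exhausted: global
+ * HIF descriptors, the per-client queue, or per-queue Tx credit; the
+ * fast-recovery hrtimer armed above re-wakes it after a short delay.
+ */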
3754 +
3755 +#define SA_MAX_OP 2
3756 +/* pfe_hif_send_packet
3757 + *
3758 + * At this level if TX fails we drop the packet
3759 + */
3760 +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
3761 + *priv, int queuenum)
3762 +{
3763 + struct skb_shared_info *sh = skb_shinfo(skb);
3764 + unsigned int nr_frags;
3765 + u32 ctrl = 0;
3766 +
3767 + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
3768 +
3769 + if (skb_is_gso(skb)) {
3770 + priv->stats.tx_dropped++;
3771 + return;
3772 + }
3773 +
3774 + if (skb->ip_summed == CHECKSUM_PARTIAL)
3775 + ctrl = HIF_CTRL_TX_CHECKSUM;
3776 +
3777 + nr_frags = sh->nr_frags;
3778 +
3779 + if (nr_frags) {
3780 + skb_frag_t *f;
3781 + int i;
3782 +
3783 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3784 + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
3785 + skb);
3786 +
3787 + for (i = 0; i < nr_frags - 1; i++) {
3788 + f = &sh->frags[i];
3789 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3790 + skb_frag_address(f),
3791 + skb_frag_size(f),
3792 + 0x0, 0x0, skb);
3793 + }
3794 +
3795 + f = &sh->frags[i];
3796 +
3797 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3798 + skb_frag_address(f), skb_frag_size(f),
3799 + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
3800 + skb);
3801 +
3802 + netif_info(priv, tx_queued, priv->ndev,
3803 + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
3804 + __func__, skb, nr_frags, skb->len);
3805 + } else {
3806 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3807 + skb->len, ctrl, HIF_FIRST_BUFFER |
3808 + HIF_LAST_BUFFER | HIF_DATA_VALID,
3809 + skb);
3810 + netif_info(priv, tx_queued, priv->ndev,
3811 + "%s: pkt sent successfully skb:%p len:%d\n",
3812 + __func__, skb, skb->len);
3813 + }
3814 + hif_tx_dma_start();
3815 + priv->stats.tx_packets++;
3816 + priv->stats.tx_bytes += skb->len;
3817 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
3818 +}
3819 +
3820 +/* pfe_eth_flush_txQ
3821 + */
3822 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
3823 + from_tx, int n_desc)
3824 +{
3825 + struct sk_buff *skb;
3826 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3827 + tx_q_num);
3828 + unsigned int flags;
3829 +
3830 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3831 +
3832 + if (!from_tx)
3833 + __netif_tx_lock_bh(tx_queue);
3834 +
3835 + /* Clean HIF and client queue */
3836 + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
3837 + tx_q_num, &flags,
3838 + HIF_TX_DESC_NT))) {
3839 + if (flags & HIF_DATA_VALID)
3840 + dev_kfree_skb_any(skb);
3841 + }
3842 + if (!from_tx)
3843 + __netif_tx_unlock_bh(tx_queue);
3844 +}
3845 +
3846 +/* pfe_eth_flush_tx
3847 + */
3848 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
3849 +{
3850 + int ii;
3851 +
3852 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3853 +
3854 + for (ii = 0; ii < emac_txq_cnt; ii++)
3855 + pfe_eth_flush_txQ(priv, ii, 0, 0);
3856 +}
3857 +
3858 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
3859 + *n_segs)
3860 +{
3861 + struct skb_shared_info *sh = skb_shinfo(skb);
3862 +
3863 + /* Scattered data */
3864 + if (sh->nr_frags) {
3865 + *n_desc = sh->nr_frags + 1;
3866 + *n_segs = 1;
3867 + /* Regular case */
3868 + } else {
3869 + *n_desc = 1;
3870 + *n_segs = 1;
3871 + }
3872 +}
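+/*
+ * A scattered skb consumes one HIF descriptor per fragment plus one for
+ * the linear header, but always counts as a single segment for the
+ * Tx-credit accounting in pfe_eth_might_stop_tx().
+ */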
3873 +
3874 +/* pfe_eth_send_packet
3875 + */
3876 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
3877 +{
3878 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3879 + int tx_q_num = skb_get_queue_mapping(skb);
3880 + int n_desc, n_segs;
3881 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3882 + tx_q_num);
3883 +
3884 + netif_info(priv, tx_queued, ndev, "%s\n", __func__);
3885 +
3886 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
3887 + sizeof(unsigned long)))) {
3888 + netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
3889 + __func__);
3890 +
3891 +		if (pskb_expand_head(skb, PFE_PKT_HEADER_SZ +
3892 +				     sizeof(unsigned long), 0, GFP_ATOMIC)) {
3893 + /* No need to re-transmit, no way to recover*/
3894 + kfree_skb(skb);
3895 + priv->stats.tx_dropped++;
3896 + return NETDEV_TX_OK;
3897 + }
3898 + }
3899 +
3900 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
3901 +
3902 + hif_tx_lock(&pfe->hif);
3903 + if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
3904 + n_segs))) {
3905 +#ifdef PFE_ETH_TX_STATS
3906 + if (priv->was_stopped[tx_q_num]) {
3907 + priv->clean_fail[tx_q_num]++;
3908 + priv->was_stopped[tx_q_num] = 0;
3909 + }
3910 +#endif
3911 + hif_tx_unlock(&pfe->hif);
3912 + return NETDEV_TX_BUSY;
3913 + }
3914 +
3915 + pfe_hif_send_packet(skb, priv, tx_q_num);
3916 +
3917 + hif_tx_unlock(&pfe->hif);
3918 +
3919 + tx_queue->trans_start = jiffies;
3920 +
3921 +#ifdef PFE_ETH_TX_STATS
3922 + priv->was_stopped[tx_q_num] = 0;
3923 +#endif
3924 +
3925 + return NETDEV_TX_OK;
3926 +}
3927 +
3928 +/* pfe_eth_select_queue
3929 + *
3930 + */
3931 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
3932 + void *accel_priv,
3933 + select_queue_fallback_t fallback)
3934 +{
3935 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3936 +
3937 + return pfe_eth_get_queuenum(priv, skb);
3938 +}
3939 +
3940 +/* pfe_eth_get_stats
3941 + */
3942 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
3943 +{
3944 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3945 +
3946 + netif_info(priv, drv, ndev, "%s\n", __func__);
3947 +
3948 + return &priv->stats;
3949 +}
3950 +
3951 +/* pfe_eth_set_mac_address
3952 + */
3953 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
3954 +{
3955 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3956 + struct sockaddr *sa = addr;
3957 +
3958 + netif_info(priv, drv, ndev, "%s\n", __func__);
3959 +
3960 + if (!is_valid_ether_addr(sa->sa_data))
3961 + return -EADDRNOTAVAIL;
3962 +
3963 + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
3964 +
3965 + gemac_set_laddrN(priv->EMAC_baseaddr,
3966 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3967 +
3968 + return 0;
3969 +}
3970 +
3971 +/* pfe_eth_enet_addr_byte_mac
3972 + */
3973 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
3974 + struct pfe_mac_addr *enet_addr)
3975 +{
3976 + if (!enet_byte_addr || !enet_addr) {
3977 + return -1;
3978 +
3979 + } else {
3980 + enet_addr->bottom = enet_byte_addr[0] |
3981 + (enet_byte_addr[1] << 8) |
3982 + (enet_byte_addr[2] << 16) |
3983 + (enet_byte_addr[3] << 24);
3984 + enet_addr->top = enet_byte_addr[4] |
3985 + (enet_byte_addr[5] << 8);
3986 + return 0;
3987 + }
3988 +}
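+/*
+ * Example: for MAC address 00:11:22:33:44:55 the packed register values
+ * are bottom = 0x33221100 and top = 0x00005544.
+ */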
3989 +
3990 +/* pfe_eth_set_multi
3991 + */
3992 +static void pfe_eth_set_multi(struct net_device *ndev)
3993 +{
3994 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3995 + struct pfe_mac_addr hash_addr; /* hash register structure */
3996 + /* specific mac address register structure */
3997 + struct pfe_mac_addr spec_addr;
3998 + int result; /* index into hash register to set.. */
3999 + int uc_count = 0;
4000 + struct netdev_hw_addr *ha;
4001 +
4002 + if (ndev->flags & IFF_PROMISC) {
4003 + netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4004 +
4005 + priv->promisc = 1;
4006 + gemac_enable_copy_all(priv->EMAC_baseaddr);
4007 + } else {
4008 + priv->promisc = 0;
4009 + gemac_disable_copy_all(priv->EMAC_baseaddr);
4010 + }
4011 +
4012 + /* Enable broadcast frame reception if required. */
4013 + if (ndev->flags & IFF_BROADCAST) {
4014 + gemac_allow_broadcast(priv->EMAC_baseaddr);
4015 + } else {
4016 + netif_info(priv, drv, ndev,
4017 + "disabling broadcast frame reception\n");
4018 +
4019 + gemac_no_broadcast(priv->EMAC_baseaddr);
4020 + }
4021 +
4022 + if (ndev->flags & IFF_ALLMULTI) {
4023 + /* Set the hash to rx all multicast frames */
4024 + hash_addr.bottom = 0xFFFFFFFF;
4025 + hash_addr.top = 0xFFFFFFFF;
4026 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4027 + netdev_for_each_uc_addr(ha, ndev) {
4028 + if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4029 + break;
4030 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4031 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4032 + uc_count + 2);
4033 + uc_count++;
4034 + }
4035 + } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
4036 + u8 *addr;
4037 +
4038 + hash_addr.bottom = 0;
4039 + hash_addr.top = 0;
4040 +
4041 + netdev_for_each_mc_addr(ha, ndev) {
4042 + addr = ha->addr;
4043 +
4044 + netif_info(priv, drv, ndev,
4045 + "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
4046 + addr[0], addr[1], addr[2],
4047 + addr[3], addr[4], addr[5]);
4048 +
4049 + result = pfe_eth_get_hash(addr);
4050 +
4051 + if (result < EMAC_HASH_REG_BITS) {
4052 + if (result < 32)
4053 + hash_addr.bottom |= (1 << result);
4054 + else
4055 + hash_addr.top |= (1 << (result - 32));
4056 + } else {
4057 + break;
4058 + }
4059 + }
4060 +
4061 + uc_count = -1;
4062 + netdev_for_each_uc_addr(ha, ndev) {
4063 + addr = ha->addr;
4064 +
4065 + if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
4066 + netdev_info(ndev,
4067 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4068 + addr[0], addr[1], addr[2],
4069 + addr[3], addr[4], addr[5]);
4070 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4071 + gemac_set_laddrN(priv->EMAC_baseaddr,
4072 + &spec_addr, uc_count + 2);
4073 + } else {
4074 + netif_info(priv, drv, ndev,
4075 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4076 + addr[0], addr[1], addr[2],
4077 + addr[3], addr[4], addr[5]);
4078 +
4079 + result = pfe_eth_get_hash(addr);
4080 + if (result >= EMAC_HASH_REG_BITS) {
4081 + break;
4082 +
4083 + } else {
4084 + if (result < 32)
4085 + hash_addr.bottom |= (1 <<
4086 + result);
4087 + else
4088 + hash_addr.top |= (1 <<
4089 + (result - 32));
4090 + }
4091 + }
4092 + }
4093 +
4094 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4095 + }
4096 +
4097 + if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
4098 + /*
4099 + * Check if there are any specific address HW registers that
4100 + * need to be flushed
4101 + */
4102 + for (uc_count = netdev_uc_count(ndev); uc_count <
4103 + MAX_UC_SPEC_ADDR_REG; uc_count++)
4104 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4105 + }
4106 +
4107 + if (ndev->flags & IFF_LOOPBACK)
4108 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4109 +}
4110 +
4111 +/* pfe_eth_set_features
4112 + */
4113 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
4114 + features)
4115 +{
4116 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4117 + int rc = 0;
4118 +
4119 + if (features & NETIF_F_RXCSUM)
4120 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4121 + else
4122 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4123 + return rc;
4124 +}
4125 +
4126 +/* pfe_eth_fast_tx_timeout
4127 + */
4128 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4129 +{
4130 +	struct pfe_eth_fast_timer *fast_tx_timeout =
4131 +		container_of(timer, struct pfe_eth_fast_timer, timer);
4133 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4134 + struct pfe_eth_priv_s,
4135 + fast_tx_timeout);
4136 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4137 + fast_tx_timeout->queuenum);
4138 +
4139 + if (netif_tx_queue_stopped(tx_queue)) {
4140 +#ifdef PFE_ETH_TX_STATS
4141 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4142 +#endif
4143 + netif_tx_wake_queue(tx_queue);
4144 + }
4145 +
4146 + return HRTIMER_NORESTART;
4147 +}
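+/*
+ * This handler pairs with pfe_eth_might_stop_tx(): once the
+ * LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS (3ms) timer fires, a stopped queue
+ * is woken again so a transient resource shortage cannot stall it.
+ */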
4148 +
4149 +/* pfe_eth_fast_tx_timeout_init
4150 + */
4151 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4152 +{
4153 + int i;
4154 +
4155 + for (i = 0; i < emac_txq_cnt; i++) {
4156 + priv->fast_tx_timeout[i].queuenum = i;
4157 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4158 + HRTIMER_MODE_REL);
4159 + priv->fast_tx_timeout[i].timer.function =
4160 + pfe_eth_fast_tx_timeout;
4161 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4162 + }
4163 +}
4164 +
4165 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4166 + struct pfe_eth_priv_s *priv,
4167 + unsigned int qno)
4168 +{
4169 + void *buf_addr;
4170 + unsigned int rx_ctrl;
4171 + unsigned int desc_ctrl = 0;
4172 + struct hif_ipsec_hdr *ipsec_hdr = NULL;
4173 + struct sk_buff *skb;
4174 + struct sk_buff *skb_frag, *skb_frag_last = NULL;
4175 + int length = 0, offset;
4176 +
4177 + skb = priv->skb_inflight[qno];
4178 +
4179 + if (skb) {
4180 + skb_frag_last = skb_shinfo(skb)->frag_list;
4181 + if (skb_frag_last) {
4182 + while (skb_frag_last->next)
4183 + skb_frag_last = skb_frag_last->next;
4184 + }
4185 + }
4186 +
4187 + while (!(desc_ctrl & CL_DESC_LAST)) {
4188 + buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4189 + &offset, &rx_ctrl, &desc_ctrl,
4190 + (void **)&ipsec_hdr);
4191 + if (!buf_addr)
4192 + goto incomplete;
4193 +
4194 +#ifdef PFE_ETH_NAPI_STATS
4195 + priv->napi_counters[NAPI_DESC_COUNT]++;
4196 +#endif
4197 +
4198 + /* First frag */
4199 + if (desc_ctrl & CL_DESC_FIRST) {
4200 + skb = build_skb(buf_addr, 0);
4201 + if (unlikely(!skb))
4202 + goto pkt_drop;
4203 +
4204 + skb_reserve(skb, offset);
4205 + skb_put(skb, length);
4206 + skb->dev = ndev;
4207 +
4208 + if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4209 + HIF_CTRL_RX_CHECKSUMMED))
4210 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4211 + else
4212 + skb_checksum_none_assert(skb);
4213 +
4214 + } else {
4215 + /* Next frags */
4216 + if (unlikely(!skb)) {
4217 + pr_err("%s: NULL skb_inflight\n",
4218 + __func__);
4219 + goto pkt_drop;
4220 + }
4221 +
4222 + skb_frag = build_skb(buf_addr, 0);
4223 +
4224 + if (unlikely(!skb_frag)) {
4225 + kfree(buf_addr);
4226 + goto pkt_drop;
4227 + }
4228 +
4229 + skb_reserve(skb_frag, offset);
4230 + skb_put(skb_frag, length);
4231 +
4232 + skb_frag->dev = ndev;
4233 +
4234 + if (skb_shinfo(skb)->frag_list)
4235 + skb_frag_last->next = skb_frag;
4236 + else
4237 + skb_shinfo(skb)->frag_list = skb_frag;
4238 +
4239 + skb->truesize += skb_frag->truesize;
4240 + skb->data_len += length;
4241 + skb->len += length;
4242 + skb_frag_last = skb_frag;
4243 + }
4244 + }
4245 +
4246 + priv->skb_inflight[qno] = NULL;
4247 + return skb;
4248 +
4249 +incomplete:
4250 + priv->skb_inflight[qno] = skb;
4251 + return NULL;
4252 +
4253 +pkt_drop:
4254 + priv->skb_inflight[qno] = NULL;
4255 +
4256 + if (skb)
4257 + kfree_skb(skb);
4258 + else
4259 + kfree(buf_addr);
4260 +
4261 + priv->stats.rx_errors++;
4262 +
4263 + return NULL;
4264 +}
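+/*
+ * Multi-buffer packets are reassembled on the skb frag_list: the buffer
+ * carrying CL_DESC_FIRST becomes the head skb, subsequent buffers are
+ * chained onto it, and the packet completes on CL_DESC_LAST; partially
+ * received packets are parked in skb_inflight[] between poll cycles.
+ */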
4265 +
4266 +/* pfe_eth_poll
4267 + */
4268 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4269 + unsigned int qno, int budget)
4270 +{
4271 + struct net_device *ndev = priv->ndev;
4272 + struct sk_buff *skb;
4273 + int work_done = 0;
4274 + unsigned int len;
4275 +
4276 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4277 +
4278 +#ifdef PFE_ETH_NAPI_STATS
4279 + priv->napi_counters[NAPI_POLL_COUNT]++;
4280 +#endif
4281 +
4282 + do {
4283 + skb = pfe_eth_rx_skb(ndev, priv, qno);
4284 +
4285 + if (!skb)
4286 + break;
4287 +
4288 + len = skb->len;
4289 +
4290 + /* Packet will be processed */
4291 + skb->protocol = eth_type_trans(skb, ndev);
4292 +
4293 + netif_receive_skb(skb);
4294 +
4295 + priv->stats.rx_packets++;
4296 + priv->stats.rx_bytes += len;
4297 +
4298 + work_done++;
4299 +
4300 +#ifdef PFE_ETH_NAPI_STATS
4301 + priv->napi_counters[NAPI_PACKET_COUNT]++;
4302 +#endif
4303 +
4304 + } while (work_done < budget);
4305 +
4306 + /*
4307 +	 * If neither Rx nor cleanup work was done, exit polling mode.
4308 +	 * No netif_running(dev) check is required here, as this is
4309 +	 * already checked in net/core/dev.c (since kernel 2.6.33.5).
4310 + */
4311 + if (work_done < budget) {
4312 + napi_complete(napi);
4313 +
4314 + hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4315 + qno);
4316 + }
4317 +#ifdef PFE_ETH_NAPI_STATS
4318 + else
4319 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4320 +#endif
4321 +
4322 + return work_done;
4323 +}
4324 +
4325 +/*
4326 + * pfe_eth_lro_poll
4327 + */
4328 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4329 +{
4330 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4331 + lro_napi);
4332 +
4333 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4334 +
4335 + return pfe_eth_poll(priv, napi, 2, budget);
4336 +}
4337 +
4338 +/* pfe_eth_low_poll
4339 + */
4340 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4341 +{
4342 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4343 + low_napi);
4344 +
4345 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4346 +
4347 + return pfe_eth_poll(priv, napi, 1, budget);
4348 +}
4349 +
4350 +/* pfe_eth_high_poll
4351 + */
4352 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4353 +{
4354 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4355 + high_napi);
4356 +
4357 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4358 +
4359 + return pfe_eth_poll(priv, napi, 0, budget);
4360 +}
4361 +
4362 +static const struct net_device_ops pfe_netdev_ops = {
4363 + .ndo_open = pfe_eth_open,
4364 + .ndo_stop = pfe_eth_close,
4365 + .ndo_start_xmit = pfe_eth_send_packet,
4366 + .ndo_select_queue = pfe_eth_select_queue,
4367 + .ndo_get_stats = pfe_eth_get_stats,
4368 + .ndo_set_mac_address = pfe_eth_set_mac_address,
4369 + .ndo_set_rx_mode = pfe_eth_set_multi,
4370 + .ndo_set_features = pfe_eth_set_features,
4371 + .ndo_validate_addr = eth_validate_addr,
4372 +};
4373 +
4374 +/* pfe_eth_init_one
4375 + */
4376 +static int pfe_eth_init_one(struct pfe *pfe, int id)
4377 +{
4378 + struct net_device *ndev = NULL;
4379 + struct pfe_eth_priv_s *priv = NULL;
4380 + struct ls1012a_eth_platform_data *einfo;
4381 + struct ls1012a_mdio_platform_data *minfo;
4382 + struct ls1012a_pfe_platform_data *pfe_info;
4383 + int err;
4384 +
4385 +	/* Extract platform data */
4386 + pfe_info = (struct ls1012a_pfe_platform_data *)
4387 + pfe->dev->platform_data;
4388 + if (!pfe_info) {
4389 + pr_err(
4390 + "%s: pfe missing additional platform data\n"
4391 + , __func__);
4392 + err = -ENODEV;
4393 + goto err0;
4394 + }
4395 +
4396 + einfo = (struct ls1012a_eth_platform_data *)
4397 + pfe_info->ls1012a_eth_pdata;
4398 +
4399 +	/* einfo should never be NULL, but there is no harm in checking */
4400 + if (!einfo) {
4401 + pr_err(
4402 + "%s: pfe missing additional gemacs platform data\n"
4403 + , __func__);
4404 + err = -ENODEV;
4405 + goto err0;
4406 + }
4407 +
4408 + minfo = (struct ls1012a_mdio_platform_data *)
4409 + pfe_info->ls1012a_mdio_pdata;
4410 +
4411 +	/* minfo should never be NULL, but there is no harm in checking */
4412 + if (!minfo) {
4413 + pr_err(
4414 + "%s: pfe missing additional mdios platform data\n",
4415 + __func__);
4416 + err = -ENODEV;
4417 + goto err0;
4418 + }
4419 +
4420 + /* Create an ethernet device instance */
4421 + ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4422 +
4423 + if (!ndev) {
4424 + pr_err("%s: gemac %d device allocation failed\n",
4425 + __func__, einfo[id].gem_id);
4426 + err = -ENOMEM;
4427 + goto err0;
4428 + }
4429 +
4430 + priv = netdev_priv(ndev);
4431 + priv->ndev = ndev;
4432 + priv->id = einfo[id].gem_id;
4433 + priv->pfe = pfe;
4434 +
4435 + SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4436 +
4437 + pfe->eth.eth_priv[id] = priv;
4438 +
4439 + /* Set the info in the priv to the current info */
4440 + priv->einfo = &einfo[id];
4441 + priv->EMAC_baseaddr = cbus_emac_base[id];
4442 + priv->PHY_baseaddr = cbus_emac_base[0];
4443 + priv->GPI_baseaddr = cbus_gpi_base[id];
4444 +
4445 +#define HIF_GEMAC_TMUQ_BASE 6
4446 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
4447 + priv->high_tmu_q = priv->low_tmu_q + 1;
4448 +
4449 + spin_lock_init(&priv->lock);
4450 +
4451 + pfe_eth_fast_tx_timeout_init(priv);
4452 +
4453 +	/* Copy the station address into the device structure */
4454 + memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
4455 +
4456 + /* Initialize mdio */
4457 + if (minfo[id].enabled) {
4458 + err = pfe_eth_mdio_init(priv, &minfo[id]);
4459 + if (err) {
4460 + netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
4461 + __func__);
4462 + goto err2;
4463 + }
4464 + }
4465 +
4466 + ndev->mtu = 1500;
4467 +
4468 + /* Set MTU limits */
4469 + ndev->min_mtu = ETH_MIN_MTU;
4470 + ndev->max_mtu = JUMBO_FRAME_SIZE;
4471 +
4472 + /* supported features */
4473 + ndev->hw_features = NETIF_F_SG;
4474 +
4475 +	/* Checksum offload is validated; this supersedes the SG-only default above */
4476 + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
4477 + NETIF_F_IPV6_CSUM | NETIF_F_SG;
4478 +
4479 + /* enabled by default */
4480 + ndev->features = ndev->hw_features;
4481 +
4482 + priv->usr_features = ndev->features;
4483 +
4484 + ndev->netdev_ops = &pfe_netdev_ops;
4485 +
4486 + ndev->ethtool_ops = &pfe_ethtool_ops;
4487 +
4488 + /* Enable basic messages by default */
4489 + priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
4490 + NETIF_MSG_PROBE;
4491 +
4492 + netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
4493 + HIF_RX_POLL_WEIGHT - 16);
4494 + netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
4495 + HIF_RX_POLL_WEIGHT - 16);
4496 + netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
4497 + HIF_RX_POLL_WEIGHT - 16);
4498 +
4499 + err = register_netdev(ndev);
4500 +
4501 + if (err) {
4502 + netdev_err(ndev, "register_netdev() failed\n");
4503 + goto err3;
4504 + }
4505 + device_init_wakeup(&ndev->dev, WAKE_MAGIC);
4506 +
4507 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
4508 + err = pfe_phy_init(ndev);
4509 + if (err) {
4510 + netdev_err(ndev, "%s: pfe_phy_init() failed\n",
4511 + __func__);
4512 + goto err4;
4513 + }
4514 + }
4515 +
4516 + netif_carrier_on(ndev);
4517 +
4518 + /* Create all the sysfs files */
4519 + if (pfe_eth_sysfs_init(ndev))
4520 + goto err4;
4521 +
4522 + netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
4523 + __func__, priv->EMAC_baseaddr);
4524 +
4525 + return 0;
4526 +err4:
4527 + unregister_netdev(ndev);
4528 +err3:
4529 + pfe_eth_mdio_exit(priv->mii_bus);
4530 +err2:
4531 + free_netdev(priv->ndev);
4532 +err0:
4533 + return err;
4534 +}
4535 +
4536 +/* pfe_eth_init
4537 + */
4538 +int pfe_eth_init(struct pfe *pfe)
4539 +{
4540 + int ii = 0;
4541 + int err;
4542 +
4543 + pr_info("%s\n", __func__);
4544 +
4545 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
4546 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
4547 +
4548 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
4549 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
4550 +
4551 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
4552 + err = pfe_eth_init_one(pfe, ii);
4553 + if (err)
4554 + goto err0;
4555 + }
4556 +
4557 + return 0;
4558 +
4559 +err0:
4560 + while (ii--)
4561 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4562 +
4564 + return err;
4565 +}
4566 +
4567 +/* pfe_eth_exit_one
4568 + */
4569 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
4570 +{
4571 + netif_info(priv, probe, priv->ndev, "%s\n", __func__);
4572 +
4573 + pfe_eth_sysfs_exit(priv->ndev);
4574 +
4575 + unregister_netdev(priv->ndev);
4576 +
4577 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
4578 + pfe_phy_exit(priv->ndev);
4579 +
4580 + if (priv->mii_bus)
4581 + pfe_eth_mdio_exit(priv->mii_bus);
4582 +
4583 + free_netdev(priv->ndev);
4584 +}
4585 +
4586 +/* pfe_eth_exit
4587 + */
4588 +void pfe_eth_exit(struct pfe *pfe)
4589 +{
4590 + int ii;
4591 +
4592 + pr_info("%s\n", __func__);
4593 +
4594 + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
4595 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4596 +}
4597 --- /dev/null
4598 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
4599 @@ -0,0 +1,184 @@
4600 +/*
4601 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4602 + * Copyright 2017 NXP
4603 + *
4604 + * This program is free software; you can redistribute it and/or modify
4605 + * it under the terms of the GNU General Public License as published by
4606 + * the Free Software Foundation; either version 2 of the License, or
4607 + * (at your option) any later version.
4608 + *
4609 + * This program is distributed in the hope that it will be useful,
4610 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4611 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4612 + * GNU General Public License for more details.
4613 + *
4614 + * You should have received a copy of the GNU General Public License
4615 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4616 + */
4617 +
4618 +#ifndef _PFE_ETH_H_
4619 +#define _PFE_ETH_H_
4620 +#include <linux/kernel.h>
4621 +#include <linux/netdevice.h>
4622 +#include <linux/etherdevice.h>
4623 +#include <linux/ethtool.h>
4624 +#include <linux/mii.h>
4625 +#include <linux/phy.h>
4626 +#include <linux/clk.h>
4627 +#include <linux/interrupt.h>
4628 +#include <linux/time.h>
4629 +
4630 +#define PFE_ETH_NAPI_STATS
4631 +#define PFE_ETH_TX_STATS
4632 +
4633 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
4634 +#define LRO_LEN_COUNT_MAX 32
4635 +#define LRO_NB_COUNT_MAX 32
4636 +
4637 +#define PFE_PAUSE_FLAG_ENABLE 1
4638 +#define PFE_PAUSE_FLAG_AUTONEG 2
4639 +
4640 +/* GEMAC configured by SW */
4641 +/* GEMAC configured by phy lines (not for MII/GMII) */
4642 +
4643 +#define GEMAC_SW_FULL_DUPLEX BIT(9)
4644 +#define GEMAC_SW_SPEED_10M (0 << 12)
4645 +#define GEMAC_SW_SPEED_100M BIT(12)
4646 +#define GEMAC_SW_SPEED_1G (2 << 12)
4647 +
4648 +#define GEMAC_NO_PHY BIT(0)
4649 +
4650 +struct ls1012a_eth_platform_data {
4651 + /* device specific information */
4652 + u32 device_flags;
4653 + char name[16];
4654 +
4655 + /* board specific information */
4656 + u32 mii_config;
4657 + u32 phy_flags;
4658 + u32 gem_id;
4659 + u32 bus_id;
4660 + u32 phy_id;
4661 + u32 mdio_muxval;
4662 + u8 mac_addr[ETH_ALEN];
4663 +};
4664 +
4665 +struct ls1012a_mdio_platform_data {
4666 + int enabled;
4667 + int irq[32];
4668 + u32 phy_mask;
4669 + int mdc_div;
4670 +};
4671 +
4672 +struct ls1012a_pfe_platform_data {
4673 + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
4674 + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
4675 +};
4676 +
4677 +#define NUM_GEMAC_SUPPORT 2
4678 +#define DRV_NAME "pfe-eth"
4679 +#define DRV_VERSION "1.0"
4680 +
4681 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
4682 +#define TX_POLL_TIMEOUT_MS 1000
4683 +
4684 +#define EMAC_TXQ_CNT 16
4685 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
4686 +
4687 +#define JUMBO_FRAME_SIZE 10258
4688 +/*
4689 + * Client Tx queue threshold, for txQ flush condition.
4690 + * It must be smaller than the queue size (in case we ever change it in the
4691 + * future).
4692 + */
4693 +#define HIF_CL_TX_FLUSH_MARK 32
4694 +
4695 +/*
4696 + * Max number of TX resources (HIF descriptors or skbs) that will be released
4697 + * in a single go during batch recycling.
4698 + * Should be lower than the flush mark so the SW can provide the HW with a
4699 + * continuous stream of packets instead of bursts.
4700 + */
4701 +#define TX_FREE_MAX_COUNT 16
4702 +#define EMAC_RXQ_CNT 3
4703 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
4704 +/* make sure clients can receive a full burst of packets */
4705 +#define EMAC_RMON_TXBYTES_POS 0x00
4706 +#define EMAC_RMON_RXBYTES_POS 0x14
4707 +
4708 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
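+/* Note: the mask form above is only valid while emac_txq_cnt is a power of two */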
4709 +#define EMAC_MDIO_TIMEOUT 1000
4710 +#define MAX_UC_SPEC_ADDR_REG 31
4711 +
4712 +struct pfe_eth_fast_timer {
4713 + int queuenum;
4714 + struct hrtimer timer;
4715 + void *base;
4716 +};
4717 +
4718 +struct pfe_eth_priv_s {
4719 + struct pfe *pfe;
4720 + struct hif_client_s client;
4721 + struct napi_struct lro_napi;
4722 + struct napi_struct low_napi;
4723 + struct napi_struct high_napi;
4724 + int low_tmu_q;
4725 + int high_tmu_q;
4726 + struct net_device_stats stats;
4727 + struct net_device *ndev;
4728 + int id;
4729 + int promisc;
4730 + unsigned int msg_enable;
4731 + unsigned int usr_features;
4732 +
4733 + spinlock_t lock; /* protect member variables */
4734 + unsigned int event_status;
4735 + int irq;
4736 + void *EMAC_baseaddr;
4737 + /* This points to the EMAC base from where we access PHY */
4738 + void *PHY_baseaddr;
4739 + void *GPI_baseaddr;
4740 + /* PHY stuff */
4741 + struct phy_device *phydev;
4742 + int oldspeed;
4743 + int oldduplex;
4744 + int oldlink;
4745 + /* mdio info */
4746 + int mdc_div;
4747 + struct mii_bus *mii_bus;
4748 + struct clk *gemtx_clk;
4749 + int wol;
4750 + int pause_flag;
4751 +
4752 + int default_priority;
4753 + struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
4754 +
4755 + struct ls1012a_eth_platform_data *einfo;
4756 + struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
4757 +
4758 +#ifdef PFE_ETH_TX_STATS
4759 + unsigned int stop_queue_total[EMAC_TXQ_CNT];
4760 + unsigned int stop_queue_hif[EMAC_TXQ_CNT];
4761 + unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
4762 + unsigned int stop_queue_credit[EMAC_TXQ_CNT];
4763 + unsigned int clean_fail[EMAC_TXQ_CNT];
4764 + unsigned int was_stopped[EMAC_TXQ_CNT];
4765 +#endif
4766 +
4767 +#ifdef PFE_ETH_NAPI_STATS
4768 + unsigned int napi_counters[NAPI_MAX_COUNT];
4769 +#endif
4770 + unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
4771 +};
4772 +
4773 +struct pfe_eth {
4774 + struct pfe_eth_priv_s *eth_priv[3];
4775 +};
4776 +
4777 +int pfe_eth_init(struct pfe *pfe);
4778 +void pfe_eth_exit(struct pfe *pfe);
4779 +int pfe_eth_suspend(struct net_device *dev);
4780 +int pfe_eth_resume(struct net_device *dev);
4781 +int pfe_eth_mdio_reset(struct mii_bus *bus);
4782 +
4783 +#endif /* _PFE_ETH_H_ */
4784 --- /dev/null
4785 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
4786 @@ -0,0 +1,314 @@
4787 +/*
4788 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4789 + * Copyright 2017 NXP
4790 + *
4791 + * This program is free software; you can redistribute it and/or modify
4792 + * it under the terms of the GNU General Public License as published by
4793 + * the Free Software Foundation; either version 2 of the License, or
4794 + * (at your option) any later version.
4795 + *
4796 + * This program is distributed in the hope that it will be useful,
4797 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4798 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4799 + * GNU General Public License for more details.
4800 + *
4801 + * You should have received a copy of the GNU General Public License
4802 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4803 + */
4804 +
4805 +/*
4806 + * @file
4807 + * Contains all the functions to handle parsing and loading of PE firmware
4808 + * files.
4809 + */
4810 +#include <linux/firmware.h>
4811 +
4812 +#include "pfe_mod.h"
4813 +#include "pfe_firmware.h"
4814 +#include "pfe/pfe.h"
4815 +
4816 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
4817 + const char *section)
4818 +{
4819 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4820 + struct elf32_shdr *shdr;
4821 + struct elf32_shdr *shdr_shstr;
4822 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
4823 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
4824 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
4825 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
4826 + Elf32_Off shstr_offset;
4827 + Elf32_Word sh_name;
4828 + const char *name;
4829 + int i;
4830 +
4831 + /* Section header strings */
4832 + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
4833 + e_shentsize);
4834 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
4835 +
4836 + for (i = 0; i < e_shnum; i++) {
4837 + shdr = (struct elf32_shdr *)(fw->data + e_shoff
4838 + + i * e_shentsize);
4839 +
4840 + sh_name = be32_to_cpu(shdr->sh_name);
4841 +
4842 + name = (const char *)(fw->data + shstr_offset + sh_name);
4843 +
4844 + if (!strcmp(name, section))
4845 + return shdr;
4846 + }
4847 +
4848 + pr_err("%s: didn't find section %s\n", __func__, section);
4849 +
4850 + return NULL;
4851 +}
4852 +
4853 +#if defined(CFG_DIAGS)
4854 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
4855 + *diags_info)
4856 +{
4857 + struct elf32_shdr *shdr;
4858 + unsigned long offset, size;
4859 +
4860 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
4861 + if (shdr) {
4862 + offset = be32_to_cpu(shdr->sh_offset);
4863 + size = be32_to_cpu(shdr->sh_size);
4864 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
4865 + diags_info->diags_str_size = size;
4866 + diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
4867 + memcpy(diags_info->diags_str_array, fw->data + offset, size);
4868 +
4869 + return 0;
4870 + } else {
4871 + return -1;
4872 + }
4873 +}
4874 +#endif
4875 +
4876 +static void pfe_check_version_info(const struct firmware *fw)
4877 +{
4878 + /*static char *version = NULL;*/
4879 + static char *version;
4880 +
4881 + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
4882 +
4883 + if (shdr) {
4884 + if (!version) {
4885 + /*
4886 + * this is the first fw we load, use its version
4887 + * string as reference (whatever it is)
4888 + */
4889 + version = (char *)(fw->data +
4890 + be32_to_cpu(shdr->sh_offset));
4891 +
4892 + pr_info("PFE binary version: %s\n", version);
4893 + } else {
4894 + /*
4895 +			 * at least one firmware has already been loaded;
4896 +			 * the version consistency check can start now
4897 + */
4898 + if (strcmp(version, (char *)(fw->data +
4899 + be32_to_cpu(shdr->sh_offset)))) {
4900 + pr_info(
4901 + "WARNING: PFE firmware binaries from incompatible version\n");
4902 + }
4903 + }
4904 + } else {
4905 + /*
4906 + * version cannot be verified, a potential issue that should
4907 + * be reported
4908 + */
4909 + pr_info(
4910 + "WARNING: PFE firmware binaries from incompatible version\n");
4911 + }
4912 +}
4913 +
4914 +/* PFE elf firmware loader.
4915 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
4916 + *
4917 + * @param pe_mask Mask of PE id's to load firmware to
4918 + * @param fw Pointer to the firmware image
4919 + *
4920 + * @return 0 on success, a negative value on error
4921 + *
4922 + */
4923 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
4924 +{
4925 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4926 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
4927 + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
4928 + be32_to_cpu(elf_hdr->e_shoff));
4929 + int id, section;
4930 + int rc;
4931 +
4932 + pr_info("%s\n", __func__);
4933 +
4934 + /* Some sanity checks */
4935 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
4936 + pr_err("%s: incorrect elf magic number\n", __func__);
4937 + return -EINVAL;
4938 + }
4939 +
4940 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
4941 + pr_err("%s: incorrect elf class(%x)\n", __func__,
4942 + elf_hdr->e_ident[EI_CLASS]);
4943 + return -EINVAL;
4944 + }
4945 +
4946 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
4947 + pr_err("%s: incorrect elf data(%x)\n", __func__,
4948 + elf_hdr->e_ident[EI_DATA]);
4949 + return -EINVAL;
4950 + }
4951 +
4952 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
4953 + pr_err("%s: incorrect elf file type(%x)\n", __func__,
4954 + be16_to_cpu(elf_hdr->e_type));
4955 + return -EINVAL;
4956 + }
4957 +
4958 + for (section = 0; section < sections; section++, shdr++) {
4959 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
4960 + SHF_EXECINSTR)))
4961 + continue;
4962 +
4963 + for (id = 0; id < MAX_PE; id++)
4964 + if (pe_mask & (1 << id)) {
4965 + rc = pe_load_elf_section(id, fw->data, shdr,
4966 + pfe->dev);
4967 + if (rc < 0)
4968 + goto err;
4969 + }
4970 + }
4971 +
4972 + pfe_check_version_info(fw);
4973 +
4974 + return 0;
4975 +
4976 +err:
4977 + return rc;
4978 +}
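+/*
+ * Note: the PE firmware images are big-endian ELF objects, hence the
+ * be16/be32 conversions when the (little-endian) host parses the headers.
+ */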
4979 +
4980 +/* PFE firmware initialization.
4981 + * Loads different firmware files from filesystem.
4982 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
4983 + * Initializes control path symbol addresses (by looking them up in the elf
4984 + * firmware files)
4985 + * Takes PE's out of reset
4986 + *
4987 + * @return 0 on success, a negative value on error
4988 + *
4989 + */
4990 +int pfe_firmware_init(struct pfe *pfe)
4991 +{
4992 + const struct firmware *class_fw, *tmu_fw;
4993 + int rc = 0;
4994 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
4995 + const char *util_fw_name;
4996 + const struct firmware *util_fw;
4997 +#endif
4998 +
4999 + pr_info("%s\n", __func__);
5000 +
5001 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5002 + pr_err("%s: request firmware %s failed\n", __func__,
5003 + CLASS_FIRMWARE_FILENAME);
5004 + rc = -ETIMEDOUT;
5005 + goto err0;
5006 + }
5007 +
5008 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5009 + pr_err("%s: request firmware %s failed\n", __func__,
5010 + TMU_FIRMWARE_FILENAME);
5011 + rc = -ETIMEDOUT;
5012 + goto err1;
5013 +	}
5014 +
5015 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5016 + util_fw_name = UTIL_FIRMWARE_FILENAME;
5017 +
5018 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5019 + pr_err("%s: request firmware %s failed\n", __func__,
5020 + util_fw_name);
5021 + rc = -ETIMEDOUT;
5022 + goto err2;
5023 + }
5024 +#endif
5025 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5026 + if (rc < 0) {
5027 + pr_err("%s: class firmware load failed\n", __func__);
5028 + goto err3;
5029 + }
5030 +
5031 +#if defined(CFG_DIAGS)
5032 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5033 + if (rc < 0) {
5034 + pr_warn("PFE diags won't be available for class PEs\n");
5036 + rc = 0;
5037 + }
5038 +#endif
5039 +
5040 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5041 + if (rc < 0) {
5042 + pr_err("%s: tmu firmware load failed\n", __func__);
5043 + goto err3;
5044 + }
5045 +
5046 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5047 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5048 + if (rc < 0) {
5049 + pr_err("%s: util firmware load failed\n", __func__);
5050 + goto err3;
5051 + }
5052 +
5053 +#if defined(CFG_DIAGS)
5054 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5055 + if (rc < 0) {
5056 + pr_warn("PFE diags won't be available for util PE\n");
5058 + rc = 0;
5059 + }
5060 +#endif
5061 +
5062 + util_enable();
5063 +#endif
5064 +
5065 + tmu_enable(0xf);
5066 + class_enable();
5067 +
5068 +err3:
5069 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5070 + release_firmware(util_fw);
5071 +
5072 +err2:
5073 +#endif
5074 + release_firmware(tmu_fw);
5075 +
5076 +err1:
5077 + release_firmware(class_fw);
5078 +
5079 +err0:
5080 + return rc;
5081 +}
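/*
 * Editor's note: minimal usage sketch, not part of the original patch. A
 * probe/remove path would pair the two entry points; error handling beyond
 * the return code is omitted:
 */
static int example_fw_lifecycle(struct pfe *pfe)
{
	int rc;

	rc = pfe_firmware_init(pfe);	/* load images, take PEs out of reset */
	if (rc)
		return rc;

	/* ... run the datapath ... */

	pfe_firmware_exit(pfe);		/* put PEs back in reset */
	return 0;
}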
5082 +
5083 +/* PFE firmware cleanup
5084 + * Puts PE's in reset
5085 + *
5086 + *
5087 + */
5088 +void pfe_firmware_exit(struct pfe *pfe)
5089 +{
5090 + pr_info("%s\n", __func__);
5091 +
5092 + if (pe_reset_all(&pfe->ctrl) != 0)
5093 + pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5094 +
5095 + class_disable();
5096 + tmu_disable(0xf);
5097 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5098 + util_disable();
5099 +#endif
5100 +}
5101 --- /dev/null
5102 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5103 @@ -0,0 +1,32 @@
5104 +/*
5105 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5106 + * Copyright 2017 NXP
5107 + *
5108 + * This program is free software; you can redistribute it and/or modify
5109 + * it under the terms of the GNU General Public License as published by
5110 + * the Free Software Foundation; either version 2 of the License, or
5111 + * (at your option) any later version.
5112 + *
5113 + * This program is distributed in the hope that it will be useful,
5114 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5115 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5116 + * GNU General Public License for more details.
5117 + *
5118 + * You should have received a copy of the GNU General Public License
5119 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5120 + */
5121 +
5122 +#ifndef _PFE_FIRMWARE_H_
5123 +#define _PFE_FIRMWARE_H_
5124 +
5125 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
5126 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
5127 +
5128 +#define PFE_FW_CHECK_PASS 0
5129 +#define PFE_FW_CHECK_FAIL 1
5130 +#define NUM_PFE_FW 3
5131 +
5132 +int pfe_firmware_init(struct pfe *pfe);
5133 +void pfe_firmware_exit(struct pfe *pfe);
5134 +
5135 +#endif /* _PFE_FIRMWARE_H_ */
5136 --- /dev/null
5137 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5138 @@ -0,0 +1,1516 @@
5139 +/*
5140 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5141 + * Copyright 2017 NXP
5142 + *
5143 + * This program is free software; you can redistribute it and/or modify
5144 + * it under the terms of the GNU General Public License as published by
5145 + * the Free Software Foundation; either version 2 of the License, or
5146 + * (at your option) any later version.
5147 + *
5148 + * This program is distributed in the hope that it will be useful,
5149 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5150 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5151 + * GNU General Public License for more details.
5152 + *
5153 + * You should have received a copy of the GNU General Public License
5154 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5155 + */
5156 +
5157 +#include "pfe_mod.h"
5158 +#include "pfe/pfe.h"
5159 +
5160 +void *cbus_base_addr;
5161 +void *ddr_base_addr;
5162 +unsigned long ddr_phys_base_addr;
5163 +unsigned int ddr_size;
5164 +
5165 +static struct pe_info pe[MAX_PE];
5166 +
5167 +/* Initializes the PFE library.
5168 + * Must be called before using any of the library functions.
5169 + *
5170 + * @param[in] cbus_base CBUS virtual base address (as mapped in
5171 + * the host CPU address space)
5172 + * @param[in] ddr_base PFE DDR range virtual base address (as
5173 + * mapped in the host CPU address space)
5174 + * @param[in] ddr_phys_base PFE DDR range physical base address (as
5175 + * mapped in platform)
5176 + * @param[in] size PFE DDR range size (as defined by the host
5177 + * software)
5178 + */
5179 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5180 + unsigned int size)
5181 +{
5182 + cbus_base_addr = cbus_base;
5183 + ddr_base_addr = ddr_base;
5184 + ddr_phys_base_addr = ddr_phys_base;
5185 + ddr_size = size;
5186 +
5187 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5188 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5189 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5190 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5191 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5192 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5193 +
5194 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5195 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5196 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5197 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5198 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5199 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5200 +
5201 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5202 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5203 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5204 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5205 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5206 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5207 +
5208 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5209 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5210 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5211 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5212 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5213 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5214 +
5215 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5216 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5217 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5218 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5219 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5220 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5221 +
5222 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5223 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5224 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5225 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5226 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5227 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5228 +
5229 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5230 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5231 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5232 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5233 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5234 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5235 +
5236 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5237 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5238 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5239 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5240 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5241 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5242 +
5243 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5244 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5245 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5246 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5247 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5248 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5249 +
5250 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5251 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5252 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5253 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5254 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5255 +#endif
5256 +}
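/*
 * Editor's note: the six CLASS entries above differ only in their index, so
 * they could be initialized in a loop. Sketch only; it assumes CLASS0_ID..
 * CLASS5_ID are consecutive values, which this patch does not state
 * explicitly:
 */
static void example_init_class_pes(void)
{
	int i;

	for (i = 0; i < 6; i++) {
		pe[CLASS0_ID + i].dmem_base_addr = CLASS_DMEM_BASE_ADDR(i);
		pe[CLASS0_ID + i].pmem_base_addr = CLASS_IMEM_BASE_ADDR(i);
		pe[CLASS0_ID + i].pmem_size = CLASS_IMEM_SIZE;
		pe[CLASS0_ID + i].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
		pe[CLASS0_ID + i].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
		pe[CLASS0_ID + i].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
	}
}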
5257 +
5258 +/* Writes a buffer to PE internal memory from the host
5259 + * through indirect access registers.
5260 + *
5261 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5262 + * ..., UTIL_ID)
5263 + * @param[in] src Buffer source address
5264 + * @param[in] mem_access_addr PE memory destination address (must be 32bit
5265 + * aligned)
5266 + * @param[in] len Number of bytes to copy
5267 + */
5268 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
5269 + unsigned int len)
5270 +{
5271 + u32 offset = 0, val, addr;
5272 + unsigned int len32 = len >> 2;
5273 + int i;
5274 +
5275 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5276 + PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5277 +
5278 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
5279 + val = *(u32 *)src;
5280 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5281 + writel(addr + offset, pe[id].mem_access_addr);
5282 + }
5283 +
5284 + len = (len & 0x3);
5285 + if (len) {
5286 + val = 0;
5287 +
5288 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5289 + PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5290 +
5291 + for (i = 0; i < len; i++, src++)
5292 + val |= (*(u8 *)src) << (8 * i);
5293 +
5294 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5295 + writel(addr, pe[id].mem_access_addr);
5296 + }
5297 +}
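/*
 * Editor's note: worked example, for illustration only. With len = 6 the
 * loop above performs one full 32-bit write; the tail then packs the two
 * remaining bytes into val (src byte 4 in bits 7:0, byte 5 in bits 15:8)
 * and narrows the byte enables to PE_MEM_ACCESS_BYTE_ENABLE(0, 2), so only
 * those two bytes are committed at offset 4.
 */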
5298 +
5299 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5300 + * through indirect access registers.
5301 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5302 + * ..., UTIL_ID)
5303 + * @param[in] src Buffer source address
5304 + * @param[in] dst DMEM destination address (must be 32bit
5305 + * aligned)
5306 + * @param[in] len Number of bytes to copy
5307 + */
5308 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5309 +{
5310 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5311 + PE_MEM_ACCESS_DMEM, src, len);
5312 +}
5313 +
5314 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5315 + * through indirect access registers.
5316 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5317 + * ..., TMU3_ID)
5318 + * @param[in] src Buffer source address
5319 + * @param[in] dst PMEM destination address (must be 32bit
5320 + * aligned)
5321 + * @param[in] len Number of bytes to copy
5322 + */
5323 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5324 +{
5325 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5326 + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5327 +}
5328 +
5329 +/* Reads PE internal program memory (IMEM) from the host
5330 + * through indirect access registers.
5331 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5332 + * ..., TMU3_ID)
5333 + * @param[in] addr PMEM read address (must be aligned on size)
5334 + * @param[in] size Number of bytes to read (maximum 4, must not
5335 + * cross 32bit boundaries)
5336 + * @return the data read (in PE endianness, i.e. BE).
5337 + */
5338 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5339 +{
5340 + u32 offset = addr & 0x3;
5341 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5342 + u32 val;
5343 +
5344 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5345 + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5346 +
5347 + writel(addr, pe[id].mem_access_addr);
5348 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5349 +
5350 + return (val >> (offset << 3)) & mask;
5351 +}
5352 +
5353 +/* Writes PE internal data memory (DMEM) from the host
5354 + * through indirect access registers.
5355 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5356 + * ..., UTIL_ID)
5357 + * @param[in] addr DMEM write address (must be aligned on size)
5358 + * @param[in] val Value to write (in PE endianness, i.e. BE)
5359 + * @param[in] size Number of bytes to write (maximum 4, must not
5360 + * cross 32bit boundaries)
5361 + */
5362 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5363 +{
5364 + u32 offset = addr & 0x3;
5365 +
5366 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5367 + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5368 +
5369 + /* The indirect access interface byte-swaps the data being written */
5370 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5371 + writel(addr, pe[id].mem_access_addr);
5372 +}
5373 +
5374 +/* Reads PE internal data memory (DMEM) from the host
5375 + * through indirect access registers.
5376 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5377 + * ..., UTIL_ID)
5378 + * @param[in] addr DMEM read address (must be aligned on size)
5379 + * @param[in] size Number of bytes to read (maximum 4, must not
5380 + * cross 32bit boundaries)
5381 + * @return the data read (in PE endianness, i.e. BE).
5382 + */
5383 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5384 +{
5385 + u32 offset = addr & 0x3;
5386 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5387 + u32 val;
5388 +
5389 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5390 + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5391 +
5392 + writel(addr, pe[id].mem_access_addr);
5393 +
5394 + /* The indirect access interface byte-swaps the data being read */
5395 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5396 +
5397 + return (val >> (offset << 3)) & mask;
5398 +}
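/*
 * Editor's note: round-trip sketch, not part of the original patch; the
 * DMEM offset and values are arbitrary examples. Both accesses respect the
 * "aligned on size, no 32-bit boundary crossing" rules documented above:
 */
static void example_dmem_roundtrip(void)
{
	u32 v;

	pe_dmem_write(CLASS0_ID, 0x12345678, 0x100, 4);	/* full word */
	v = pe_dmem_read(CLASS0_ID, 0x100, 4);		/* reads back 0x12345678 */

	pe_dmem_write(CLASS0_ID, 0xab, 0x106, 1);	/* single byte */
	v = pe_dmem_read(CLASS0_ID, 0x106, 1);		/* reads back 0xab */

	(void)v;
}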
5399 +
5400 +/* Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
5401 + * through indirect access registers.
5403 + * @param[in] val value to write
5404 + * @param[in] addr Address to write to (must be aligned on size)
5405 + * @param[in] size Number of bytes to write (1, 2 or 4)
5406 + *
5407 + */
5408 +void class_bus_write(u32 val, u32 addr, u8 size)
5409 +{
5410 + u32 offset = addr & 0x3;
5411 +
5412 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5413 +
5414 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5415 + (size << 24);
5416 +
5417 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5418 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5419 +}
5420 +
5421 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
5422 + * through indirect access registers.
5423 + * @param[in] addr Address to read from (must be aligned on size)
5424 + * @param[in] size Number of bytes to read (1, 2 or 4)
5425 + * @return the read data
5426 + *
5427 + */
5428 +u32 class_bus_read(u32 addr, u8 size)
5429 +{
5430 + u32 offset = addr & 0x3;
5431 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5432 + u32 val;
5433 +
5434 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5435 +
5436 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5437 +
5438 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5439 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5440 +
5441 + return (val >> (offset << 3)) & mask;
5442 +}
5443 +
5444 +/* Writes data to the cluster memory (PE_LMEM)
5445 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5446 + * @param[in] src Buffer source address
5447 + * @param[in] len Number of bytes to copy
5448 + */
5449 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
5450 +{
5451 + u32 len32 = len >> 2;
5452 + int i;
5453 +
5454 + for (i = 0; i < len32; i++, src += 4, dst += 4)
5455 + class_bus_write(*(u32 *)src, dst, 4);
5456 +
5457 + if (len & 0x2) {
5458 + class_bus_write(*(u16 *)src, dst, 2);
5459 + src += 2;
5460 + dst += 2;
5461 + }
5462 +
5463 + if (len & 0x1) {
5464 + class_bus_write(*(u8 *)src, dst, 1);
5465 + src++;
5466 + dst++;
5467 + }
5468 +}
5469 +
5470 +/* Writes value to the cluster memory (PE_LMEM)
5471 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5472 + * @param[in] val Value to write
5473 + * @param[in] len Number of bytes to write
5474 + */
5475 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
5476 +{
5477 + u32 len32 = len >> 2;
5478 + int i;
5479 +
5480 + val = val | (val << 8) | (val << 16) | (val << 24);
5481 +
5482 + for (i = 0; i < len32; i++, dst += 4)
5483 + class_bus_write(val, dst, 4);
5484 +
5485 + if (len & 0x2) {
5486 + class_bus_write(val, dst, 2);
5487 + dst += 2;
5488 + }
5489 +
5490 + if (len & 0x1) {
5491 + class_bus_write(val, dst, 1);
5492 + dst++;
5493 + }
5494 +}
5495 +
5496 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5497 +
5498 +/* Writes UTIL program memory (DDR) from the host.
5499 + *
5500 + * @param[in] addr Address to write (virtual, must be aligned on size)
5501 + * @param[in] val Value to write (in PE endianness, i.e. BE)
5502 + * @param[in] size Number of bytes to write (2 or 4)
5503 + */
5504 +static void util_pmem_write(u32 val, void *addr, u8 size)
5505 +{
5506 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
5507 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
5508 +
5509 + /*
5510 + * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
5511 + * location
5512 + */
5513 + if (size == 4)
5514 + writel(be32_to_cpu(val), addr64 + off);
5515 + else
5516 + writew(be16_to_cpu((u16)val), addr64 + off);
5517 +}
5518 +
5519 +/* Writes a buffer to UTIL program memory (DDR) from the host.
5520 + *
5521 + * @param[in] dst Address to write (virtual, must be at least 16bit
5522 + * aligned)
5523 + * @param[in] src Buffer to write (in PE endianness, i.e. BE, must have
5524 + * same alignment as dst)
5525 + * @param[in] len Number of bytes to write (must be at least 16bit
5526 + * aligned)
5527 + */
5528 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
5529 +{
5530 + unsigned int len32;
5531 + int i;
5532 +
5533 + if ((unsigned long)src & 0x2) {
5534 + util_pmem_write(*(u16 *)src, dst, 2);
5535 + src += 2;
5536 + dst += 2;
5537 + len -= 2;
5538 + }
5539 +
5540 + len32 = len >> 2;
5541 +
5542 + for (i = 0; i < len32; i++, dst += 4, src += 4)
5543 + util_pmem_write(*(u32 *)src, dst, 4);
5544 +
5545 + if (len & 0x2)
5546 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
5547 +}
5548 +#endif
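/*
 * Editor's note: worked example of the util_pmem_write() address math, for
 * illustration only. UTIL IMEM expects each aligned 64-bit doubleword to be
 * stored byte-swapped, so a 32-bit word at (addr & 0x7) == 0 lands at
 * offset 8 - 0 - 4 = 4 of the doubleword, one at (addr & 0x7) == 4 lands at
 * offset 0, and within each half the value itself is swapped by the
 * be32_to_cpu()/be16_to_cpu() conversion before the MMIO write.
 */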
5549 +
5550 +/* Loads an elf section into pmem
5551 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
5552 + * supported
5553 + *
5554 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
5555 + * TMU3_ID)
5556 + * @param[in] data pointer to the elf firmware
5557 + * @param[in] shdr pointer to the elf section header
5558 + *
5559 + */
5560 +static int pe_load_pmem_section(int id, const void *data,
5561 + struct elf32_shdr *shdr)
5562 +{
5563 + u32 offset = be32_to_cpu(shdr->sh_offset);
5564 + u32 addr = be32_to_cpu(shdr->sh_addr);
5565 + u32 size = be32_to_cpu(shdr->sh_size);
5566 + u32 type = be32_to_cpu(shdr->sh_type);
5567 +
5568 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5569 + if (id == UTIL_ID) {
5570 + pr_err("%s: unsupported pmem section for UTIL\n",
5571 + __func__);
5572 + return -EINVAL;
5573 + }
5574 +#endif
5575 +
5576 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5577 + pr_err(
5578 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5579 + , __func__, addr, (unsigned long)data + offset);
5580 +
5581 + return -EINVAL;
5582 + }
5583 +
5584 + if (addr & 0x1) {
5585 + pr_err("%s: load address(%x) is not 16bit aligned\n",
5586 + __func__, addr);
5587 + return -EINVAL;
5588 + }
5589 +
5590 + if (size & 0x1) {
5591 + pr_err("%s: load size(%x) is not 16bit aligned\n",
5592 + __func__, size);
5593 + return -EINVAL;
5594 + }
5595 +
5596 + switch (type) {
5597 + case SHT_PROGBITS:
5598 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
5599 +
5600 + break;
5601 +
5602 + default:
5603 + pr_err("%s: unsupported section type(%x)\n", __func__,
5604 + type);
5605 + return -EINVAL;
5606 + }
5607 +
5608 + return 0;
5609 +}
5610 +
5611 +/* Loads an elf section into dmem
5612 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5613 + * initialized to 0
5614 + *
5615 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5616 + * ..., UTIL_ID)
5617 + * @param[in] data pointer to the elf firmware
5618 + * @param[in] shdr pointer to the elf section header
5619 + *
5620 + */
5621 +static int pe_load_dmem_section(int id, const void *data,
5622 + struct elf32_shdr *shdr)
5623 +{
5624 + u32 offset = be32_to_cpu(shdr->sh_offset);
5625 + u32 addr = be32_to_cpu(shdr->sh_addr);
5626 + u32 size = be32_to_cpu(shdr->sh_size);
5627 + u32 type = be32_to_cpu(shdr->sh_type);
5628 + u32 size32 = size >> 2;
5629 + int i;
5630 +
5631 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5632 + pr_err(
5633 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5634 + __func__, addr, (unsigned long)data + offset);
5635 +
5636 + return -EINVAL;
5637 + }
5638 +
5639 + if (addr & 0x3) {
5640 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5641 + __func__, addr);
5642 + return -EINVAL;
5643 + }
5644 +
5645 + switch (type) {
5646 + case SHT_PROGBITS:
5647 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
5648 + break;
5649 +
5650 + case SHT_NOBITS:
5651 + for (i = 0; i < size32; i++, addr += 4)
5652 + pe_dmem_write(id, 0, addr, 4);
5653 +
5654 + if (size & 0x3)
5655 + pe_dmem_write(id, 0, addr, size & 0x3);
5656 +
5657 + break;
5658 +
5659 + default:
5660 + pr_err("%s: unsupported section type(%x)\n", __func__,
5661 + type);
5662 + return -EINVAL;
5663 + }
5664 +
5665 + return 0;
5666 +}
5667 +
5668 +/* Loads an elf section into DDR
5669 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5670 + * initialized to 0
5671 + *
5672 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5673 + * ..., UTIL_ID)
5674 + * @param[in] data pointer to the elf firmware
5675 + * @param[in] shdr pointer to the elf section header
5676 + *
5677 + */
5678 +static int pe_load_ddr_section(int id, const void *data,
5679 + struct elf32_shdr *shdr,
5680 + struct device *dev) {
5681 + u32 offset = be32_to_cpu(shdr->sh_offset);
5682 + u32 addr = be32_to_cpu(shdr->sh_addr);
5683 + u32 size = be32_to_cpu(shdr->sh_size);
5684 + u32 type = be32_to_cpu(shdr->sh_type);
5685 + u32 flags = be32_to_cpu(shdr->sh_flags);
5686 +
5687 + switch (type) {
5688 + case SHT_PROGBITS:
5689 + if (flags & SHF_EXECINSTR) {
5690 + if (id <= CLASS_MAX_ID) {
5691 + /* Do the loading only once in DDR */
5692 + if (id == CLASS0_ID) {
5693 + pr_err(
5694 + "%s: load address(%x) and elf file address(%lx) rcvd\n",
5695 + __func__, addr,
5696 + (unsigned long)data + offset);
5697 + if (((unsigned long)(data + offset)
5698 + & 0x3) != (addr & 0x3)) {
5699 + pr_err(
5700 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5701 + , __func__, addr,
5702 + (unsigned long)data + offset);
5703 +
5704 + return -EINVAL;
5705 + }
5706 +
5707 + if (addr & 0x1) {
5708 + pr_err(
5709 + "%s: load address(%x) is not 16bit aligned\n"
5710 + , __func__, addr);
5711 + return -EINVAL;
5712 + }
5713 +
5714 + if (size & 0x1) {
5715 + pr_err(
5716 + "%s: load length(%x) is not 16bit aligned\n"
5717 + , __func__, size);
5718 + return -EINVAL;
5719 + }
5720 + memcpy(DDR_PHYS_TO_VIRT(
5721 + DDR_PFE_TO_PHYS(addr)),
5722 + data + offset, size);
5723 + }
5724 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5725 + } else if (id == UTIL_ID) {
5726 + if (((unsigned long)(data + offset) & 0x3)
5727 + != (addr & 0x3)) {
5728 + pr_err(
5729 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5730 + , __func__, addr,
5731 + (unsigned long)data + offset);
5732 +
5733 + return -EINVAL;
5734 + }
5735 +
5736 + if (addr & 0x1) {
5737 + pr_err(
5738 + "%s: load address(%x) is not 16bit aligned\n"
5739 + , __func__, addr);
5740 + return -EINVAL;
5741 + }
5742 +
5743 + if (size & 0x1) {
5744 + pr_err(
5745 + "%s: load length(%x) is not 16bit aligned\n"
5746 + , __func__, size);
5747 + return -EINVAL;
5748 + }
5749 +
5750 + util_pmem_memcpy(DDR_PHYS_TO_VIRT(
5751 + DDR_PFE_TO_PHYS(addr)),
5752 + data + offset, size);
5753 + }
5754 +#endif
5755 + } else {
5756 + pr_err(
5757 + "%s: unsupported ddr section type(%x) for PE(%d)\n"
5758 + , __func__, type, id);
5759 + return -EINVAL;
5760 + }
5761 +
5762 + } else {
5763 + memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
5764 + + offset, size);
5765 + }
5766 +
5767 + break;
5768 +
5769 + case SHT_NOBITS:
5770 + memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
5771 +
5772 + break;
5773 +
5774 + default:
5775 + pr_err("%s: unsupported section type(%x)\n", __func__,
5776 + type);
5777 + return -EINVAL;
5778 + }
5779 +
5780 + return 0;
5781 +}
5782 +
5783 +/* Loads an elf section into pe lmem
5784 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5785 + * initialized to 0
5786 + *
5787 + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
5788 + * @param[in] data pointer to the elf firmware
5789 + * @param[in] shdr pointer to the elf section header
5790 + *
5791 + */
5792 +static int pe_load_pe_lmem_section(int id, const void *data,
5793 + struct elf32_shdr *shdr)
5794 +{
5795 + u32 offset = be32_to_cpu(shdr->sh_offset);
5796 + u32 addr = be32_to_cpu(shdr->sh_addr);
5797 + u32 size = be32_to_cpu(shdr->sh_size);
5798 + u32 type = be32_to_cpu(shdr->sh_type);
5799 +
5800 + if (id > CLASS_MAX_ID) {
5801 + pr_err(
5802 + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
5803 + __func__, type, id);
5804 + return -EINVAL;
5805 + }
5806 +
5807 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5808 + pr_err(
5809 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5810 + __func__, addr, (unsigned long)data + offset);
5811 +
5812 + return -EINVAL;
5813 + }
5814 +
5815 + if (addr & 0x3) {
5816 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5817 + __func__, addr);
5818 + return -EINVAL;
5819 + }
5820 +
5821 + switch (type) {
5822 + case SHT_PROGBITS:
5823 + class_pe_lmem_memcpy_to32(addr, data + offset, size);
5824 + break;
5825 +
5826 + case SHT_NOBITS:
5827 + class_pe_lmem_memset(addr, 0, size);
5828 + break;
5829 +
5830 + default:
5831 + pr_err("%s: unsupported section type(%x)\n", __func__,
5832 + type);
5833 + return -EINVAL;
5834 + }
5835 +
5836 + return 0;
5837 +}
5838 +
5839 +/* Loads an elf section into a PE
5840 + * For now only supports loading a section to dmem (all PE's), pmem (class and
5841 + * tmu PE's), or DDR (util PE code)
5843 + *
5844 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5845 + * ..., UTIL_ID)
5846 + * @param[in] data pointer to the elf firmware
5847 + * @param[in] shdr pointer to the elf section header
5848 + *
5849 + */
5850 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
5851 + struct device *dev) {
5852 + u32 addr = be32_to_cpu(shdr->sh_addr);
5853 + u32 size = be32_to_cpu(shdr->sh_size);
5854 +
5855 + if (IS_DMEM(addr, size))
5856 + return pe_load_dmem_section(id, data, shdr);
5857 + else if (IS_PMEM(addr, size))
5858 + return pe_load_pmem_section(id, data, shdr);
5859 + else if (IS_PFE_LMEM(addr, size))
5860 + return 0;
5861 + else if (IS_PHYS_DDR(addr, size))
5862 + return pe_load_ddr_section(id, data, shdr, dev);
5863 + else if (IS_PE_LMEM(addr, size))
5864 + return pe_load_pe_lmem_section(id, data, shdr);
5865 +
5866 + pr_err("%s: unsupported memory range(%x)\n", __func__,
5867 + addr);
5868 + return 0;
5869 +}
5870 +
5871 +/**************************** BMU ***************************/
5872 +
5873 +/* Initializes a BMU block.
5874 + * @param[in] base BMU block base address
5875 + * @param[in] cfg BMU configuration
5876 + */
5877 +void bmu_init(void *base, struct BMU_CFG *cfg)
5878 +{
5879 + bmu_disable(base);
5880 +
5881 + bmu_set_config(base, cfg);
5882 +
5883 + bmu_reset(base);
5884 +}
5885 +
5886 +/* Resets a BMU block.
5887 + * @param[in] base BMU block base address
5888 + */
5889 +void bmu_reset(void *base)
5890 +{
5891 + writel(CORE_SW_RESET, base + BMU_CTRL);
5892 +
5893 + /* Wait for self clear */
5894 + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
5895 + ;
5896 +}
5897 +
5898 +/* Enables a BMU block.
5899 + * @param[in] base BMU block base address
5900 + */
5901 +void bmu_enable(void *base)
5902 +{
5903 + writel(CORE_ENABLE, base + BMU_CTRL);
5904 +}
5905 +
5906 +/* Disables a BMU block.
5907 + * @param[in] base BMU block base address
5908 + */
5909 +void bmu_disable(void *base)
5910 +{
5911 + writel(CORE_DISABLE, base + BMU_CTRL);
5912 +}
5913 +
5914 +/* Sets the configuration of a BMU block.
5915 + * @param[in] base BMU block base address
5916 + * @param[in] cfg BMU configuration
5917 + */
5918 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
5919 +{
5920 + writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
5921 + writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
5922 + writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
5923 +
5924 + /* Interrupts are never used */
5925 + writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
5926 + writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
5927 + writel(0x0, base + BMU_INT_ENABLE);
5928 +}
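/*
 * Editor's note: illustrative sketch, not part of the original patch. All
 * field values are hypothetical; struct BMU_CFG carries the buffer-pool
 * geometry programmed by bmu_set_config() above:
 */
static void example_bmu_bringup(void *bmu_base)
{
	struct BMU_CFG cfg = {
		.baseaddr = 0x80000000,	/* hypothetical pool base */
		.count = 1024,		/* hypothetical buffer count */
		.size = 7,		/* hypothetical BMU_BUF_SIZE value */
		.low_watermark = 32,
		.high_watermark = 64,
	};

	bmu_init(bmu_base, &cfg);	/* disable, configure, reset */
	bmu_enable(bmu_base);
}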
5929 +
5930 +/**************************** MTIP GEMAC ***************************/
5931 +
5932 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
5933 + * TCP or UDP checksums are discarded
5934 + *
5935 + * @param[in] base GEMAC base address.
5936 + */
5937 +void gemac_enable_rx_checksum_offload(void *base)
5938 +{
5939 + /* No configuration found to do this */
5940 +}
5941 +
5942 +/* Disable Rx Checksum Engine.
5943 + *
5944 + * @param[in] base GEMAC base address.
5945 + */
5946 +void gemac_disable_rx_checksum_offload(void *base)
5947 +{
5948 + /* No configuration found to do this */
5949 +}
5950 +
5951 +/* GEMAC set speed.
5952 + * @param[in] base GEMAC base address
5953 + * @param[in] gem_speed GEMAC speed (10, 100 or 1000 Mbps)
5954 + */
5955 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
5956 +{
5957 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
5958 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
5959 +
5960 + switch (gem_speed) {
5961 + case SPEED_10M:
5962 + rcr |= EMAC_RCNTRL_RMII_10T;
5963 + break;
5964 +
5965 + case SPEED_1000M:
5966 + ecr |= EMAC_ECNTRL_SPEED;
5967 + break;
5968 +
5969 + case SPEED_100M:
5970 + default:
5971 + /* already in 100M mode */
5972 + break;
5973 + }
5974 + writel(ecr, (base + EMAC_ECNTRL_REG));
5975 + writel(rcr, (base + EMAC_RCNTRL_REG));
5976 +}
5977 +
5978 +/* GEMAC set duplex.
5979 + * @param[in] base GEMAC base address
5980 + * @param[in] duplex GEMAC duplex mode (Full, Half)
5981 + */
5982 +void gemac_set_duplex(void *base, int duplex)
5983 +{
5984 + if (duplex == DUPLEX_HALF) {
5985 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
5986 + + EMAC_TCNTRL_REG);
5987 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
5988 + + EMAC_RCNTRL_REG));
5989 + } else{
5990 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
5991 + + EMAC_TCNTRL_REG);
5992 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
5993 + + EMAC_RCNTRL_REG));
5994 + }
5995 +}
5996 +
5997 +/* GEMAC set mode.
5998 + * @param[in] base GEMAC base address
5999 + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
6000 + */
6001 +void gemac_set_mode(void *base, int mode)
6002 +{
6003 + u32 val = readl(base + EMAC_RCNTRL_REG);
6004 +
6005 + /* Remove loopback */
6006 + val &= ~EMAC_RCNTRL_LOOP;
6007 +
6008 + /*Enable flow control and MII mode*/
6009 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
6010 +
6011 + writel(val, base + EMAC_RCNTRL_REG);
6012 +}
6013 +
6014 +/* GEMAC enable function.
6015 + * @param[in] base GEMAC base address
6016 + */
6017 +void gemac_enable(void *base)
6018 +{
6019 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6020 + EMAC_ECNTRL_REG);
6021 +}
6022 +
6023 +/* GEMAC disable function.
6024 + * @param[in] base GEMAC base address
6025 + */
6026 +void gemac_disable(void *base)
6027 +{
6028 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6029 + EMAC_ECNTRL_REG);
6030 +}
6031 +
6032 +/* GEMAC TX disable function.
6033 + * @param[in] base GEMAC base address
6034 + */
6035 +void gemac_tx_disable(void *base)
6036 +{
6037 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6038 + EMAC_TCNTRL_REG);
6039 +}
6040 +
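/* GEMAC TX enable function.
 * @param[in] base GEMAC base address
 */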
6041 +void gemac_tx_enable(void *base)
6042 +{
6043 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6044 + EMAC_TCNTRL_REG);
6045 +}
6046 +
6047 +/* Sets the hash register of the MAC.
6048 + * This register is used for matching unicast and multicast frames.
6049 + *
6050 + * @param[in] base GEMAC base address.
6051 + * @param[in] hash 64-bit hash to be configured.
6052 + */
6053 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6054 +{
6055 + writel(hash->bottom, base + EMAC_GALR);
6056 + writel(hash->top, base + EMAC_GAUR);
6057 +}
6058 +
6059 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6060 + unsigned int entry_index)
6061 +{
6062 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6063 + return;
6064 +
6065 + entry_index = entry_index - 1;
6066 + if (entry_index < 1) {
6067 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
6068 + writel((htonl(address->top) | 0x8808), base +
6069 + EMAC_PHY_ADDR_HIGH);
6070 + } else {
6071 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
6072 + + EMAC_SMAC_0_0);
6073 + writel((htonl(address->top) | 0x8808), base + ((entry_index -
6074 + 1) * 8) + EMAC_SMAC_0_1);
6075 + }
6076 +}
6077 +
6078 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6079 +{
6080 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6081 + return;
6082 +
6083 + entry_index = entry_index - 1;
6084 + if (entry_index < 1) {
6085 + writel(0, base + EMAC_PHY_ADDR_LOW);
6086 + writel(0, base + EMAC_PHY_ADDR_HIGH);
6087 + } else {
6088 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6089 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6090 + }
6091 +}
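/*
 * Editor's note: usage sketch, not part of the original patch. Entry 1 is
 * the primary station address; the byte packing of bottom/top shown here is
 * an assumption for illustration (the authoritative packing lives in the
 * callers of gemac_set_laddrN(), not in this file):
 */
static void example_set_primary_mac(void *base, const u8 *mac)
{
	struct pfe_mac_addr addr;

	addr.bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	addr.top = mac[4] | (mac[5] << 8);

	gemac_set_laddrN(base, &addr, 1);
}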
6092 +
6093 +/* Set the loopback mode of the MAC. This can be either no loopback for
6094 + * normal operation, local loopback through MAC internal loopback module or PHY
6095 + * loopback for external loopback through a PHY. This asserts the external
6096 + * loop pin.
6097 + *
6098 + * @param[in] base GEMAC base address.
6099 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
6100 + * Loopback,
6101 + * LB_EXT - PHY Loopback.
6102 + */
6103 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6104 +{
6105 + pr_info("%s()\n", __func__);
6106 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6107 + EMAC_RCNTRL_REG));
6108 +}
6109 +
6110 +/* GEMAC allow all frames (promiscuous mode)
6111 + * @param[in] base GEMAC base address
6112 + */
6113 +void gemac_enable_copy_all(void *base)
6114 +{
6115 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6116 + EMAC_RCNTRL_REG));
6117 +}
6118 +
6119 +/* GEMAC disable promiscuous mode
6120 + * @param[in] base GEMAC base address
6121 + */
6122 +void gemac_disable_copy_all(void *base)
6123 +{
6124 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6125 + EMAC_RCNTRL_REG));
6126 +}
6127 +
6128 +/* GEMAC allow broadcast function.
6129 + * @param[in] base GEMAC base address
6130 + */
6131 +void gemac_allow_broadcast(void *base)
6132 +{
6133 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6134 + EMAC_RCNTRL_REG);
6135 +}
6136 +
6137 +/* GEMAC no broadcast function.
6138 + * @param[in] base GEMAC base address
6139 + */
6140 +void gemac_no_broadcast(void *base)
6141 +{
6142 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6143 + EMAC_RCNTRL_REG);
6144 +}
6145 +
6146 +/* GEMAC enable 1536 rx function.
6147 + * @param[in] base GEMAC base address
6148 + */
6149 +void gemac_enable_1536_rx(void *base)
6150 +{
6151 + /* Set 1536 as Maximum frame length */
6152 + writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
6153 + EMAC_RCNTRL_REG);
6154 +}
6155 +
6156 +/* GEMAC enable jumbo function.
6157 + * @param[in] base GEMAC base address
6158 + */
6159 +void gemac_enable_rx_jmb(void *base)
6160 +{
6161 + writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
6162 + + EMAC_RCNTRL_REG);
6163 +}
6164 +
6165 +/* GEMAC enable stacked vlan function.
6166 + * @param[in] base GEMAC base address
6167 + */
6168 +void gemac_enable_stacked_vlan(void *base)
6169 +{
6170 + /* MTIP doesn't support stacked vlan */
6171 +}
6172 +
6173 +/* GEMAC enable pause rx function.
6174 + * @param[in] base GEMAC base address
6175 + */
6176 +void gemac_enable_pause_rx(void *base)
6177 +{
6178 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6179 + base + EMAC_RCNTRL_REG);
6180 +}
6181 +
6182 +/* GEMAC disable pause rx function.
6183 + * @param[in] base GEMAC base address
6184 + */
6185 +void gemac_disable_pause_rx(void *base)
6186 +{
6187 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6188 + base + EMAC_RCNTRL_REG);
6189 +}
6190 +
6191 +/* GEMAC enable pause tx function.
6192 + * @param[in] base GEMAC base address
6193 + */
6194 +void gemac_enable_pause_tx(void *base)
6195 +{
6196 + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6197 +}
6198 +
6199 +/* GEMAC disable pause tx function.
6200 + * @param[in] base GEMAC base address
6201 + */
6202 +void gemac_disable_pause_tx(void *base)
6203 +{
6204 + writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6205 +}
6206 +
6207 +/* GEMAC wol configuration
6208 + * @param[in] base GEMAC base address
6209 + * @param[in] wol_conf WoL register configuration
6210 + */
6211 +void gemac_set_wol(void *base, u32 wol_conf)
6212 +{
6213 + u32 val = readl(base + EMAC_ECNTRL_REG);
6214 +
6215 + if (wol_conf)
6216 + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6217 + else
6218 + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6219 + writel(val, base + EMAC_ECNTRL_REG);
6220 +}
6221 +
6222 +/* Sets Gemac bus width to 64bit
6223 + * @param[in] base GEMAC base address
6224 + * @param[in] width gemac bus width to be set possible values are 32/64/128
6225 + */
6226 +void gemac_set_bus_width(void *base, int width)
6227 +{
6228 +}
6229 +
6230 +/* Sets Gemac configuration.
6231 + * @param[in] base GEMAC base address
6232 + * @param[in] cfg GEMAC configuration
6233 + */
6234 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6235 +{
6236 + /*GEMAC config taken from VLSI */
6237 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6238 + writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6239 + writel(0x00003fff, base + EMAC_TRUNC_FL);
6240 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6241 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6242 +
6243 + gemac_set_mode(base, cfg->mode);
6244 +
6245 + gemac_set_speed(base, cfg->speed);
6246 +
6247 + gemac_set_duplex(base, cfg->duplex);
6248 +}
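/*
 * Editor's note: illustrative bring-up sketch, not part of the original
 * patch. SPEED_1000M matches the enum used by gemac_set_speed() above;
 * DUPLEX_FULL is the standard kernel constant; the mode value is a
 * placeholder since gemac_set_mode() above does not use it:
 */
static void example_gemac_bringup(void *base)
{
	struct gemac_cfg cfg = {
		.mode = 0,		/* placeholder, see gemac_set_mode() */
		.speed = SPEED_1000M,
		.duplex = DUPLEX_FULL,
	};

	gemac_set_config(base, &cfg);
	gemac_enable_1536_rx(base);
	gemac_allow_broadcast(base);
	gemac_enable(base);
}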
6249 +
6250 +/**************************** GPI ***************************/
6251 +
6252 +/* Initializes a GPI block.
6253 + * @param[in] base GPI base address
6254 + * @param[in] cfg GPI configuration
6255 + */
6256 +void gpi_init(void *base, struct gpi_cfg *cfg)
6257 +{
6258 + gpi_reset(base);
6259 +
6260 + gpi_disable(base);
6261 +
6262 + gpi_set_config(base, cfg);
6263 +}
6264 +
6265 +/* Resets a GPI block.
6266 + * @param[in] base GPI base address
6267 + */
6268 +void gpi_reset(void *base)
6269 +{
6270 + writel(CORE_SW_RESET, base + GPI_CTRL);
6271 +}
6272 +
6273 +/* Enables a GPI block.
6274 + * @param[in] base GPI base address
6275 + */
6276 +void gpi_enable(void *base)
6277 +{
6278 + writel(CORE_ENABLE, base + GPI_CTRL);
6279 +}
6280 +
6281 +/* Disables a GPI block.
6282 + * @param[in] base GPI base address
6283 + */
6284 +void gpi_disable(void *base)
6285 +{
6286 + writel(CORE_DISABLE, base + GPI_CTRL);
6287 +}
6288 +
6289 +/* Sets the configuration of a GPI block.
6290 + * @param[in] base GPI base address
6291 + * @param[in] cfg GPI configuration
6292 + */
6293 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6294 +{
6295 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
6296 + + GPI_LMEM_ALLOC_ADDR);
6297 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
6298 + + GPI_LMEM_FREE_ADDR);
6299 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
6300 + + GPI_DDR_ALLOC_ADDR);
6301 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
6302 + + GPI_DDR_FREE_ADDR);
6303 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6304 + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6305 + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6306 + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6307 + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6308 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
6309 + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
6310 +
6311 + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6312 + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6313 + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6314 + writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
6315 + writel(1, base + GPI_TOE_CHKSUM_EN);
6316 +
6317 + if (cfg->mtip_pause_reg) {
6318 + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6319 + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6320 + }
6321 +}
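/*
 * Editor's note: illustrative sketch, not part of the original patch. All
 * field values are hypothetical; struct gpi_cfg supplies the LMEM retry
 * count, TMLF TX threshold and DTX sequence length programmed above:
 */
static void example_gpi_bringup(void *base)
{
	struct gpi_cfg cfg = {
		.lmem_rtry_cnt = 0x40,	/* hypothetical */
		.tmlf_txthres = 0xbc,	/* hypothetical */
		.aseq_len = 0x40,	/* hypothetical */
		.mtip_pause_reg = 0,	/* pause register not wired up */
	};

	gpi_init(base, &cfg);	/* reset, disable, configure */
	gpi_enable(base);
}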
6322 +
6323 +/**************************** CLASSIFIER ***************************/
6324 +
6325 +/* Initializes CLASSIFIER block.
6326 + * @param[in] cfg CLASSIFIER configuration
6327 + */
6328 +void class_init(struct class_cfg *cfg)
6329 +{
6330 + class_reset();
6331 +
6332 + class_disable();
6333 +
6334 + class_set_config(cfg);
6335 +}
6336 +
6337 +/* Resets CLASSIFIER block.
6338 + *
6339 + */
6340 +void class_reset(void)
6341 +{
6342 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
6343 +}
6344 +
6345 +/* Enables all CLASS-PE's cores.
6346 + *
6347 + */
6348 +void class_enable(void)
6349 +{
6350 + writel(CORE_ENABLE, CLASS_TX_CTRL);
6351 +}
6352 +
6353 +/* Disables all CLASS-PE's cores.
6354 + *
6355 + */
6356 +void class_disable(void)
6357 +{
6358 + writel(CORE_DISABLE, CLASS_TX_CTRL);
6359 +}
6360 +
6361 +/*
6362 + * Sets the configuration of the CLASSIFIER block.
6363 + * @param[in] cfg CLASSIFIER configuration
6364 + */
6365 +void class_set_config(struct class_cfg *cfg)
6366 +{
6367 + u32 val;
6368 +
6369 + /* Initialize route table */
6370 + if (!cfg->resume)
6371 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6372 + cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6373 +
6374 +#if !defined(LS1012A_PFE_RESET_WA)
6375 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
6376 +#endif
6377 +
6378 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
6379 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
6380 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6381 + CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6382 + CLASS_ROUTE_HASH_ENTRY_SIZE);
6383 + writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6384 + CLASS_HIF_PARSE);
6385 +
6386 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
6387 +
6388 +#if defined(CONFIG_IP_ALIGNED)
6389 + val |= IP_ALIGNED;
6390 +#endif
6391 +
6392 + /*
6393 + * Class PE packet steering will only work if TOE mode, bridge fetch or
6394 + * route fetch are enabled (see class/qb_fet.v). Route fetch would
6395 + * trigger additional memory copies (likely from DDR because of hash
6396 + * table size, which cannot be reduced because PE software still
6397 + * relies on hash value computed in HW), so when not in TOE mode we
6398 + * simply enable HW bridge fetch even though we don't use it.
6399 + */
6400 + if (cfg->toe_mode)
6401 + val |= CLASS_TOE;
6402 + else
6403 + val |= HW_BRIDGE_FETCH;
6404 +
6405 + writel(val, CLASS_ROUTE_MULTI);
6406 +
6407 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6408 + CLASS_ROUTE_TABLE_BASE);
6409 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
6410 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
6411 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
6412 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
6413 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
6414 +
6415 + writel(23, CLASS_AFULL_THRES);
6416 + writel(23, CLASS_TSQ_FIFO_THRES);
6417 +
6418 + writel(24, CLASS_MAX_BUF_CNT);
6419 + writel(24, CLASS_TSQ_MAX_CNT);
6420 +}
6421 +
6422 +/**************************** TMU ***************************/
6423 +
6424 +void tmu_reset(void)
6425 +{
6426 + writel(SW_RESET, TMU_CTRL);
6427 +}
6428 +
6429 +/* Initializes TMU block.
6430 + * @param[in] cfg TMU configuration
6431 + */
6432 +void tmu_init(struct tmu_cfg *cfg)
6433 +{
6434 + int q, phyno;
6435 +
6436 + tmu_disable(0xF);
6437 + mdelay(10);
6438 +
6439 +#if !defined(LS1012A_PFE_RESET_WA)
6440 + /* keep in soft reset */
6441 + writel(SW_RESET, TMU_CTRL);
6442 +#endif
6443 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
6444 + writel(750, TMU_INQ_WATERMARK);
6445 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
6446 + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
6447 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
6448 + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
6449 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
6450 + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
6451 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
6452 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
6453 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
6454 + TMU_BMU_INQ_ADDR);
6455 +
6456 + /* enable all 10 schedulers [9:0] of each TDQ */
6457 + writel(0x3FF, TMU_TDQ0_SCH_CTRL);
6460 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
6461 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
6462 +
6463 +#if !defined(LS1012A_PFE_RESET_WA)
6464 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
6465 +#endif
6466 +
6467 +#if !defined(LS1012A_PFE_RESET_WA)
6468 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
6469 + /* Extra packet pointers will be stored from this address onwards */
6470 +
6471 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
6472 + writel(5, TMU_TDQ_IIFG_CFG);
6473 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
6474 +
6475 + writel(0x0, TMU_CTRL);
6476 +
6477 + /* MEM init */
6478 + pr_info("%s: mem init\n", __func__);
6479 + writel(MEM_INIT, TMU_CTRL);
6480 +
6481 + while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
6482 + ;
6483 +
6484 + /* LLM init */
6485 + pr_info("%s: lmem init\n", __func__);
6486 + writel(LLM_INIT, TMU_CTRL);
6487 +
6488 + while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
6489 + ;
6490 +#endif
6491 + /* set up each queue for tail drop */
6492 + for (phyno = 0; phyno < 4; phyno++) {
6493 + if (phyno == 2)
6494 + continue;
6495 + for (q = 0; q < 16; q++) {
6496 + u32 qdepth;
6497 +
6498 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
6499 + writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
6500 +
6501 + if (phyno == 3)
6502 + qdepth = DEFAULT_TMU3_QDEPTH;
6503 + else
6504 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
6505 + DEFAULT_MAX_QDEPTH;
6506 +
6507 + /* LOG: 68855 */
6508 + /*
6509 + * The following is a workaround for the reordered
6510 + * packet and BMU2 buffer leakage issue.
6511 + */
6512 + if (CHIP_REVISION() == 0)
6513 + qdepth = 31;
6514 +
6515 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
6516 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
6517 + }
6518 + }
6519 +
6520 +#ifdef CFG_LRO
6521 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
6522 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
6523 + writel(0, TMU_TEQ_QCFG);
6524 +#endif
6525 +
6526 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
6527 +
6528 + writel(0x0, TMU_CTRL);
6529 +}
6530 +
6531 +/* Enables TMU-PE cores.
6532 + * @param[in] pe_mask TMU PE mask
6533 + */
6534 +void tmu_enable(u32 pe_mask)
6535 +{
6536 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
6537 +}
6538 +
6539 +/* Disables TMU cores.
6540 + * @param[in] pe_mask TMU PE mask
6541 + */
6542 +void tmu_disable(u32 pe_mask)
6543 +{
6544 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
6545 +}
6546 +
6547 +/* This will return the tmu queue status
6548 + * @param[in] if_id gem interface id or TMU index
6549 + * @return bit mask of busy queues; zero means all
6550 + * queues are empty
6551 + */
6552 +u32 tmu_qstatus(u32 if_id)
6553 +{
6554 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6555 + offsetof(struct pe_status, tmu_qstatus), 4));
6556 +}
6557 +
6558 +u32 tmu_pkts_processed(u32 if_id)
6559 +{
6560 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6561 + offsetof(struct pe_status, rx), 4));
6562 +}
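/*
 * Editor's note: usage sketch, not part of the original patch. Because
 * tmu_qstatus() returns a bitmask of busy queues, a teardown path can poll
 * it until the interface's queues drain:
 */
static void example_wait_tmu_drain(u32 if_id)
{
	while (tmu_qstatus(if_id))
		mdelay(1);
}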
6563 +
6564 +/**************************** UTIL ***************************/
6565 +
6566 +/* Resets UTIL block.
6567 + */
6568 +void util_reset(void)
6569 +{
6570 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
6571 +}
6572 +
6573 +/* Initializes UTIL block.
6574 + * @param[in] cfg UTIL configuration
6575 + */
6576 +void util_init(struct util_cfg *cfg)
6577 +{
6578 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
6579 +}
6580 +
6581 +/* Enables UTIL-PE core.
6582 + *
6583 + */
6584 +void util_enable(void)
6585 +{
6586 + writel(CORE_ENABLE, UTIL_TX_CTRL);
6587 +}
6588 +
6589 +/* Disables UTIL-PE core.
6590 + *
6591 + */
6592 +void util_disable(void)
6593 +{
6594 + writel(CORE_DISABLE, UTIL_TX_CTRL);
6595 +}
6596 +
6597 +/**************************** HIF ***************************/
6598 +/* Initializes HIF copy block.
6599 + *
6600 + */
6601 +void hif_init(void)
6602 +{
6603 + /*Initialize HIF registers*/
6604 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
6605 + HIF_POLL_CTRL);
6606 +}
6607 +
6608 +/* Enable hif tx DMA and interrupt
6609 + *
6610 + */
6611 +void hif_tx_enable(void)
6612 +{
6613 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
6614 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
6615 + HIF_INT_ENABLE);
6616 +}
6617 +
6618 +/* Disable hif tx DMA and interrupt
6619 + *
6620 + */
6621 +void hif_tx_disable(void)
6622 +{
6623 + u32 hif_int;
6624 +
6625 + writel(0, HIF_TX_CTRL);
6626 +
6627 + hif_int = readl(HIF_INT_ENABLE);
6628 + hif_int &= ~HIF_TXPKT_INT_EN;
6629 + writel(hif_int, HIF_INT_ENABLE);
6630 +}
6631 +
6632 +/* Enable hif rx DMA and interrupt
6633 + *
6634 + */
6635 +void hif_rx_enable(void)
6636 +{
6637 + hif_rx_dma_start();
6638 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
6639 + HIF_INT_ENABLE);
6640 +}
6641 +
6642 +/* Disable hif rx DMA and interrupt
6643 + *
6644 + */
6645 +void hif_rx_disable(void)
6646 +{
6647 + u32 hif_int;
6648 +
6649 + writel(0, HIF_RX_CTRL);
6650 +
6651 + hif_int = readl(HIF_INT_ENABLE);
6652 + hif_int &= ~HIF_RXPKT_INT_EN;
6653 + writel(hif_int, HIF_INT_ENABLE);
6654 +}
6655 --- /dev/null
6656 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
6657 @@ -0,0 +1,1072 @@
6658 +/*
6659 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6660 + * Copyright 2017 NXP
6661 + *
6662 + * This program is free software; you can redistribute it and/or modify
6663 + * it under the terms of the GNU General Public License as published by
6664 + * the Free Software Foundation; either version 2 of the License, or
6665 + * (at your option) any later version.
6666 + *
6667 + * This program is distributed in the hope that it will be useful,
6668 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6669 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6670 + * GNU General Public License for more details.
6671 + *
6672 + * You should have received a copy of the GNU General Public License
6673 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
6674 + */
6675 +
6676 +#include <linux/kernel.h>
6677 +#include <linux/interrupt.h>
6678 +#include <linux/dma-mapping.h>
6679 +#include <linux/dmapool.h>
6680 +#include <linux/sched.h>
6681 +#include <linux/module.h>
6682 +#include <linux/list.h>
6683 +#include <linux/kthread.h>
6684 +#include <linux/slab.h>
6685 +
6686 +#include <linux/io.h>
6687 +#include <asm/irq.h>
6688 +
6689 +#include "pfe_mod.h"
6690 +
6691 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
6692 +
6693 +unsigned char napi_first_batch;
6694 +
6695 +static void pfe_tx_do_cleanup(unsigned long data);
6696 +
6697 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
6698 +{
6699 + void *addr;
6700 + dma_addr_t dma_addr;
6701 + int err = 0;
6702 +
6703 + pr_info("%s\n", __func__);
6704 + addr = dma_alloc_coherent(pfe->dev,
6705 + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
6706 + HIF_TX_DESC_NT * sizeof(struct hif_desc),
6707 + &dma_addr, GFP_KERNEL);
6708 +
6709 + if (!addr) {
6710 + pr_err("%s: Could not allocate buffer descriptors!\n"
6711 + , __func__);
6712 + err = -ENOMEM;
6713 + goto err0;
6714 + }
6715 +
6716 + hif->descr_baseaddr_p = dma_addr;
6717 + hif->descr_baseaddr_v = addr;
6718 + hif->rx_ring_size = HIF_RX_DESC_NT;
6719 + hif->tx_ring_size = HIF_TX_DESC_NT;
6720 +
6721 + return 0;
6722 +
6723 +err0:
6724 + return err;
6725 +}
6726 +
6727 +#if defined(LS1012A_PFE_RESET_WA)
6728 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
6729 +{
6730 + int ii;
6731 + struct hif_desc *desc = hif->rx_base;
6732 +
6733 + /*Mark all descriptors as LAST_BD */
6734 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6735 + desc->ctrl |= BD_CTRL_LAST_BD;
6736 + desc++;
6737 + }
6738 +}
6739 +
6740 +struct class_rx_hdr_t {
6741 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
6742 + u16 length; /* total packet length */
6743 + u16 phyno; /* input physical port number */
6744 + u32 status; /* gemac status bits */
6745 + u32 status2; /* reserved for software usage */
6746 +};
6747 +
6748 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
6749 + * except overflow
6750 + */
6751 +#define STATUS_BAD_FRAME_ERR BIT(16)
6752 +#define STATUS_LENGTH_ERR BIT(17)
6753 +#define STATUS_CRC_ERR BIT(18)
6754 +#define STATUS_TOO_SHORT_ERR BIT(19)
6755 +#define STATUS_TOO_LONG_ERR BIT(20)
6756 +#define STATUS_CODE_ERR BIT(21)
6757 +#define STATUS_MC_HASH_MATCH BIT(22)
6758 +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
6759 +#define STATUS_UNICAST_HASH_MATCH BIT(24)
6760 +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
6761 +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
6762 +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
6763 +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
6764 +#define MIN_PKT_SIZE 64
6765 +
6766 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
6767 +{
6768 + int i;
6769 +
6770 + for (i = 0; i < len; i += sizeof(u32)) {
6771 + *dst = htonl(*src);
6772 + dst++; src++;
6773 + }
6774 +}
6775 +
6776 +static void send_dummy_pkt_to_hif(void)
6777 +{
6778 + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
6779 + u32 physaddr;
6780 + struct class_rx_hdr_t local_hdr;
6781 + static u32 dummy_pkt[] = {
6782 + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
6783 + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
6784 + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
6785 + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
6786 +
6787 + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
6788 + if (!ddr_ptr)
6789 + return;
6790 +
6791 + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
6792 + if (!lmem_ptr)
6793 + return;
6794 +
6795 + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
6796 + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
6797 +
6798 + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
6799 +
6800 + local_hdr.phyno = htons(0); /* RX_PHY_0 */
6801 + local_hdr.length = htons(MIN_PKT_SIZE);
6802 +
6803 + local_hdr.next_ptr = htonl((u32)physaddr);
6804 + /*Mark checksum is correct */
6805 + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
6806 + STATUS_UDP_CHECKSUM_CORRECT |
6807 + STATUS_TCP_CHECKSUM_CORRECT |
6808 + STATUS_UNICAST_HASH_MATCH |
6809 + STATUS_CUMULATIVE_ARC_HIT));
6810 + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
6811 + sizeof(local_hdr));
6812 +
6813 + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
6814 + 0x40);
6815 +
6816 + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
6817 +}
6818 +
6819 +void pfe_hif_rx_idle(struct pfe_hif *hif)
6820 +{
6821 + int hif_stop_loop = 10;
6822 + u32 rx_status;
6823 +
6824 + pfe_hif_disable_rx_desc(hif);
6825 + pr_info("Bringing hif to idle state...");
6826 + writel(0, HIF_INT_ENABLE);
6827 + /*If HIF Rx BDP is busy send a dummy packet */
6828 + do {
6829 + rx_status = readl(HIF_RX_STATUS);
6830 + if (rx_status & BDP_CSR_RX_DMA_ACTV)
6831 + send_dummy_pkt_to_hif();
6832 +
6833 + usleep_range(100, 150);
6834 + } while (--hif_stop_loop);
6835 +
6836 + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
6837 + pr_info("Failed\n");
6838 + else
6839 + pr_info("Done\n");
6840 +}
6841 +#endif
6842 +
6843 +static void pfe_hif_free_descr(struct pfe_hif *hif)
6844 +{
6845 + pr_info("%s\n", __func__);
6846 +
6847 + dma_free_coherent(pfe->dev,
6848 + hif->rx_ring_size * sizeof(struct hif_desc) +
6849 + hif->tx_ring_size * sizeof(struct hif_desc),
6850 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
6851 +}
6852 +
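+/*
+ * Debug helper: dump the status/ctrl/data/next words of every Rx and Tx
+ * buffer descriptor.
+ */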
6853 +void pfe_hif_desc_dump(struct pfe_hif *hif)
6854 +{
6855 + struct hif_desc *desc;
6856 + unsigned long desc_p;
6857 + int ii = 0;
6858 +
6859 + pr_info("%s\n", __func__);
6860 +
6861 + desc = hif->rx_base;
6862 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
6863 + hif->descr_baseaddr_p);
6864 +
6865 + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
6866 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6867 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6868 + readl(&desc->status), readl(&desc->ctrl),
6869 + readl(&desc->data), readl(&desc->next));
6870 + desc++;
6871 + }
6872 +
6873 + desc = hif->tx_base;
6874 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
6875 + hif->descr_baseaddr_p);
6876 +
6877 + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
6878 + for (ii = 0; ii < hif->tx_ring_size; ii++) {
6879 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6880 + readl(&desc->status), readl(&desc->ctrl),
6881 + readl(&desc->data), readl(&desc->next));
6882 + desc++;
6883 + }
6884 +}
6885 +
6886 +/* pfe_hif_release_buffers */
6887 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
6888 +{
6889 + struct hif_desc *desc;
6890 + int i = 0;
6891 +
6892 + hif->rx_base = hif->descr_baseaddr_v;
6893 +
6894 + pr_info("%s\n", __func__);
6895 +
6896 + /*Free Rx buffers */
6897 + desc = hif->rx_base;
6898 + for (i = 0; i < hif->rx_ring_size; i++) {
6899 + if (readl(&desc->data)) {
6900 + if ((i < hif->shm->rx_buf_pool_cnt) &&
6901 + (!hif->shm->rx_buf_pool[i])) {
6902 + /*
6903 + * dma_unmap_single(hif->dev, desc->data,
6904 + * hif->rx_buf_len[i], DMA_FROM_DEVICE);
6905 + */
6906 + dma_unmap_single(hif->dev,
6907 + DDR_PFE_TO_PHYS(
6908 + readl(&desc->data)),
6909 + hif->rx_buf_len[i],
6910 + DMA_FROM_DEVICE);
6911 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
6912 + } else {
6913 + pr_err("%s: buffer pool already full\n"
6914 + , __func__);
6915 + }
6916 + }
6917 +
6918 + writel(0, &desc->data);
6919 + writel(0, &desc->status);
6920 + writel(0, &desc->ctrl);
6921 + desc++;
6922 + }
6923 +}
6924 +
6925 +/*
6926 + * pfe_hif_init_buffers
6927 + * This function initializes the HIF Rx/Tx ring descriptors and
6928 + * initializes the Rx queue with buffers.
6929 + */
6930 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
6931 +{
6932 + struct hif_desc *desc, *first_desc_p;
6933 + u32 data;
6934 + int i = 0;
6935 +
6936 + pr_info("%s\n", __func__);
6937 +
6938 +	/* Check that enough Rx buffers are available in the shared memory */
6939 + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
6940 + return -ENOMEM;
6941 +
6942 + hif->rx_base = hif->descr_baseaddr_v;
6943 + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
6944 +
6945 + /*Initialize Rx descriptors */
6946 + desc = hif->rx_base;
6947 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
6948 +
6949 + for (i = 0; i < hif->rx_ring_size; i++) {
6950 + /* Initialize Rx buffers from the shared memory */
6951 +
6952 + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
6953 + pfe_pkt_size, DMA_FROM_DEVICE);
6954 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
6955 + hif->rx_buf_len[i] = pfe_pkt_size;
6956 + hif->shm->rx_buf_pool[i] = NULL;
6957 +
6958 + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
6959 + writel(DDR_PHYS_TO_PFE(data), &desc->data);
6960 + } else {
6961 + pr_err("%s : low on mem\n", __func__);
6962 +
6963 + goto err;
6964 + }
6965 +
6966 + writel(0, &desc->status);
6967 +
6968 + /*
6969 + * Ensure everything else is written to DDR before
6970 + * writing bd->ctrl
6971 + */
6972 + wmb();
6973 +
6974 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
6975 + | BD_CTRL_DIR | BD_CTRL_DESC_EN
6976 + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
6977 +
6978 + /* Chain descriptors */
6979 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
6980 + desc++;
6981 + }
6982 +
6983 + /* Overwrite last descriptor to chain it to first one*/
6984 + desc--;
6985 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
6986 +
6987 + hif->rxtoclean_index = 0;
6988 +
6989 + /*Initialize Rx buffer descriptor ring base address */
6990 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
6991 +
6992 + hif->tx_base = hif->rx_base + hif->rx_ring_size;
6993 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
6994 + hif->rx_ring_size;
6995 + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
6996 +
6997 + /*Initialize tx descriptors */
6998 + desc = hif->tx_base;
6999 +
7000 + for (i = 0; i < hif->tx_ring_size; i++) {
7001 + /* Chain descriptors */
7002 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7003 + writel(0, &desc->ctrl);
7004 + desc++;
7005 + }
7006 +
7007 + /* Overwrite last descriptor to chain it to first one */
7008 + desc--;
7009 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7010 + hif->txavail = hif->tx_ring_size;
7011 + hif->txtosend = 0;
7012 + hif->txtoclean = 0;
7013 + hif->txtoflush = 0;
7014 +
7015 + /*Initialize Tx buffer descriptor ring base address */
7016 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7017 +
7018 + return 0;
7019 +
7020 +err:
7021 + pfe_hif_release_buffers(hif);
7022 + return -ENOMEM;
7023 +}
7024 +
7025 +/*
7026 + * pfe_hif_client_register
7027 + *
7028 + * This function is used to register a client driver with the HIF driver.
7029 + *
7030 + * Return value:
7031 + * 0 on successful registration
7032 + */
7033 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7034 + struct hif_client_shm *client_shm)
7035 +{
7036 + struct hif_client *client = &hif->client[client_id];
7037 + u32 i, cnt;
7038 + struct rx_queue_desc *rx_qbase;
7039 + struct tx_queue_desc *tx_qbase;
7040 + struct hif_rx_queue *rx_queue;
7041 + struct hif_tx_queue *tx_queue;
7042 + int err = 0;
7043 +
7044 + pr_info("%s\n", __func__);
7045 +
7046 + spin_lock_bh(&hif->tx_lock);
7047 +
7048 + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7049 + pr_err("%s: client %d already registered\n",
7050 + __func__, client_id);
7051 + err = -1;
7052 + goto unlock;
7053 + }
7054 +
7055 + memset(client, 0, sizeof(struct hif_client));
7056 +
7057 + /* Initialize client Rx queues baseaddr, size */
7058 +
7059 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7060 +	/* Check if the client is requesting more queues than supported */
7061 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7062 + cnt = HIF_CLIENT_QUEUES_MAX;
7063 +
7064 + client->rx_qn = cnt;
7065 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7066 + for (i = 0; i < cnt; i++) {
7067 + rx_queue = &client->rx_q[i];
7068 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7069 + rx_queue->size = client_shm->rx_qsize;
7070 + rx_queue->write_idx = 0;
7071 + }
7072 +
7073 + /* Initialize client Tx queues baseaddr, size */
7074 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7075 +
7076 +	/* Check if the client is requesting more queues than supported */
7077 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7078 + cnt = HIF_CLIENT_QUEUES_MAX;
7079 +
7080 + client->tx_qn = cnt;
7081 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7082 + for (i = 0; i < cnt; i++) {
7083 + tx_queue = &client->tx_q[i];
7084 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7085 + tx_queue->size = client_shm->tx_qsize;
7086 + tx_queue->ack_idx = 0;
7087 + }
7088 +
7089 + set_bit(client_id, &hif->shm->g_client_status[0]);
7090 +
7091 +unlock:
7092 + spin_unlock_bh(&hif->tx_lock);
7093 +
7094 + return err;
7095 +}
7096 +
7097 +/*
7098 + * pfe_hif_client_unregister
7099 + *
7100 + * This function is used to unregister a client from the HIF driver.
7101 + *
7102 + */
7103 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7104 +{
7105 + pr_info("%s\n", __func__);
7106 +
7107 + /*
7108 + * Mark client as no longer available (which prevents further packet
7109 + * receive for this client)
7110 + */
7111 + spin_lock_bh(&hif->tx_lock);
7112 +
7113 + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7114 + pr_err("%s: client %d not registered\n", __func__,
7115 + client_id);
7116 +
7117 + spin_unlock_bh(&hif->tx_lock);
7118 + return;
7119 + }
7120 +
7121 + clear_bit(client_id, &hif->shm->g_client_status[0]);
7122 +
7123 + spin_unlock_bh(&hif->tx_lock);
7124 +}
7125 +
7126 +/*
7127 + * client_put_rxpacket-
7128 + * This function puts the Rx pkt in the given client Rx queue.
7129 + * It actually swaps the Rx pkt into the client Rx descriptor buffer
7130 + * and returns the free buffer from it.
7131 + *
7132 + * If the function returns NULL, the client Rx queue is full and
7133 + * the packet could not be delivered to the client queue.
7134 + */
7135 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7136 + u32 flags, u32 client_ctrl, u32 *rem_len)
7137 +{
7138 + void *free_pkt = NULL;
7139 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
7140 +
7141 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7142 + if (page_mode) {
7143 + int rem_page_size = PAGE_SIZE -
7144 + PRESENT_OFST_IN_PAGE(pkt);
7145 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7146 + pfe_pkt_headroom);
7147 + *rem_len = (rem_page_size - cur_pkt_size);
7148 + if (*rem_len) {
7149 + free_pkt = pkt + cur_pkt_size;
7150 + get_page(virt_to_page(free_pkt));
7151 + } else {
7152 + free_pkt = (void
7153 + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7154 + *rem_len = pfe_pkt_size;
7155 + }
7156 + } else {
7157 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7158 + GFP_DMA_PFE);
7159 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7160 + }
7161 +
7162 + if (free_pkt) {
7163 + desc->data = pkt;
7164 + desc->client_ctrl = client_ctrl;
7165 + /*
7166 + * Ensure everything else is written to DDR before
7167 + * writing bd->ctrl
7168 + */
7169 + smp_wmb();
7170 + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7171 + queue->write_idx = (queue->write_idx + 1)
7172 + & (queue->size - 1);
7173 +
7174 + free_pkt += pfe_pkt_headroom;
7175 + }
7176 + }
7177 +
7178 + return free_pkt;
7179 +}
7180 +
7181 +/*
7182 + * pfe_hif_rx_process-
7183 + * This function processes the PFE HIF Rx queue: it dequeues packets
7184 + * from the Rx ring and delivers them to the corresponding client queue.
7185 + */
7186 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7187 +{
7188 + struct hif_desc *desc;
7189 + struct hif_hdr *pkt_hdr;
7190 + struct __hif_hdr hif_hdr;
7191 + void *free_buf;
7192 + int rtc, len, rx_processed = 0;
7193 + struct __hif_desc local_desc;
7194 + int flags;
7195 + unsigned int desc_p;
7196 + unsigned int buf_size = 0;
7197 +
7198 + spin_lock_bh(&hif->lock);
7199 +
7200 + rtc = hif->rxtoclean_index;
7201 +
7202 + while (rx_processed < budget) {
7203 + desc = hif->rx_base + rtc;
7204 +
7205 + __memcpy12(&local_desc, desc);
7206 +
7207 + /* ACK pending Rx interrupt */
7208 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7209 + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7210 +
7211 + if (rx_processed == 0) {
7212 + if (napi_first_batch == 1) {
7213 + desc_p = hif->descr_baseaddr_p +
7214 + ((unsigned long int)(desc) -
7215 + (unsigned long
7216 + int)hif->descr_baseaddr_v);
7217 + napi_first_batch = 0;
7218 + }
7219 + }
7220 +
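+			/*
+			 * Re-read the descriptor after acking the interrupt;
+			 * the DMA may have completed it in the meantime.
+			 */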
7221 + __memcpy12(&local_desc, desc);
7222 +
7223 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
7224 + break;
7225 + }
7226 +
7227 + napi_first_batch = 0;
7228 +
7229 +#ifdef HIF_NAPI_STATS
7230 + hif->napi_counters[NAPI_DESC_COUNT]++;
7231 +#endif
7232 + len = BD_BUF_LEN(local_desc.ctrl);
7233 + /*
7234 + * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7235 + * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7236 + */
7237 + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7238 + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7239 +
7240 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7241 +
7242 + /* Track last HIF header received */
7243 + if (!hif->started) {
7244 + hif->started = 1;
7245 +
7246 + __memcpy8(&hif_hdr, pkt_hdr);
7247 +
7248 + hif->qno = hif_hdr.hdr.q_num;
7249 + hif->client_id = hif_hdr.hdr.client_id;
7250 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7251 + hif_hdr.hdr.client_ctrl;
7252 + flags = CL_DESC_FIRST;
7253 +
7254 + } else {
7255 + flags = 0;
7256 + }
7257 +
7258 + if (local_desc.ctrl & BD_CTRL_LIFM)
7259 + flags |= CL_DESC_LAST;
7260 +
7261 + /* Check for valid client id and still registered */
7262 + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7263 + !(test_bit(hif->client_id,
7264 + &hif->shm->g_client_status[0]))) {
7265 + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7266 + __func__,
7267 + hif->client_id,
7268 + hif->qno);
7269 +
7270 + free_buf = pkt_hdr;
7271 +
7272 + goto pkt_drop;
7273 + }
7274 +
7275 +		/* Check for a valid queue number */
7276 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7277 + pr_info("%s: packet with invalid queue: %d\n"
7278 + , __func__, hif->qno);
7279 + hif->qno = 0;
7280 + }
7281 +
7282 + free_buf =
7283 + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7284 + (void *)pkt_hdr, len, flags,
7285 + hif->client_ctrl, &buf_size);
7286 +
7287 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7288 + hif->qno);
7289 +
7290 + if (unlikely(!free_buf)) {
7291 +#ifdef HIF_NAPI_STATS
7292 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7293 +#endif
7294 + /*
7295 + * If we want to keep in polling mode to retry later,
7296 + * we need to tell napi that we consumed
7297 + * the full budget or we will hit a livelock scenario.
7298 + * The core code keeps this napi instance
7299 + * at the head of the list and none of the other
7300 + * instances get to run
7301 + */
7302 + rx_processed = budget;
7303 +
7304 + if (flags & CL_DESC_FIRST)
7305 + hif->started = 0;
7306 +
7307 + break;
7308 + }
7309 +
7310 +pkt_drop:
7311 + /*Fill free buffer in the descriptor */
7312 + hif->rx_buf_addr[rtc] = free_buf;
7313 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7314 + writel((DDR_PHYS_TO_PFE
7315 + ((u32)dma_map_single(hif->dev,
7316 + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7317 + &desc->data);
7318 + /*
7319 + * Ensure everything else is written to DDR before
7320 + * writing bd->ctrl
7321 + */
7322 + wmb();
7323 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7324 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7325 + &desc->ctrl);
7326 +
7327 + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7328 +
7329 + if (local_desc.ctrl & BD_CTRL_LIFM) {
7330 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7331 + rx_processed++;
7332 +
7333 +#ifdef HIF_NAPI_STATS
7334 + hif->napi_counters[NAPI_PACKET_COUNT]++;
7335 +#endif
7336 + }
7337 + hif->started = 0;
7338 + }
7339 + }
7340 +
7341 + hif->rxtoclean_index = rtc;
7342 + spin_unlock_bh(&hif->lock);
7343 +
7344 + /* we made some progress, re-start rx dma in case it stopped */
7345 + hif_rx_dma_start();
7346 +
7347 + return rx_processed;
7348 +}
7349 +
7350 +/*
7351 + * client_ack_txpacket-
7352 + * This function acks the Tx packet in the given client Tx queue by resetting
7353 + * the ownership bit in the descriptor.
7354 + */
7355 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7356 + unsigned int q_no)
7357 +{
7358 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7359 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7360 +
7361 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7362 + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7363 + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7364 +
7365 + return 0;
7366 +
7367 + } else {
7368 + /*This should not happen */
7369 + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7370 + hif->txtosend, hif->txtoclean, hif->txavail,
7371 + client_id, q_no, queue, queue->ack_idx);
7372 + WARN(1, "%s: doesn't own this descriptor", __func__);
7373 + return 1;
7374 + }
7375 +}
7376 +
7377 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7378 +{
7379 + struct hif_desc *desc;
7380 + struct hif_desc_sw *desc_sw;
7381 + int ttc, tx_avl;
7382 + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7383 +
7384 + ttc = hif->txtoclean;
7385 + tx_avl = hif->txavail;
7386 +
7387 + while ((tx_avl < hif->tx_ring_size) && count--) {
7388 + desc = hif->tx_base + ttc;
7389 +
7390 + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7391 + break;
7392 +
7393 + desc_sw = &hif->tx_sw_queue[ttc];
7394 +
7395 + if (desc_sw->data) {
7396 + /*
7397 + * dmap_unmap_single(hif->dev, desc_sw->data,
7398 + * desc_sw->len, DMA_TO_DEVICE);
7399 + */
7400 + dma_unmap_single(hif->dev, desc_sw->data,
7401 + desc_sw->len, DMA_TO_DEVICE);
7402 + }
7403 +
7404 +		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7405 + pr_err("Invalid cl id %d\n", desc_sw->client_id);
7406 +
7407 + pkts_done[desc_sw->client_id]++;
7408 +
7409 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7410 +
7411 + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7412 + tx_avl++;
7413 + }
7414 +
7415 + if (pkts_done[0])
7416 + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7417 + if (pkts_done[1])
7418 + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7419 +
7420 + hif->txtoclean = ttc;
7421 + hif->txavail = tx_avl;
7422 +
7423 + if (!count) {
7424 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7425 + } else {
7426 + /*Enable Tx done interrupt */
7427 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7428 + HIF_INT_ENABLE);
7429 + }
7430 +}
7431 +
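+/*
+ * Tx cleanup tasklet: ack the Tx done interrupt and reap up to 64 completed
+ * Tx descriptors.
+ */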
7432 +static void pfe_tx_do_cleanup(unsigned long data)
7433 +{
7434 + struct pfe_hif *hif = (struct pfe_hif *)data;
7435 +
7436 + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7437 +
7438 + hif_tx_done_process(hif, 64);
7439 +}
7440 +
7441 +/*
7442 + * __hif_xmit_pkt -
7443 + * This function puts one packet in the HIF Tx queue
7444 + */
7445 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7446 + q_no, void *data, u32 len, unsigned int flags)
7447 +{
7448 + struct hif_desc *desc;
7449 + struct hif_desc_sw *desc_sw;
7450 +
7451 + desc = hif->tx_base + hif->txtosend;
7452 + desc_sw = &hif->tx_sw_queue[hif->txtosend];
7453 +
7454 + desc_sw->len = len;
7455 + desc_sw->client_id = client_id;
7456 + desc_sw->q_no = q_no;
7457 + desc_sw->flags = flags;
7458 +
7459 + if (flags & HIF_DONT_DMA_MAP) {
7460 + desc_sw->data = 0;
7461 + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
7462 + } else {
7463 + desc_sw->data = dma_map_single(hif->dev, data, len,
7464 + DMA_TO_DEVICE);
7465 + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
7466 + }
7467 +
7468 + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
7469 + hif->txavail--;
7470 +
7471 + if ((!((flags & HIF_DATA_VALID) && (flags &
7472 + HIF_LAST_BUFFER))))
7473 + goto skip_tx;
7474 +
7475 + /*
7476 + * Ensure everything else is written to DDR before
7477 + * writing bd->ctrl
7478 + */
7479 + wmb();
7480 +
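+	/*
+	 * Flush every descriptor queued so far: hand ownership to the
+	 * hardware, marking the last buffer of each packet with LIFM.
+	 */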
7481 + do {
7482 + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
7483 + desc = hif->tx_base + hif->txtoflush;
7484 +
7485 + if (desc_sw->flags & HIF_LAST_BUFFER) {
7486 + writel((BD_CTRL_LIFM |
7487 + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
7488 + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
7489 + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
7490 + &desc->ctrl);
7491 + } else {
7492 + writel((BD_CTRL_DESC_EN |
7493 + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
7494 + }
7495 + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
7496 +	} while (hif->txtoflush != hif->txtosend);
7499 +
7500 +skip_tx:
7501 + return;
7502 +}
7503 +
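+/* Wake-on-LAN ISR: clear the WoL configuration on both EMACs after wakeup */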
7504 +static irqreturn_t wol_isr(int irq, void *dev_id)
7505 +{
7506 + pr_info("WoL\n");
7507 + gemac_set_wol(EMAC1_BASE_ADDR, 0);
7508 + gemac_set_wol(EMAC2_BASE_ADDR, 0);
7509 + return IRQ_HANDLED;
7510 +}
7511 +
7512 +/*
7513 + * hif_isr-
7514 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
7515 + */
7516 +static irqreturn_t hif_isr(int irq, void *dev_id)
7517 +{
7518 + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
7519 + int int_status;
7520 + int int_enable_mask;
7521 +
7522 + /*Read hif interrupt source register */
7523 + int_status = readl_relaxed(HIF_INT_SRC);
7524 + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
7525 +
7526 + if ((int_status & HIF_INT) == 0)
7527 + return IRQ_NONE;
7528 +
7529 + int_status &= ~(HIF_INT);
7530 +
7531 + if (int_status & HIF_RXPKT_INT) {
7532 + int_status &= ~(HIF_RXPKT_INT);
7533 + int_enable_mask &= ~(HIF_RXPKT_INT);
7534 +
7535 + napi_first_batch = 1;
7536 +
7537 + if (napi_schedule_prep(&hif->napi)) {
7538 +#ifdef HIF_NAPI_STATS
7539 + hif->napi_counters[NAPI_SCHED_COUNT]++;
7540 +#endif
7541 + __napi_schedule(&hif->napi);
7542 + }
7543 + }
7544 +
7545 + if (int_status & HIF_TXPKT_INT) {
7546 + int_status &= ~(HIF_TXPKT_INT);
7547 + int_enable_mask &= ~(HIF_TXPKT_INT);
7548 +		/* Schedule tx cleanup tasklet */
7549 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7550 + }
7551 +
7552 + /*Disable interrupts, they will be enabled after they are serviced */
7553 + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
7554 +
7555 + if (int_status) {
7556 + pr_info("%s : Invalid interrupt : %d\n", __func__,
7557 + int_status);
7558 + writel(int_status, HIF_INT_SRC);
7559 + }
7560 +
7561 + return IRQ_HANDLED;
7562 +}
7563 +
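+/*
+ * Handle a client request (register/unregister) forwarded by the HIF client
+ * library; data1 carries the client id.
+ */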
7564 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
7565 +{
7566 + unsigned int client_id = data1;
7567 +
7568 + if (client_id >= HIF_CLIENTS_MAX) {
7569 + pr_err("%s: client id %d out of bounds\n", __func__,
7570 + client_id);
7571 + return;
7572 + }
7573 +
7574 + switch (req) {
7575 + case REQUEST_CL_REGISTER:
7576 + /* Request for register a client */
7577 + pr_info("%s: register client_id %d\n",
7578 + __func__, client_id);
7579 + pfe_hif_client_register(hif, client_id, (struct
7580 + hif_client_shm *)&hif->shm->client[client_id]);
7581 + break;
7582 +
7583 + case REQUEST_CL_UNREGISTER:
7584 + pr_info("%s: unregister client_id %d\n",
7585 + __func__, client_id);
7586 +
7587 + /* Request for unregister a client */
7588 + pfe_hif_client_unregister(hif, client_id);
7589 +
7590 + break;
7591 +
7592 + default:
7593 + pr_err("%s: unsupported request %d\n",
7594 + __func__, req);
7595 + break;
7596 + }
7597 +
7598 + /*
7599 + * Process client Tx queues
7600 +	 * Currently we don't check for pending tx
7601 + */
7602 +}
7603 +
7604 +/*
7605 + * pfe_hif_rx_poll
7606 + * This function is NAPI poll function to process HIF Rx queue.
7607 + */
7608 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
7609 +{
7610 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
7611 + int work_done;
7612 +
7613 +#ifdef HIF_NAPI_STATS
7614 + hif->napi_counters[NAPI_POLL_COUNT]++;
7615 +#endif
7616 +
7617 + work_done = pfe_hif_rx_process(hif, budget);
7618 +
7619 + if (work_done < budget) {
7620 + napi_complete(napi);
7621 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
7622 + HIF_INT_ENABLE);
7623 + }
7624 +#ifdef HIF_NAPI_STATS
7625 + else
7626 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
7627 +#endif
7628 +
7629 + return work_done;
7630 +}
7631 +
7632 +/*
7633 + * pfe_hif_init
7634 + * This function initializes the baseaddresses and irq, etc.
7635 + */
7636 +int pfe_hif_init(struct pfe *pfe)
7637 +{
7638 + struct pfe_hif *hif = &pfe->hif;
7639 + int err;
7640 +
7641 + pr_info("%s\n", __func__);
7642 +
7643 + hif->dev = pfe->dev;
7644 + hif->irq = pfe->hif_irq;
7645 +
7646 + err = pfe_hif_alloc_descr(hif);
7647 + if (err)
7648 + goto err0;
7649 +
7650 + if (pfe_hif_init_buffers(hif)) {
7651 + pr_err("%s: Could not initialize buffer descriptors\n"
7652 + , __func__);
7653 + err = -ENOMEM;
7654 + goto err1;
7655 + }
7656 +
7657 + /* Initialize NAPI for Rx processing */
7658 + init_dummy_netdev(&hif->dummy_dev);
7659 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
7660 + HIF_RX_POLL_WEIGHT);
7661 + napi_enable(&hif->napi);
7662 +
7663 + spin_lock_init(&hif->tx_lock);
7664 + spin_lock_init(&hif->lock);
7665 +
7666 + hif_init();
7667 + hif_rx_enable();
7668 + hif_tx_enable();
7669 +
7670 + /* Disable tx done interrupt */
7671 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
7672 +
7673 + gpi_enable(HGPI_BASE_ADDR);
7674 +
7675 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
7676 + if (err) {
7677 + pr_err("%s: failed to get the hif IRQ = %d\n",
7678 + __func__, hif->irq);
7679 + goto err1;
7680 + }
7681 +
7682 + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
7683 + if (err) {
7684 + pr_err("%s: failed to get the wol IRQ = %d\n",
7685 + __func__, pfe->wol_irq);
7686 + goto err1;
7687 + }
7688 +
7689 + tasklet_init(&hif->tx_cleanup_tasklet,
7690 + (void(*)(unsigned long))pfe_tx_do_cleanup,
7691 + (unsigned long)hif);
7692 +
7693 + return 0;
7694 +err1:
7695 + pfe_hif_free_descr(hif);
7696 +err0:
7697 + return err;
7698 +}
7699 +
7700 +/* pfe_hif_exit- */
7701 +void pfe_hif_exit(struct pfe *pfe)
7702 +{
7703 + struct pfe_hif *hif = &pfe->hif;
7704 +
7705 + pr_info("%s\n", __func__);
7706 +
7707 + tasklet_kill(&hif->tx_cleanup_tasklet);
7708 +
7709 + spin_lock_bh(&hif->lock);
7710 + hif->shm->g_client_status[0] = 0;
7711 + /* Make sure all clients are disabled*/
7712 + hif->shm->g_client_status[1] = 0;
7713 +
7714 + spin_unlock_bh(&hif->lock);
7715 +
7716 + /*Disable Rx/Tx */
7717 + gpi_disable(HGPI_BASE_ADDR);
7718 + hif_rx_disable();
7719 + hif_tx_disable();
7720 +
7721 + napi_disable(&hif->napi);
7722 + netif_napi_del(&hif->napi);
7723 +
7724 + free_irq(pfe->wol_irq, pfe);
7725 + free_irq(hif->irq, hif);
7726 +
7727 + pfe_hif_release_buffers(hif);
7728 + pfe_hif_free_descr(hif);
7729 +}
7730 --- /dev/null
7731 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
7732 @@ -0,0 +1,211 @@
7733 +/*
7734 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7735 + * Copyright 2017 NXP
7736 + *
7737 + * This program is free software; you can redistribute it and/or modify
7738 + * it under the terms of the GNU General Public License as published by
7739 + * the Free Software Foundation; either version 2 of the License, or
7740 + * (at your option) any later version.
7741 + *
7742 + * This program is distributed in the hope that it will be useful,
7743 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7744 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7745 + * GNU General Public License for more details.
7746 + *
7747 + * You should have received a copy of the GNU General Public License
7748 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
7749 + */
7750 +
7751 +#ifndef _PFE_HIF_H_
7752 +#define _PFE_HIF_H_
7753 +
7754 +#include <linux/netdevice.h>
7755 +
7756 +#define HIF_NAPI_STATS
7757 +
7758 +#define HIF_CLIENT_QUEUES_MAX 16
7759 +#define HIF_RX_POLL_WEIGHT 64
7760 +
7761 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
7762 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
7763 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
7764 + & HIF_RX_PKT_MIN_SIZE_MASK)
7765 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
7766 + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
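+/*
+ * e.g. ROUND_MIN_RX_SIZE(3000) = 4096: sizes round up to the next 2KB
+ * multiple; PRESENT_OFST_IN_PAGE() returns a buffer's 2KB-aligned offset
+ * within its page.
+ */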
7767 +
7768 +enum {
7769 + NAPI_SCHED_COUNT = 0,
7770 + NAPI_POLL_COUNT,
7771 + NAPI_PACKET_COUNT,
7772 + NAPI_DESC_COUNT,
7773 + NAPI_FULL_BUDGET_COUNT,
7774 + NAPI_CLIENT_FULL_COUNT,
7775 + NAPI_MAX_COUNT
7776 +};
7777 +
7778 +/*
7779 + * The HIF_TX_DESC_NT value should always be greater than 4;
7780 + * otherwise HIF_TX_POLL_MARK will become zero.
7781 + */
7782 +#define HIF_RX_DESC_NT 256
7783 +#define HIF_TX_DESC_NT 2048
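+/* Both ring sizes must be powers of two: ring indices wrap with & (size - 1) */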
7784 +
7785 +#define HIF_FIRST_BUFFER BIT(0)
7786 +#define HIF_LAST_BUFFER BIT(1)
7787 +#define HIF_DONT_DMA_MAP BIT(2)
7788 +#define HIF_DATA_VALID BIT(3)
7789 +#define HIF_TSO BIT(4)
7790 +
7791 +enum {
7792 + PFE_CL_GEM0 = 0,
7793 + PFE_CL_GEM1,
7794 + HIF_CLIENTS_MAX
7795 +};
7796 +
7797 +/*structure to store client queue info */
7798 +struct hif_rx_queue {
7799 + struct rx_queue_desc *base;
7800 + u32 size;
7801 + u32 write_idx;
7802 +};
7803 +
7804 +struct hif_tx_queue {
7805 + struct tx_queue_desc *base;
7806 + u32 size;
7807 + u32 ack_idx;
7808 +};
7809 +
7810 +/*Structure to store the client info */
7811 +struct hif_client {
7812 + int rx_qn;
7813 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
7814 + int tx_qn;
7815 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
7816 +};
7817 +
7818 +/*HIF hardware buffer descriptor */
7819 +struct hif_desc {
7820 + u32 ctrl;
7821 + u32 status;
7822 + u32 data;
7823 + u32 next;
7824 +};
7825 +
7826 +struct __hif_desc {
7827 + u32 ctrl;
7828 + u32 status;
7829 + u32 data;
7830 +};
7831 +
7832 +struct hif_desc_sw {
7833 + dma_addr_t data;
7834 + u16 len;
7835 + u8 client_id;
7836 + u8 q_no;
7837 + u16 flags;
7838 +};
7839 +
7840 +struct hif_hdr {
7841 + u8 client_id;
7842 + u8 q_num;
7843 + u16 client_ctrl;
7844 + u16 client_ctrl1;
7845 +};
7846 +
7847 +struct __hif_hdr {
7848 + union {
7849 + struct hif_hdr hdr;
7850 + u32 word[2];
7851 + };
7852 +};
7853 +
7854 +struct hif_ipsec_hdr {
7855 + u16 sa_handle[2];
7856 +} __packed;
7857 +
7858 +/* HIF_CTRL_TX... defines */
7859 +#define HIF_CTRL_TX_CHECKSUM BIT(2)
7860 +
7861 +/* HIF_CTRL_RX... defines */
7862 +#define HIF_CTRL_RX_OFFSET_OFST (24)
7863 +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
7864 +#define HIF_CTRL_RX_CONTINUED BIT(1)
7865 +
7866 +struct pfe_hif {
7867 + /* To store registered clients in hif layer */
7868 + struct hif_client client[HIF_CLIENTS_MAX];
7869 + struct hif_shm *shm;
7870 + int irq;
7871 +
7872 + void *descr_baseaddr_v;
7873 + unsigned long descr_baseaddr_p;
7874 +
7875 + struct hif_desc *rx_base;
7876 + u32 rx_ring_size;
7877 + u32 rxtoclean_index;
7878 + void *rx_buf_addr[HIF_RX_DESC_NT];
7879 + int rx_buf_len[HIF_RX_DESC_NT];
7880 + unsigned int qno;
7881 + unsigned int client_id;
7882 + unsigned int client_ctrl;
7883 + unsigned int started;
7884 +
7885 + struct hif_desc *tx_base;
7886 + u32 tx_ring_size;
7887 + u32 txtosend;
7888 + u32 txtoclean;
7889 + u32 txavail;
7890 + u32 txtoflush;
7891 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
7892 +
7893 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
7894 + spinlock_t tx_lock;
7895 +/* lock synchronizes hif rx queue processing */
7896 + spinlock_t lock;
7897 + struct net_device dummy_dev;
7898 + struct napi_struct napi;
7899 + struct device *dev;
7900 +
7901 +#ifdef HIF_NAPI_STATS
7902 + unsigned int napi_counters[NAPI_MAX_COUNT];
7903 +#endif
7904 + struct tasklet_struct tx_cleanup_tasklet;
7905 +};
7906 +
7907 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7908 + q_no, void *data, u32 len, unsigned int flags);
7909 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
7910 + void *data, unsigned int len);
7911 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
7912 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
7913 + data2);
7914 +int pfe_hif_init(struct pfe *pfe);
7915 +void pfe_hif_exit(struct pfe *pfe);
7916 +void pfe_hif_rx_idle(struct pfe_hif *hif);
7917 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
7918 +{
7919 + spin_lock_bh(&hif->tx_lock);
7920 + __hif_tx_done_process(hif, count);
7921 + spin_unlock_bh(&hif->tx_lock);
7922 +}
7923 +
7924 +static inline void hif_tx_lock(struct pfe_hif *hif)
7925 +{
7926 + spin_lock_bh(&hif->tx_lock);
7927 +}
7928 +
7929 +static inline void hif_tx_unlock(struct pfe_hif *hif)
7930 +{
7931 + spin_unlock_bh(&hif->tx_lock);
7932 +}
7933 +
7934 +static inline int __hif_tx_avail(struct pfe_hif *hif)
7935 +{
7936 + return hif->txavail;
7937 +}
7938 +
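+/* Fixed-size copy helpers used for descriptor and HIF header snapshots */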
7939 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
7940 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
7941 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
7942 +
7943 +#endif /* _PFE_HIF_H_ */
7944 --- /dev/null
7945 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
7946 @@ -0,0 +1,601 @@
7947 +/*
7948 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7949 + * Copyright 2017 NXP
7950 + *
7951 + * This program is free software; you can redistribute it and/or modify
7952 + * it under the terms of the GNU General Public License as published by
7953 + * the Free Software Foundation; either version 2 of the License, or
7954 + * (at your option) any later version.
7955 + *
7956 + * This program is distributed in the hope that it will be useful,
7957 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7958 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7959 + * GNU General Public License for more details.
7960 + *
7961 + * You should have received a copy of the GNU General Public License
7962 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
7963 + */
7964 +
7965 +#include <linux/version.h>
7966 +#include <linux/kernel.h>
7967 +#include <linux/slab.h>
7968 +#include <linux/interrupt.h>
7969 +#include <linux/workqueue.h>
7970 +#include <linux/dma-mapping.h>
7971 +#include <linux/dmapool.h>
7972 +#include <linux/sched.h>
7973 +#include <linux/skbuff.h>
7974 +#include <linux/moduleparam.h>
7975 +#include <linux/cpu.h>
7976 +
7977 +#include "pfe_mod.h"
7978 +#include "pfe_hif.h"
7979 +#include "pfe_hif_lib.h"
7980 +
7981 +unsigned int lro_mode;
7982 +unsigned int page_mode;
7983 +unsigned int tx_qos;
7984 +unsigned int pfe_pkt_size;
7985 +unsigned int pfe_pkt_headroom;
7986 +unsigned int emac_txq_cnt;
7987 +
7988 +/*
7989 + * @pfe_hif_lib.c.
7990 + * Common functions used by HIF client drivers
7991 + */
7992 +
7993 +/*HIF shared memory Global variable */
7994 +struct hif_shm ghif_shm;
7995 +
7996 +/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
7997 + * This function should be called after pfe_hif_exit.
7998 + *
7999 + * @param[in] hif_shm Shared memory address location in DDR
8000 + */
8001 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8002 +{
8003 + int i;
8004 + void *pkt;
8005 +
8006 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8007 + pkt = hif_shm->rx_buf_pool[i];
8008 + if (pkt) {
8009 + hif_shm->rx_buf_pool[i] = NULL;
8010 + pkt -= pfe_pkt_headroom;
8011 +
8012 + if (page_mode)
8013 + put_page(virt_to_page(pkt));
8014 + else
8015 + kfree(pkt);
8016 + }
8017 + }
8018 +}
8019 +
8020 +/* Initialize shared memory used between HIF driver and clients,
8021 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8022 + * This function should be called before initializing the HIF driver.
8023 + *
8024 + * @param[in] hif_shm		Shared memory address location in DDR
8025 + * @return 0 on success, <0 on failure to initialize
8026 + */
8027 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8028 +{
8029 + int i;
8030 + void *pkt;
8031 +
8032 + memset(hif_shm, 0, sizeof(struct hif_shm));
8033 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8034 +
8035 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8036 + if (page_mode) {
8037 + pkt = (void *)__get_free_page(GFP_KERNEL |
8038 + GFP_DMA_PFE);
8039 + } else {
8040 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8041 + }
8042 +
8043 + if (pkt)
8044 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8045 + else
8046 + goto err0;
8047 + }
8048 +
8049 + return 0;
8050 +
8051 +err0:
8052 + pr_err("%s Low memory\n", __func__);
8053 + pfe_hif_shm_clean(hif_shm);
8054 + return -ENOMEM;
8055 +}
8056 +
8057 +/* This function sends an indication to the HIF driver
8058 + *
8059 + * @param[in] hif hif context
8060 + */
8061 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8062 + data2)
8063 +{
8064 + hif_process_client_req(hif, req, data1, data2);
8065 +}
8066 +
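+/*
+ * Indicate an event to a registered client. The per-event queue mask makes
+ * sure the client handler fires only once until the client re-arms the
+ * event via hif_lib_event_handler_start().
+ */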
8067 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8068 +{
8069 + struct hif_client_s *client = pfe->hif_client[client_id];
8070 +
8071 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8072 + HIF_CLIENT_QUEUES_MAX))
8073 + return;
8074 +
8075 + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8076 + client->event_handler(client->priv, event_type, qno);
8077 +}
8078 +
8079 +/*This function releases Rx queue descriptors memory and pre-filled buffers
8080 + *
8081 + * @param[in] client hif_client context
8082 + */
8083 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8084 +{
8085 + struct rx_queue_desc *desc;
8086 + int qno, ii;
8087 + void *buf;
8088 +
8089 + for (qno = 0; qno < client->rx_qn; qno++) {
8090 + desc = client->rx_q[qno].base;
8091 +
8092 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8093 + buf = (void *)desc->data;
8094 + if (buf) {
8095 + buf -= pfe_pkt_headroom;
8096 +
8097 + if (page_mode)
8098 + free_page((unsigned long)buf);
8099 + else
8100 + kfree(buf);
8101 +
8102 + desc->ctrl = 0;
8103 + }
8104 +
8105 + desc++;
8106 + }
8107 + }
8108 +
8109 + kfree(client->rx_qbase);
8110 +}
8111 +
8112 +/* This function allocates memory for the rxq descriptors and pre-fills the
8113 + * rx queues with buffers.
8114 + * @param[in] client client context
8115 + * @param[in] q_size size of the rxQ, all queues are of same size
8116 + */
8117 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8118 + q_size)
8119 +{
8120 + struct rx_queue_desc *desc;
8121 + struct hif_client_rx_queue *queue;
8122 + int ii, qno;
8123 +
8124 + /*Allocate memory for the client queues */
8125 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8126 + rx_queue_desc), GFP_KERNEL);
8127 + if (!client->rx_qbase)
8128 + goto err;
8129 +
8130 + for (qno = 0; qno < client->rx_qn; qno++) {
8131 + queue = &client->rx_q[qno];
8132 +
8133 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8134 + rx_queue_desc);
8135 + queue->size = q_size;
8136 + queue->read_idx = 0;
8137 + queue->write_idx = 0;
8138 +
8139 + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8140 + queue->base, queue->size);
8141 + }
8142 +
8143 + for (qno = 0; qno < client->rx_qn; qno++) {
8144 + queue = &client->rx_q[qno];
8145 + desc = queue->base;
8146 +
8147 + for (ii = 0; ii < queue->size; ii++) {
8148 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8149 + CL_DESC_OWN;
8150 + desc++;
8151 + }
8152 + }
8153 +
8154 + return 0;
8155 +
8156 +err:
8157 + return 1;
8158 +}
8159 +
8161 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8162 +{
8163 + pr_debug("%s\n", __func__);
8164 +
8165 + /*
8166 + * Check if there are any pending packets. Client must flush the tx
8167 +	 * queues before unregistering, by calling
8168 +	 * hif_lib_tx_get_next_complete().
8169 +	 *
8170 +	 * HIF no longer calls it since we are no longer registered.
8171 + */
8172 + if (queue->tx_pending)
8173 + pr_err("%s: pending transmit packets\n", __func__);
8174 +}
8175 +
8176 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8177 +{
8178 + int qno;
8179 +
8180 + pr_debug("%s\n", __func__);
8181 +
8182 + for (qno = 0; qno < client->tx_qn; qno++)
8183 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8184 +
8185 + kfree(client->tx_qbase);
8186 +}
8187 +
8188 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8189 + q_size)
8190 +{
8191 + struct hif_client_tx_queue *queue;
8192 + int qno;
8193 +
8194 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8195 + tx_queue_desc), GFP_KERNEL);
8196 + if (!client->tx_qbase)
8197 + return 1;
8198 +
8199 + for (qno = 0; qno < client->tx_qn; qno++) {
8200 + queue = &client->tx_q[qno];
8201 +
8202 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8203 + tx_queue_desc);
8204 + queue->size = q_size;
8205 + queue->read_idx = 0;
8206 + queue->write_idx = 0;
8207 + queue->tx_pending = 0;
8208 + queue->nocpy_flag = 0;
8209 + queue->prev_tmu_tx_pkts = 0;
8210 + queue->done_tmu_tx_pkts = 0;
8211 +
8212 + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8213 + queue->base, queue->size);
8214 + }
8215 +
8216 + return 0;
8217 +}
8218 +
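+/* Default no-op event handler, used when a client registers without one */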
8219 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8220 +{
8221 + return 0;
8222 +}
8223 +
8224 +int hif_lib_client_register(struct hif_client_s *client)
8225 +{
8226 + struct hif_shm *hif_shm;
8227 + struct hif_client_shm *client_shm;
8228 + int err, i;
8229 + /* int loop_cnt = 0; */
8230 +
8231 + pr_debug("%s\n", __func__);
8232 +
8233 + /*Allocate memory before spin_lock*/
8234 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8235 + err = -ENOMEM;
8236 + goto err_rx;
8237 + }
8238 +
8239 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8240 + err = -ENOMEM;
8241 + goto err_tx;
8242 + }
8243 +
8244 + spin_lock_bh(&pfe->hif.lock);
8245 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8246 + (pfe->hif_client[client->id])) {
8247 + err = -EINVAL;
8248 + goto err;
8249 + }
8250 +
8251 + hif_shm = client->pfe->hif.shm;
8252 +
8253 + if (!client->event_handler)
8254 + client->event_handler = hif_lib_event_dummy;
8255 +
8256 + /*Initialize client specific shared memory */
8257 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8258 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8259 + client_shm->rx_qsize = client->rx_qsize;
8260 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8261 + client_shm->tx_qsize = client->tx_qsize;
8262 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8263 + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8264 + /* spin_lock_init(&client->rx_lock); */
8265 +
8266 + for (i = 0; i < HIF_EVENT_MAX; i++) {
8267 +		/* By default all events are unmasked */
8268 +		client->queue_mask[i] = 0;
8271 + }
8272 +
8273 + /*Indicate to HIF driver*/
8274 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8275 +
8276 + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8277 + __func__, client, client->id, client->tx_qsize,
8278 + client->rx_qsize);
8279 +
8280 + client->cpu_id = -1;
8281 +
8282 + pfe->hif_client[client->id] = client;
8283 + spin_unlock_bh(&pfe->hif.lock);
8284 +
8285 + return 0;
8286 +
8287 +err:
8288 + spin_unlock_bh(&pfe->hif.lock);
8289 + hif_lib_client_release_tx_buffers(client);
8290 +
8291 +err_tx:
8292 + hif_lib_client_release_rx_buffers(client);
8293 +
8294 +err_rx:
8295 + return err;
8296 +}
8297 +
8298 +int hif_lib_client_unregister(struct hif_client_s *client)
8299 +{
8300 + struct pfe *pfe = client->pfe;
8301 + u32 client_id = client->id;
8302 +
8303 + pr_info(
8304 + "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
8305 + , __func__, client, client->id, client->tx_qsize,
8306 + client->rx_qsize);
8307 +
8308 + spin_lock_bh(&pfe->hif.lock);
8309 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8310 +
8311 + hif_lib_client_release_tx_buffers(client);
8312 + hif_lib_client_release_rx_buffers(client);
8313 + pfe->hif_client[client_id] = NULL;
8314 + spin_unlock_bh(&pfe->hif.lock);
8315 +
8316 + return 0;
8317 +}
8318 +
8319 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8320 + int qno)
8321 +{
8322 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8323 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
8324 +
8325 + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8326 + pr_debug("%s: Unsupported event : %d queue number : %d\n",
8327 + __func__, event, qno);
8328 + return -1;
8329 + }
8330 +
8331 + test_and_clear_bit(qno, &client->queue_mask[event]);
8332 +
8333 + switch (event) {
8334 + case EVENT_RX_PKT_IND:
8335 + if (!(desc->ctrl & CL_DESC_OWN))
8336 + hif_lib_indicate_client(client->id,
8337 + EVENT_RX_PKT_IND, qno);
8338 + break;
8339 +
8340 + case EVENT_HIGH_RX_WM:
8341 + case EVENT_TXDONE_IND:
8342 + default:
8343 + break;
8344 + }
8345 +
8346 + return 0;
8347 +}
8348 +
8349 +/*
8350 + * This function gets one packet from the specified client queue.
8351 + * It also refills the rx buffer.
8352 + */
8353 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8354 + *ofst, unsigned int *rx_ctrl,
8355 + unsigned int *desc_ctrl, void **priv_data)
8356 +{
8357 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8358 + struct rx_queue_desc *desc;
8359 + void *pkt = NULL;
8360 +
8361 + /*
8362 +	 * The following lock protects rx queue access from
8363 +	 * hif_lib_event_handler_start.
8364 +	 * In general this lock is not required, because hif_lib_xmit_pkt and
8365 +	 * hif_lib_event_handler_start are called from napi poll, which is
8366 +	 * not re-entrant. But if some client uses them differently, this
8367 +	 * lock is required.
8368 + */
8369 + /*spin_lock_irqsave(&client->rx_lock, flags); */
8370 + desc = queue->base + queue->read_idx;
8371 + if (!(desc->ctrl & CL_DESC_OWN)) {
8372 + pkt = desc->data - pfe_pkt_headroom;
8373 +
8374 + *rx_ctrl = desc->client_ctrl;
8375 + *desc_ctrl = desc->ctrl;
8376 +
8377 + if (desc->ctrl & CL_DESC_FIRST) {
8378 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8379 +
8380 + if (size) {
8381 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8382 + PFE_PKT_HEADER_SZ - size;
8383 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8384 + + size;
8385 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8386 + } else {
8387 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8388 + PFE_PKT_HEADER_SZ;
8389 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
8390 + *priv_data = NULL;
8391 + }
8392 +
8393 + } else {
8394 + *len = CL_DESC_BUF_LEN(desc->ctrl);
8395 + *ofst = pfe_pkt_headroom;
8396 + }
8397 +
8398 + /*
8399 + * Needed so we don't free a buffer/page
8400 + * twice on module_exit
8401 + */
8402 + desc->data = NULL;
8403 +
8404 + /*
8405 + * Ensure everything else is written to DDR before
8406 + * writing bd->ctrl
8407 + */
8408 + smp_wmb();
8409 +
8410 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8411 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8412 + }
8413 +
8414 + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8415 + return pkt;
8416 +}
8417 +
8418 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8419 + client_id, unsigned int qno,
8420 + u32 client_ctrl)
8421 +{
8422 +	/* Optimize the write since the destination may be non-cacheable */
8423 + if (!((unsigned long)pkt_hdr & 0x3)) {
8424 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8425 + client_id;
8426 + } else {
8427 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8428 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8429 + }
8430 +}
8431 +
8432 +/*This function puts the given packet in the specific client queue */
8433 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8434 + *data, unsigned int len, u32 client_ctrl,
8435 + unsigned int flags, void *client_data)
8436 +{
8437 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8438 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
8439 +
8440 + /* First buffer */
8441 + if (flags & HIF_FIRST_BUFFER) {
8442 + data -= sizeof(struct hif_hdr);
8443 + len += sizeof(struct hif_hdr);
8444 +
8445 + hif_hdr_write(data, client->id, qno, client_ctrl);
8446 + }
8447 +
8448 + desc->data = client_data;
8449 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8450 +
8451 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8452 +
8453 + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8454 + queue->tx_pending++;
8455 + queue->jiffies_last_packet = jiffies;
8456 +}
8457 +
8458 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8459 + unsigned int *flags, int count)
8460 +{
8461 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8462 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
8463 +
8464 + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
8465 + queue->read_idx, queue->tx_pending);
8466 +
8467 + if (!queue->tx_pending)
8468 + return NULL;
8469 +
8470 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
8471 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
8472 + client->id, TMU_DM_TX_TRANS, 4));
8473 +
8474 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
8475 + queue->done_tmu_tx_pkts = UINT_MAX -
8476 + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
8477 + else
8478 + queue->done_tmu_tx_pkts = tmu_tx_pkts -
8479 + queue->prev_tmu_tx_pkts;
8480 +
8481 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
8482 +
8483 + if (!queue->done_tmu_tx_pkts)
8484 + return NULL;
8485 + }
8486 +
8487 + if (desc->ctrl & CL_DESC_OWN)
8488 + return NULL;
8489 +
8490 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8491 + queue->tx_pending--;
8492 +
8493 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
8494 +
8495 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
8496 + queue->done_tmu_tx_pkts--;
8497 +
8498 + return desc->data;
8499 +}
8500 +
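+/*
+ * Seed the per-GEMAC, per-queue TMU tx credits: queue 0 gets
+ * DEFAULT_Q0_QDEPTH, all other queues get DEFAULT_MAX_QDEPTH.
+ */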
8501 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
8502 +{
8503 + int i, q;
8504 +
8505 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
8506 + for (q = 0; q < emac_txq_cnt; q++) {
8507 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
8508 + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
8509 + pfe->tmu_credit.tx_credit[i][q] =
8510 + pfe->tmu_credit.tx_credit_max[i][q];
8511 + }
8512 +}
8513 +
8514 +int pfe_hif_lib_init(struct pfe *pfe)
8515 +{
8516 + int rc;
8517 +
8518 + pr_info("%s\n", __func__);
8519 +
8520 + if (lro_mode) {
8521 + page_mode = 1;
8522 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
8523 + pfe_pkt_headroom = 0;
8524 + } else {
8525 + page_mode = 0;
8526 + pfe_pkt_size = PFE_PKT_SIZE;
8527 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
8528 + }
8529 +
8530 + if (tx_qos)
8531 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
8532 + else
8533 + emac_txq_cnt = EMAC_TXQ_CNT;
8534 +
8535 + hif_lib_tmu_credit_init(pfe);
8536 + pfe->hif.shm = &ghif_shm;
8537 + rc = pfe_hif_shm_init(pfe->hif.shm);
8538 +
8539 + return rc;
8540 +}
8541 +
8542 +void pfe_hif_lib_exit(struct pfe *pfe)
8543 +{
8544 + pr_info("%s\n", __func__);
8545 +
8546 + pfe_hif_shm_clean(pfe->hif.shm);
8547 +}
8548 --- /dev/null
8549 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8550 @@ -0,0 +1,239 @@
8551 +/*
8552 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8553 + * Copyright 2017 NXP
8554 + *
8555 + * This program is free software; you can redistribute it and/or modify
8556 + * it under the terms of the GNU General Public License as published by
8557 + * the Free Software Foundation; either version 2 of the License, or
8558 + * (at your option) any later version.
8559 + *
8560 + * This program is distributed in the hope that it will be useful,
8561 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8562 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8563 + * GNU General Public License for more details.
8564 + *
8565 + * You should have received a copy of the GNU General Public License
8566 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8567 + */
8568 +
8569 +#ifndef _PFE_HIF_LIB_H_
8570 +#define _PFE_HIF_LIB_H_
8571 +
8572 +#include "pfe_hif.h"
8573 +
8574 +#define HIF_CL_REQ_TIMEOUT 10
8575 +#define GFP_DMA_PFE 0
8576 +
8577 +enum {
8578 + REQUEST_CL_REGISTER = 0,
8579 + REQUEST_CL_UNREGISTER,
8580 + HIF_REQUEST_MAX
8581 +};
8582 +
8583 +enum {
8584 +	/* Event to indicate the client rx queue reached the watermark level */
8585 +	EVENT_HIGH_RX_WM = 0,
8586 +	/* Event to indicate that a packet was received for the client */
8587 +	EVENT_RX_PKT_IND,
8588 +	/* Event to indicate that packet tx is done for the client */
8589 + EVENT_TXDONE_IND,
8590 + HIF_EVENT_MAX
8591 +};
8592 +
8593 +/* Structure to store client queue info */
8596 +struct hif_client_rx_queue {
8597 + struct rx_queue_desc *base;
8598 + u32 size;
8599 + u32 read_idx;
8600 + u32 write_idx;
8601 +};
8602 +
8603 +struct hif_client_tx_queue {
8604 + struct tx_queue_desc *base;
8605 + u32 size;
8606 + u32 read_idx;
8607 + u32 write_idx;
8608 + u32 tx_pending;
8609 + unsigned long jiffies_last_packet;
8610 + u32 nocpy_flag;
8611 + u32 prev_tmu_tx_pkts;
8612 + u32 done_tmu_tx_pkts;
8613 +};
8614 +
8615 +struct hif_client_s {
8616 + int id;
8617 + int tx_qn;
8618 + int rx_qn;
8619 + void *rx_qbase;
8620 + void *tx_qbase;
8621 + int tx_qsize;
8622 + int rx_qsize;
8623 + int cpu_id;
8624 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8625 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8626 + int (*event_handler)(void *priv, int event, int data);
8627 + unsigned long queue_mask[HIF_EVENT_MAX];
8628 + struct pfe *pfe;
8629 + void *priv;
8630 +};
8631 +
8632 +/*
8633 + * Client specific shared memory
8634 + * It contains number of Rx/Tx queues, base addresses and queue sizes
8635 + */
8636 +struct hif_client_shm {
8637 + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
8638 + unsigned long rx_qbase; /*Rx queue base address */
8639 + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
8640 + unsigned long tx_qbase; /* Tx queue base address */
8641 + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
8642 +};
8643 +
8644 +/*Client shared memory ctrl bit description */
8645 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
8646 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
8647 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
8648 + & 0xFF)
8649 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
8650 + & 0xFF)
8651 +
8652 +/*
8653 + * Shared memory used to communicate between HIF driver and host/client drivers
8654 + * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
8655 + * initialized with host buffers and the buffer count in the pool.
8656 + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
8657 + *
8658 + */
8659 +struct hif_shm {
8660 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
8661 + /*Rx buffers required to initialize HIF rx descriptors */
8662 + void *rx_buf_pool[HIF_RX_DESC_NT];
8663 + unsigned long g_client_status[2]; /*Global client status bit mask */
8664 + /* Client specific shared memory */
8665 + struct hif_client_shm client[HIF_CLIENTS_MAX];
8666 +};
8667 +
8668 +#define CL_DESC_OWN BIT(31)
8669 +/* This sets owner ship to HIF driver */
8670 +#define CL_DESC_LAST BIT(30)
8671 +/* This indicates last packet for multi buffers handling */
8672 +#define CL_DESC_FIRST BIT(29)
8673 +/* This indicates first packet for multi buffers handling */
8674 +
8675 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
8676 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
8677 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
8678 +
8679 +struct rx_queue_desc {
8680 + void *data;
8681 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8682 + u32 client_ctrl;
8683 +};
8684 +
8685 +struct tx_queue_desc {
8686 + void *data;
8687 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8688 +};
8689 +
8690 +/* HIF Rx does not work properly for 2-byte aligned buffers, and the
8691 + * ip_header should be 4-byte aligned for better performance.
8692 + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned.
8693 + */
8694 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
8695 +/* must be big enough for headroom, pkt size and skb shared info */
8696 +#define PFE_BUF_SIZE 2048
8697 +#define PFE_PKT_HEADROOM 128
8698 +
8699 +#define SKB_SHARED_INFO_SIZE (sizeof(struct skb_shared_info))
8700 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
8701 + - SKB_SHARED_INFO_SIZE)
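+/*
+ * e.g. with a 2048-byte buffer and 128 bytes of headroom, PFE_PKT_SIZE is
+ * 1920 bytes minus the skb shared info area.
+ */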
8702 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
8703 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
8704 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
8705 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
8706 + + MAX_L4_HDR_SIZE)
8707 +/* Used in page mode to clamp packet size to the maximum supported by the hif
8708 + * hw interface (<16KiB)
8709 + */
8710 +#define MAX_PFE_PKT_SIZE 16380UL
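+/*
+ * Worked example (sizes depend on kernel config): with PFE_BUF_SIZE = 2048,
+ * PFE_PKT_HEADROOM = 128 and sizeof(struct skb_shared_info) = 320 on a
+ * 64-bit build, PFE_PKT_SIZE = 2048 - 128 - 320 = 1600 bytes, which also
+ * fits the 16-bit CL_DESC_BUF_LEN field of the descriptor ctrl word.
+ */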
8711 +
8712 +extern unsigned int pfe_pkt_size;
8713 +extern unsigned int pfe_pkt_headroom;
8714 +extern unsigned int page_mode;
8715 +extern unsigned int lro_mode;
8716 +extern unsigned int tx_qos;
8717 +extern unsigned int emac_txq_cnt;
8718 +
8719 +int pfe_hif_lib_init(struct pfe *pfe);
8720 +void pfe_hif_lib_exit(struct pfe *pfe);
8721 +int hif_lib_client_register(struct hif_client_s *client);
8722 +int hif_lib_client_unregister(struct hif_client_s *client);
8723 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8724 + *data, unsigned int len, u32 client_ctrl,
8725 + unsigned int flags, void *client_data);
8726 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
8727 + unsigned int len, u32 client_ctrl, void *client_data);
8728 +void hif_lib_indicate_client(int cl_id, int event, int data);
8729 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
8730 + data);
8731 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
8732 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
8733 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8734 + unsigned int *flags, int count);
8735 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8736 + *ofst, unsigned int *rx_ctrl,
8737 + unsigned int *desc_ctrl, void **priv_data);
8738 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
8739 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
8740 + enable);
8741 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
8742 + qno)
8743 +{
8744 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8745 +
8746 + return (queue->size - queue->tx_pending);
8747 +}
8748 +
8749 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
8750 + int qno)
8751 +{
8752 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8753 +
8754 + return queue->write_idx;
8755 +}
8756 +
8757 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
8758 + qno)
8759 +{
8760 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8761 +
8762 + return queue->tx_pending;
8763 +}
8764 +
8765 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
8766 + ((pfe)->tmu_credit.tx_credit[id][qno])
8767 +
8768 +#define hif_lib_tx_credit_max(pfe, id, qno) \
8769 + ((pfe)->tmu_credit.tx_credit_max[id][qno])
8770 +
8771 +/*
8772 + * Consume TX credit and account transmitted packets for a GEMAC/queue pair
8773 + * when TX QoS is enabled.
8774 + */
8775 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
8776 +	({ typeof(pfe) pfe_ = pfe; \
8777 +	typeof(id) id_ = id; \
8778 +	typeof(qno) qno_ = qno; \
8779 +	typeof(credit) credit_ = credit; \
8780 +	do { \
8781 +		if (tx_qos) { \
8782 +			(pfe_)->tmu_credit.tx_credit[id_][qno_]\
8783 +			-= credit_; \
8784 +			(pfe_)->tmu_credit.tx_packets[id_][qno_]\
8785 +			+= credit_; \
8786 +		} \
8787 +	} while (0); \
8788 +	})
8788 +
8789 +#endif /* _PFE_HIF_LIB_H_ */
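
For context, a minimal sketch (not part of the patch) of how a client transmit
path might combine the helpers above; tx_qos, the global pfe context and the
declarations in this header are assumed, everything else is hypothetical:

    static int example_xmit(struct hif_client_s *client, int id, int qno,
                            void *data, unsigned int len)
    {
            /* back off when the TMU credit pool or the HIF queue is full */
            if (tx_qos && !hif_lib_tx_credit_avail(pfe, id, qno))
                    return -EBUSY;
            if (!hif_lib_tx_avail(client, qno))
                    return -EBUSY;

            __hif_lib_xmit_pkt(client, qno, data, len, 0 /* client_ctrl */,
                               0 /* flags; real callers mark first/last buffers */,
                               NULL);
            hif_lib_tx_credit_use(pfe, id, qno, 1);
            return 0;
    }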
8790 --- /dev/null
8791 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
8792 @@ -0,0 +1,176 @@
8793 +/*
8794 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8795 + * Copyright 2017 NXP
8796 + *
8797 + * This program is free software; you can redistribute it and/or modify
8798 + * it under the terms of the GNU General Public License as published by
8799 + * the Free Software Foundation; either version 2 of the License, or
8800 + * (at your option) any later version.
8801 + *
8802 + * This program is distributed in the hope that it will be useful,
8803 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8804 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8805 + * GNU General Public License for more details.
8806 + *
8807 + * You should have received a copy of the GNU General Public License
8808 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8809 + */
8810 +
8811 +#include "pfe_mod.h"
8812 +#include "pfe_hw.h"
8813 +
8814 +/* Functions to handle most of pfe hw register initialization */
8815 +int pfe_hw_init(struct pfe *pfe, int resume)
8816 +{
8817 + struct class_cfg class_cfg = {
8818 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8819 + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
8820 + ROUTE_TABLE_BASEADDR,
8821 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
8822 + };
8823 +
8824 + struct tmu_cfg tmu_cfg = {
8825 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8826 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
8827 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
8828 + };
8829 +
8830 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8831 + struct util_cfg util_cfg = {
8832 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8833 + };
8834 +#endif
8835 +
8836 + struct BMU_CFG bmu1_cfg = {
8837 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
8838 + BMU1_LMEM_BASEADDR),
8839 + .count = BMU1_BUF_COUNT,
8840 + .size = BMU1_BUF_SIZE,
8841 + .low_watermark = 10,
8842 + .high_watermark = 15,
8843 + };
8844 +
8845 + struct BMU_CFG bmu2_cfg = {
8846 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
8847 + BMU2_DDR_BASEADDR),
8848 + .count = BMU2_BUF_COUNT,
8849 + .size = BMU2_BUF_SIZE,
8850 + .low_watermark = 250,
8851 + .high_watermark = 253,
8852 + };
8853 +
8854 + struct gpi_cfg egpi1_cfg = {
8855 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
8856 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
8857 + .aseq_len = EGPI1_ASEQ_LEN,
8858 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
8859 + EMAC_TCNTRL_REG),
8860 + };
8861 +
8862 + struct gpi_cfg egpi2_cfg = {
8863 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
8864 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
8865 + .aseq_len = EGPI2_ASEQ_LEN,
8866 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
8867 + EMAC_TCNTRL_REG),
8868 + };
8869 +
8870 + struct gpi_cfg hgpi_cfg = {
8871 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
8872 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
8873 + .aseq_len = HGPI_ASEQ_LEN,
8874 + .mtip_pause_reg = 0,
8875 + };
8876 +
8877 + pr_info("%s\n", __func__);
8878 +
8879 +#if !defined(LS1012A_PFE_RESET_WA)
8880 + /* LS1012A needs this to make PE work correctly */
8881 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
8882 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
8883 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
8884 + usleep_range(10, 20);
8885 +#endif
8886 +
8887 + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
8888 + pr_info("TMU version: %x\n", readl(TMU_VERSION));
8889 +
8890 + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
8891 + BMU_VERSION));
8892 + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
8893 + BMU_VERSION));
8894 +
8895 + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
8896 + GPI_VERSION));
8897 + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
8898 + GPI_VERSION));
8899 + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
8900 + GPI_VERSION));
8901 +
8902 + pr_info("HIF version: %x\n", readl(HIF_VERSION));
8903 +	pr_info("HIF NOCPY version:	%x\n", readl(HIF_NOCPY_VERSION));
8904 +
8905 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8906 + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
8907 +#endif
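+	/*
+	 * Busy-wait (no timeout) until the TMU finishes initializing its
+	 * ECC-protected memories before the HIF and BMU blocks are touched.
+	 */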
8908 + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
8909 + ;
8910 +
8911 + hif_rx_disable();
8912 + hif_tx_disable();
8913 +
8914 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
8915 +
8916 + pr_info("bmu_init(1) done\n");
8917 +
8918 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
8919 +
8920 + pr_info("bmu_init(2) done\n");
8921 +
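+	/* tell the class PEs whether this is a cold start or a resume */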
8922 + class_cfg.resume = resume ? 1 : 0;
8923 +
8924 + class_init(&class_cfg);
8925 +
8926 + pr_info("class_init() done\n");
8927 +
8928 + tmu_init(&tmu_cfg);
8929 +
8930 + pr_info("tmu_init() done\n");
8931 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8932 + util_init(&util_cfg);
8933 +
8934 + pr_info("util_init() done\n");
8935 +#endif
8936 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
8937 +
8938 + pr_info("gpi_init(1) done\n");
8939 +
8940 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
8941 +
8942 + pr_info("gpi_init(2) done\n");
8943 +
8944 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
8945 +
8946 + pr_info("gpi_init(hif) done\n");
8947 +
8948 + bmu_enable(BMU1_BASE_ADDR);
8949 +
8950 + pr_info("bmu_enable(1) done\n");
8951 +
8952 + bmu_enable(BMU2_BASE_ADDR);
8953 +
8954 + pr_info("bmu_enable(2) done\n");
8955 +
8956 + return 0;
8957 +}
8958 +
8959 +void pfe_hw_exit(struct pfe *pfe)
8960 +{
8961 + pr_info("%s\n", __func__);
8962 +
8963 + bmu_disable(BMU1_BASE_ADDR);
8964 + bmu_reset(BMU1_BASE_ADDR);
8965 +
8966 + bmu_disable(BMU2_BASE_ADDR);
8967 + bmu_reset(BMU2_BASE_ADDR);
8968 +}
8969 --- /dev/null
8970 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
8971 @@ -0,0 +1,27 @@
8972 +/*
8973 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8974 + * Copyright 2017 NXP
8975 + *
8976 + * This program is free software; you can redistribute it and/or modify
8977 + * it under the terms of the GNU General Public License as published by
8978 + * the Free Software Foundation; either version 2 of the License, or
8979 + * (at your option) any later version.
8980 + *
8981 + * This program is distributed in the hope that it will be useful,
8982 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8983 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8984 + * GNU General Public License for more details.
8985 + *
8986 + * You should have received a copy of the GNU General Public License
8987 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8988 + */
8989 +
8990 +#ifndef _PFE_HW_H_
8991 +#define _PFE_HW_H_
8992 +
8993 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
8994 +
8995 +int pfe_hw_init(struct pfe *pfe, int resume);
8996 +void pfe_hw_exit(struct pfe *pfe);
8997 +
8998 +#endif /* _PFE_HW_H_ */
8999 --- /dev/null
9000 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9001 @@ -0,0 +1,394 @@
9002 +/*
9003 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9004 + * Copyright 2017 NXP
9005 + *
9006 + * This program is free software; you can redistribute it and/or modify
9007 + * it under the terms of the GNU General Public License as published by
9008 + * the Free Software Foundation; either version 2 of the License, or
9009 + * (at your option) any later version.
9010 + *
9011 + * This program is distributed in the hope that it will be useful,
9012 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9013 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9014 + * GNU General Public License for more details.
9015 + *
9016 + * You should have received a copy of the GNU General Public License
9017 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9018 + */
9019 +
9020 +#include <linux/module.h>
9021 +#include <linux/device.h>
9022 +#include <linux/of_net.h>
9023 +#include <linux/of_address.h>
9024 +#include <linux/platform_device.h>
9025 +#include <linux/slab.h>
9026 +#include <linux/clk.h>
9027 +#include <linux/mfd/syscon.h>
9028 +#include <linux/regmap.h>
9029 +
9030 +#include "pfe_mod.h"
9031 +
9032 +struct ls1012a_pfe_platform_data pfe_platform_data;
9033 +
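+/*
+ * Parse one GEMAC child node of the PFE device tree node: the "reg",
+ * "mac-address"/phy-mode, "fsl,gemac-bus-id", "fsl,gemac-phy-id",
+ * "fsl,mdio-mux-val" and "fsl,pfe-phy-if-flags" properties are copied into
+ * the per-port platform data below.
+ */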
9034 +static int pfe_get_gemac_if_properties(struct device_node *parent, int port,
9035 +				       int if_cnt,
9036 +				       struct ls1012a_pfe_platform_data *pdata)
9038 +{
9039 + struct device_node *gem = NULL, *phy = NULL;
9040 + int size;
9041 + int ii = 0, phy_id = 0;
9042 + const u32 *addr;
9043 + const void *mac_addr;
9044 +
9045 + for (ii = 0; ii < if_cnt; ii++) {
9046 + gem = of_get_next_child(parent, gem);
9047 + if (!gem)
9048 + goto err;
9049 + addr = of_get_property(gem, "reg", &size);
9050 + if (addr && (be32_to_cpup(addr) == port))
9051 + break;
9052 + }
9053 +
9054 + if (ii >= if_cnt) {
9055 +		pr_err("%s:%d Failed to find interface %d\n",
9056 +		       __func__, __LINE__, port);
9057 + goto err;
9058 + }
9059 +
9060 + pdata->ls1012a_eth_pdata[port].gem_id = port;
9061 +
9062 + mac_addr = of_get_mac_address(gem);
9063 +
9064 + if (mac_addr) {
9065 + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9066 + ETH_ALEN);
9067 + }
9068 +
9069 + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9070 +
9071 + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9072 + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9073 + __LINE__);
9074 +
9075 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
9076 + if (!addr)
9077 + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
9078 + __LINE__);
9079 + else
9080 + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
9081 +
9082 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9083 + if (!addr) {
9084 + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9085 + __LINE__);
9086 + } else {
9087 + phy_id = be32_to_cpup(addr);
9088 + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9089 + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9090 + }
9091 +
9092 +	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
9093 +	if (!addr)
9094 +		pr_err("%s: Invalid mdio-mux-val....\n", __func__);
9095 +	else
9096 +		pdata->ls1012a_eth_pdata[port].mdio_muxval =
9097 +							be32_to_cpup(addr);
9098 +
9099 + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9100 + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9101 + pdata->ls1012a_eth_pdata[port].mdio_muxval;
9102 +
9103 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9104 + if (!addr)
9105 + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
9106 + __func__, __LINE__);
9107 + else
9108 + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9109 +
9110 +	/* Skip reading the mdio properties when no PHY is attached */
9111 + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9112 + goto done;
9113 +
9114 + phy = of_get_next_child(gem, NULL);
9115 +
9116 + addr = of_get_property(phy, "reg", &size);
9117 +
9118 + if (!addr)
9119 + pr_err("%s:%d Invalid phy enable flag....\n",
9120 + __func__, __LINE__);
9121 + else
9122 + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
9123 +
9124 + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9125 +
9126 +done:
9127 +
9128 + return 0;
9129 +
9130 +err:
9131 + return -1;
9132 +}
9133 +
9134 +/*
9135 + * pfe_platform_probe - probe the PFE platform device, map its resources
9136 + * and hand off to pfe_probe()
9137 + */
9140 +static int pfe_platform_probe(struct platform_device *pdev)
9141 +{
9142 + struct resource res;
9143 + int ii, rc, interface_count = 0, size = 0;
9144 + const u32 *prop;
9145 + struct device_node *np;
9146 + struct clk *pfe_clk;
9147 +
9148 + np = pdev->dev.of_node;
9149 +
9150 + if (!np) {
9151 + pr_err("Invalid device node\n");
9152 + return -EINVAL;
9153 + }
9154 +
9155 + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9156 + if (!pfe) {
9157 + rc = -ENOMEM;
9158 + goto err_alloc;
9159 + }
9160 +
9161 + platform_set_drvdata(pdev, pfe);
9162 +
9163 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9164 +
9165 + if (of_address_to_resource(np, 1, &res)) {
9166 + rc = -ENOMEM;
9167 + pr_err("failed to get ddr resource\n");
9168 + goto err_ddr;
9169 + }
9170 +
9171 + pfe->ddr_phys_baseaddr = res.start;
9172 + pfe->ddr_size = resource_size(&res);
9173 +
9174 + pfe->ddr_baseaddr = phys_to_virt(res.start);
9175 + if (!pfe->ddr_baseaddr) {
9176 +		pr_err("failed to map ddr\n");
9177 + rc = -ENOMEM;
9178 + goto err_ddr;
9179 + }
9180 +
9181 + pfe->scfg =
9182 + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9183 + "fsl,pfe-scfg");
9184 +	if (IS_ERR(pfe->scfg)) {
9185 +		dev_err(&pdev->dev, "No syscfg phandle specified\n");
9186 +		rc = PTR_ERR(pfe->scfg);
9187 +		goto err_axi;
9188 +	}
9188 +
9189 + pfe->cbus_baseaddr = of_iomap(np, 0);
9190 + if (!pfe->cbus_baseaddr) {
9191 + rc = -ENOMEM;
9192 + pr_err("failed to get axi resource\n");
9193 + goto err_axi;
9194 + }
9195 +
9196 + pfe->hif_irq = platform_get_irq(pdev, 0);
9197 + if (pfe->hif_irq < 0) {
9198 + pr_err("platform_get_irq for hif failed\n");
9199 + rc = pfe->hif_irq;
9200 + goto err_hif_irq;
9201 + }
9202 +
9203 + pfe->wol_irq = platform_get_irq(pdev, 2);
9204 + if (pfe->wol_irq < 0) {
9205 + pr_err("platform_get_irq for WoL failed\n");
9206 + rc = pfe->wol_irq;
9207 + goto err_hif_irq;
9208 + }
9209 +
9210 + /* Read interface count */
9211 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9212 + if (!prop) {
9213 + pr_err("Failed to read number of interfaces\n");
9214 + rc = -ENXIO;
9215 + goto err_prop;
9216 + }
9217 +
9218 + interface_count = be32_to_cpup(prop);
9219 + if (interface_count <= 0) {
9220 +		pr_err("Invalid ethernet interface count: %d\n",
9221 +		       interface_count);
9222 + rc = -ENXIO;
9223 + goto err_prop;
9224 + }
9225 +
9226 + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9227 +
9228 +	for (ii = 0; ii < interface_count; ii++) {
9229 +		pfe_get_gemac_if_properties(np, ii, interface_count,
9230 +					    &pfe_platform_data);
9231 +	}
9232 +
9233 + pfe->dev = &pdev->dev;
9234 +
9235 + pfe->dev->platform_data = &pfe_platform_data;
9236 +
9237 + /* declare WoL capabilities */
9238 + device_init_wakeup(&pdev->dev, true);
9239 +
9240 + /* find the clocks */
9241 +	pfe_clk = devm_clk_get(pfe->dev, "pfe");
9242 +	if (IS_ERR(pfe_clk)) {
9243 +		rc = PTR_ERR(pfe_clk);
9244 +		goto err_hif_irq;
9245 +	}
9244 +
9245 + /* PFE clock is (platform clock / 2) */
9246 + /* save sys_clk value as KHz */
9247 + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9248 +
9249 + rc = pfe_probe(pfe);
9250 + if (rc < 0)
9251 + goto err_probe;
9252 +
9253 + return 0;
9254 +
9255 +err_probe:
9256 +err_prop:
9257 +err_hif_irq:
9258 + iounmap(pfe->cbus_baseaddr);
9259 +
9260 +err_axi:
9261 + iounmap(pfe->ddr_baseaddr);
9262 +
9263 +err_ddr:
9264 + platform_set_drvdata(pdev, NULL);
9265 +
9266 + kfree(pfe);
9267 +
9268 +err_alloc:
9269 + return rc;
9270 +}
9271 +
9272 +/*
9273 + * pfe_platform_remove - tear down the driver core and release the resources
9274 + * acquired in pfe_platform_probe()
9275 + */
9275 +static int pfe_platform_remove(struct platform_device *pdev)
9276 +{
9277 + struct pfe *pfe = platform_get_drvdata(pdev);
9278 + int rc;
9279 +
9280 + pr_info("%s\n", __func__);
9281 +
9282 + rc = pfe_remove(pfe);
9283 +
9284 + iounmap(pfe->cbus_baseaddr);
9285 + iounmap(pfe->ddr_baseaddr);
9286 +
9287 + platform_set_drvdata(pdev, NULL);
9288 +
9289 + kfree(pfe);
9290 +
9291 + return rc;
9292 +}
9293 +
9294 +#ifdef CONFIG_PM
9295 +#ifdef CONFIG_PM_SLEEP
9296 +static int pfe_platform_suspend(struct device *dev)
9297 +{
9298 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9299 + struct net_device *netdev;
9300 + int i;
9301 +
9302 + pfe->wake = 0;
9303 +
9304 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9305 + netdev = pfe->eth.eth_priv[i]->ndev;
9306 +
9307 + netif_device_detach(netdev);
9308 +
9309 + if (netif_running(netdev))
9310 + if (pfe_eth_suspend(netdev))
9311 + pfe->wake = 1;
9312 + }
9313 +
9314 + /* Shutdown PFE only if we're not waking up the system */
9315 + if (!pfe->wake) {
9316 +#if defined(LS1012A_PFE_RESET_WA)
9317 + pfe_hif_rx_idle(&pfe->hif);
9318 +#endif
9319 + pfe_ctrl_suspend(&pfe->ctrl);
9320 + pfe_firmware_exit(pfe);
9321 +
9322 + pfe_hif_exit(pfe);
9323 + pfe_hif_lib_exit(pfe);
9324 +
9325 + pfe_hw_exit(pfe);
9326 + }
9327 +
9328 + return 0;
9329 +}
9330 +
9331 +static int pfe_platform_resume(struct device *dev)
9332 +{
9333 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9334 + struct net_device *netdev;
9335 + int i;
9336 +
9337 + if (!pfe->wake) {
9338 + pfe_hw_init(pfe, 1);
9339 + pfe_hif_lib_init(pfe);
9340 + pfe_hif_init(pfe);
9341 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9342 + util_enable();
9343 +#endif
9344 + tmu_enable(0xf);
9345 + class_enable();
9346 + pfe_ctrl_resume(&pfe->ctrl);
9347 + }
9348 +
9349 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9350 + netdev = pfe->eth.eth_priv[i]->ndev;
9351 +
9352 + if (pfe->eth.eth_priv[i]->mii_bus)
9353 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
9354 +
9355 + if (netif_running(netdev))
9356 + pfe_eth_resume(netdev);
9357 +
9358 + netif_device_attach(netdev);
9359 + }
9360 + return 0;
9361 +}
9362 +#else
9363 +#define pfe_platform_suspend NULL
9364 +#define pfe_platform_resume NULL
9365 +#endif
9366 +
9367 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9368 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9369 +};
9370 +#endif
9371 +
9372 +static const struct of_device_id pfe_match[] = {
9373 + {
9374 + .compatible = "fsl,pfe",
9375 + },
9376 + {},
9377 +};
9378 +MODULE_DEVICE_TABLE(of, pfe_match);
9379 +
9380 +static struct platform_driver pfe_platform_driver = {
9381 + .probe = pfe_platform_probe,
9382 + .remove = pfe_platform_remove,
9383 + .driver = {
9384 + .name = "pfe",
9385 + .of_match_table = pfe_match,
9386 +#ifdef CONFIG_PM
9387 + .pm = &pfe_platform_pm_ops,
9388 +#endif
9389 + },
9390 +};
9391 +
9392 +module_platform_driver(pfe_platform_driver);
9393 +MODULE_LICENSE("GPL");
9394 +MODULE_DESCRIPTION("PFE Ethernet driver");
9395 +MODULE_AUTHOR("NXP DNCPE");
9396 --- /dev/null
9397 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9398 @@ -0,0 +1,141 @@
9399 +/*
9400 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9401 + * Copyright 2017 NXP
9402 + *
9403 + * This program is free software; you can redistribute it and/or modify
9404 + * it under the terms of the GNU General Public License as published by
9405 + * the Free Software Foundation; either version 2 of the License, or
9406 + * (at your option) any later version.
9407 + *
9408 + * This program is distributed in the hope that it will be useful,
9409 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9410 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9411 + * GNU General Public License for more details.
9412 + *
9413 + * You should have received a copy of the GNU General Public License
9414 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9415 + */
9416 +
9417 +#include <linux/dma-mapping.h>
9418 +#include "pfe_mod.h"
9419 +
9420 +struct pfe *pfe;
9421 +
9422 +/*
9423 + * pfe_probe - bring up the PFE hardware, firmware and all driver subsystems
9424 + */
9425 +int pfe_probe(struct pfe *pfe)
9426 +{
9427 + int rc;
9428 +
9429 + if (pfe->ddr_size < DDR_MAX_SIZE) {
9430 +		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
9431 +		       __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9432 + rc = -ENOMEM;
9433 + goto err_hw;
9434 + }
9435 +
9436 + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9437 + (8 * SZ_1M - 1)) != 0) {
9438 + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9439 + __func__, (int)pfe->ddr_phys_baseaddr +
9440 + BMU2_DDR_BASEADDR);
9441 + rc = -ENOMEM;
9442 + goto err_hw;
9443 + }
9444 +
9445 + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9446 + (unsigned long)pfe->cbus_baseaddr,
9447 + (unsigned long)pfe->ddr_baseaddr,
9448 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9449 +
9450 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9451 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9452 +
9453 + rc = pfe_hw_init(pfe, 0);
9454 + if (rc < 0)
9455 + goto err_hw;
9456 +
9457 + rc = pfe_hif_lib_init(pfe);
9458 + if (rc < 0)
9459 + goto err_hif_lib;
9460 +
9461 + rc = pfe_hif_init(pfe);
9462 + if (rc < 0)
9463 + goto err_hif;
9464 +
9465 + rc = pfe_firmware_init(pfe);
9466 + if (rc < 0)
9467 + goto err_firmware;
9468 +
9469 + rc = pfe_ctrl_init(pfe);
9470 + if (rc < 0)
9471 + goto err_ctrl;
9472 +
9473 + rc = pfe_eth_init(pfe);
9474 + if (rc < 0)
9475 + goto err_eth;
9476 +
9477 + rc = pfe_sysfs_init(pfe);
9478 + if (rc < 0)
9479 + goto err_sysfs;
9480 +
9481 + rc = pfe_debugfs_init(pfe);
9482 + if (rc < 0)
9483 + goto err_debugfs;
9484 +
9485 + return 0;
9486 +
9487 +err_debugfs:
9488 + pfe_sysfs_exit(pfe);
9489 +
9490 +err_sysfs:
9491 + pfe_eth_exit(pfe);
9492 +
9493 +err_eth:
9494 + pfe_ctrl_exit(pfe);
9495 +
9496 +err_ctrl:
9497 + pfe_firmware_exit(pfe);
9498 +
9499 +err_firmware:
9500 + pfe_hif_exit(pfe);
9501 +
9502 +err_hif:
9503 + pfe_hif_lib_exit(pfe);
9504 +
9505 +err_hif_lib:
9506 + pfe_hw_exit(pfe);
9507 +
9508 +err_hw:
9509 + return rc;
9510 +}
9511 +
9512 +/*
9513 + * pfe_remove - shut down all driver subsystems in reverse of pfe_probe()
9514 + */
9515 +int pfe_remove(struct pfe *pfe)
9516 +{
9517 + pr_info("%s\n", __func__);
9518 +
9519 + pfe_debugfs_exit(pfe);
9520 +
9521 + pfe_sysfs_exit(pfe);
9522 +
9523 + pfe_eth_exit(pfe);
9524 +
9525 + pfe_ctrl_exit(pfe);
9526 +
9527 +#if defined(LS1012A_PFE_RESET_WA)
9528 + pfe_hif_rx_idle(&pfe->hif);
9529 +#endif
9530 + pfe_firmware_exit(pfe);
9531 +
9532 + pfe_hif_exit(pfe);
9533 +
9534 + pfe_hif_lib_exit(pfe);
9535 +
9536 + pfe_hw_exit(pfe);
9537 +
9538 + return 0;
9539 +}
9540 --- /dev/null
9541 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
9542 @@ -0,0 +1,112 @@
9543 +/*
9544 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9545 + * Copyright 2017 NXP
9546 + *
9547 + * This program is free software; you can redistribute it and/or modify
9548 + * it under the terms of the GNU General Public License as published by
9549 + * the Free Software Foundation; either version 2 of the License, or
9550 + * (at your option) any later version.
9551 + *
9552 + * This program is distributed in the hope that it will be useful,
9553 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9554 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9555 + * GNU General Public License for more details.
9556 + *
9557 + * You should have received a copy of the GNU General Public License
9558 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9559 + */
9560 +
9561 +#ifndef _PFE_MOD_H_
9562 +#define _PFE_MOD_H_
9563 +
9564 +#include <linux/device.h>
9565 +#include <linux/elf.h>
9566 +
9567 +struct pfe;
9568 +
9569 +#include "pfe_hw.h"
9570 +#include "pfe_firmware.h"
9571 +#include "pfe_ctrl.h"
9572 +#include "pfe_hif.h"
9573 +#include "pfe_hif_lib.h"
9574 +#include "pfe_eth.h"
9575 +#include "pfe_sysfs.h"
9576 +#include "pfe_perfmon.h"
9577 +#include "pfe_debugfs.h"
9578 +
9579 +#define PHYID_MAX_VAL 32
9580 +
9581 +struct pfe_tmu_credit {
9582 +	/* Number of allowed TX packets in flight; matches the TMU queue size */
9583 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9584 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9585 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9586 +};
9587 +
9588 +struct pfe {
9589 + struct regmap *scfg;
9590 + unsigned long ddr_phys_baseaddr;
9591 + void *ddr_baseaddr;
9592 + unsigned int ddr_size;
9593 + void *cbus_baseaddr;
9594 + void *apb_baseaddr;
9595 + unsigned long iram_phys_baseaddr;
9596 + void *iram_baseaddr;
9597 + unsigned long ipsec_phys_baseaddr;
9598 + void *ipsec_baseaddr;
9599 + int hif_irq;
9600 + int wol_irq;
9601 + int hif_client_irq;
9602 + struct device *dev;
9603 + struct dentry *dentry;
9604 + struct pfe_ctrl ctrl;
9605 + struct pfe_hif hif;
9606 + struct pfe_eth eth;
9607 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
9608 +#if defined(CFG_DIAGS)
9609 + struct pfe_diags diags;
9610 +#endif
9611 + struct pfe_tmu_credit tmu_credit;
9612 + struct pfe_cpumon cpumon;
9613 + struct pfe_memmon memmon;
9614 + int wake;
9615 + int mdio_muxval[PHYID_MAX_VAL];
9616 + struct clk *hfe_clock;
9617 +};
9618 +
9619 +extern struct pfe *pfe;
9620 +
9621 +int pfe_probe(struct pfe *pfe);
9622 +int pfe_remove(struct pfe *pfe);
9623 +
9624 +/* DDR mapping in reserved memory */
9625 +#define ROUTE_TABLE_BASEADDR 0
9626 +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
9627 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
9628 + * CLASS_ROUTE_SIZE)
9629 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
9630 +#define BMU2_BUF_COUNT (4096 - 256)
9631 +/* This is to get a total DDR size of 12MiB */
9632 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
9633 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
9634 +#define UTIL_CODE_SIZE (128 * SZ_1K)
9635 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
9636 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
9637 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
9638 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
9639 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
9640 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
9641 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
9642 +#define TMU_LLM_QUEUE_LEN (8 * 512)
9643 +/* Must be power of two and at least 16 * 8 = 128 bytes */
9644 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
9645 +/* (4 TMUs x 16 queues x queue_len) */
9646 +
9647 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
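+/*
+ * Worked example (assuming CLASS_ROUTE_SIZE = 128 and DDR_BUF_SIZE = 2048
+ * from headers not shown in this hunk): route table 32K x 128 = 4 MiB,
+ * BMU2 buffers 3840 x 2048 = 7.5 MiB, util code/data 192 KiB, class + TMU
+ * data 64 KiB, TMU LLM 4 x 16 x 4096 = 256 KiB, giving DDR_MAX_SIZE = 12 MiB.
+ */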
9648 +
9649 +/* LMEM Mapping */
9650 +#define BMU1_LMEM_BASEADDR 0
9651 +#define BMU1_BUF_COUNT 256
9652 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
9653 +
9654 +#endif /* _PFE_MOD_H_ */
9655 --- /dev/null
9656 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9657 @@ -0,0 +1,38 @@
9658 +/*
9659 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9660 + * Copyright 2017 NXP
9661 + *
9662 + * This program is free software; you can redistribute it and/or modify
9663 + * it under the terms of the GNU General Public License as published by
9664 + * the Free Software Foundation; either version 2 of the License, or
9665 + * (at your option) any later version.
9666 + *
9667 + * This program is distributed in the hope that it will be useful,
9668 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9669 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9670 + * GNU General Public License for more details.
9671 + *
9672 + * You should have received a copy of the GNU General Public License
9673 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9674 + */
9675 +
9676 +#ifndef _PFE_PERFMON_H_
9677 +#define _PFE_PERFMON_H_
9678 +
9679 +#include "pfe/pfe.h"
9680 +
9681 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
9682 +
9683 +struct pfe_cpumon {
9684 + u32 cpu_usage_pct[MAX_PE];
9685 + u32 class_usage_pct;
9686 +};
9687 +
9688 +struct pfe_memmon {
9689 + u32 kernel_memory_allocated;
9690 +};
9691 +
9692 +int pfe_perfmon_init(struct pfe *pfe);
9693 +void pfe_perfmon_exit(struct pfe *pfe);
9694 +
9695 +#endif /* _PFE_PERFMON_H_ */
9696 --- /dev/null
9697 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
9698 @@ -0,0 +1,818 @@
9699 +/*
9700 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9701 + * Copyright 2017 NXP
9702 + *
9703 + * This program is free software; you can redistribute it and/or modify
9704 + * it under the terms of the GNU General Public License as published by
9705 + * the Free Software Foundation; either version 2 of the License, or
9706 + * (at your option) any later version.
9707 + *
9708 + * This program is distributed in the hope that it will be useful,
9709 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9710 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9711 + * GNU General Public License for more details.
9712 + *
9713 + * You should have received a copy of the GNU General Public License
9714 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9715 + */
9716 +
9717 +#include <linux/module.h>
9718 +#include <linux/platform_device.h>
9719 +
9720 +#include "pfe_mod.h"
9721 +
9722 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
9723 +#define NUM_QUEUES 16
9724 +
9725 +static char register_name[20][5] = {
9726 + "EPC", "ECAS", "EID", "ED",
9727 + "r0", "r1", "r2", "r3",
9728 + "r4", "r5", "r6", "r7",
9729 + "r8", "r9", "r10", "r11",
9730 + "r12", "r13", "r14", "r15",
9731 +};
9732 +
9733 +static char exception_name[14][20] = {
9734 + "Reset",
9735 + "HardwareFailure",
9736 + "NMI",
9737 + "InstBreakpoint",
9738 + "DataBreakpoint",
9739 + "Unsupported",
9740 + "PrivilegeViolation",
9741 + "InstBusError",
9742 + "DataBusError",
9743 + "AlignmentError",
9744 + "ArithmeticError",
9745 + "SystemCall",
9746 + "MemoryManagement",
9747 + "Interrupt",
9748 +};
9749 +
9750 +static unsigned long class_do_clear;
9751 +static unsigned long tmu_do_clear;
9752 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9753 +static unsigned long util_do_clear;
9754 +#endif
9755 +
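+/*
+ * Layout of the per-PE status area read below: a 4-byte state string,
+ * followed by 32-bit counter, rx and tx (qstatus for TMUs) words, a drop
+ * count, and optionally a "DBUG" marker with up to 16 debug words.
+ */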
9756 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
9757 + do_clear)
9758 +{
9759 + ssize_t len = 0;
9760 + u32 val;
9761 + char statebuf[5];
9762 + struct pfe_cpumon *cpumon = &pfe->cpumon;
9763 + u32 debug_indicator;
9764 + u32 debug[20];
9765 +
9766 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
9767 + dmem_addr += 4;
9768 +
9769 + statebuf[4] = '\0';
9770 + len += sprintf(buf + len, "state=%4s ", statebuf);
9771 +
9772 + val = pe_dmem_read(id, dmem_addr, 4);
9773 + dmem_addr += 4;
9774 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
9775 +
9776 + val = pe_dmem_read(id, dmem_addr, 4);
9777 + if (do_clear && val)
9778 + pe_dmem_write(id, 0, dmem_addr, 4);
9779 + dmem_addr += 4;
9780 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
9781 +
9782 + val = pe_dmem_read(id, dmem_addr, 4);
9783 + if (do_clear && val)
9784 + pe_dmem_write(id, 0, dmem_addr, 4);
9785 + dmem_addr += 4;
9786 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
9787 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
9788 + else
9789 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
9790 +
9791 + val = pe_dmem_read(id, dmem_addr, 4);
9792 + if (do_clear && val)
9793 + pe_dmem_write(id, 0, dmem_addr, 4);
9794 + dmem_addr += 4;
9795 + if (val)
9796 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
9797 +
9798 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
9799 +
9800 + len += sprintf(buf + len, "\n");
9801 +
9802 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
9803 + dmem_addr += 4;
9804 + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
9805 + int j, last = 0;
9806 +
9807 + for (j = 0; j < 16; j++) {
9808 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
9809 + if (debug[j]) {
9810 + if (do_clear)
9811 + pe_dmem_write(id, 0, dmem_addr, 4);
9812 + last = j + 1;
9813 + }
9814 + dmem_addr += 4;
9815 + }
9816 + for (j = 0; j < last; j++) {
9817 + len += sprintf(buf + len, "%08x%s",
9818 + cpu_to_be32(debug[j]),
9819 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
9820 + }
9821 + }
9822 +
9823 + if (!strncmp(statebuf, "DEAD", 4)) {
9824 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
9825 +
9826 + len += sprintf(buf + len, "Exception details:\n");
9827 + for (i = 0; i < 20; i++) {
9828 + debug[i] = pe_dmem_read(id, dump, 4);
9829 + dump += 4;
9830 + if (i == 2)
9831 + len += sprintf(buf + len, "%4s = %08x (=%s) ",
9832 + register_name[i], cpu_to_be32(debug[i]),
9833 + exception_name[min((u32)
9834 + cpu_to_be32(debug[i]), (u32)13)]);
9835 + else
9836 + len += sprintf(buf + len, "%4s = %08x%s",
9837 + register_name[i], cpu_to_be32(debug[i]),
9838 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
9839 + }
9840 + }
9841 +
9842 + return len;
9843 +}
9844 +
9845 +static ssize_t class_phy_stats(char *buf, int phy)
9846 +{
9847 + ssize_t len = 0;
9848 + int off1 = phy * 0x28;
9849 + int off2 = phy * 0x10;
9850 +
9851 + if (phy == 3)
9852 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
9853 +
9854 + len += sprintf(buf + len, "phy: %d\n", phy);
9855 + len += sprintf(buf + len,
9856 + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
9857 + readl(CLASS_PHY1_RX_PKTS + off1),
9858 + readl(CLASS_PHY1_TX_PKTS + off1),
9859 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
9860 + readl(CLASS_PHY1_V4_PKTS + off1),
9861 + readl(CLASS_PHY1_V6_PKTS + off1));
9862 +
9863 + len += sprintf(buf + len,
9864 + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
9865 + readl(CLASS_PHY1_ICMP_PKTS + off2),
9866 + readl(CLASS_PHY1_IGMP_PKTS + off2),
9867 + readl(CLASS_PHY1_TCP_PKTS + off2),
9868 + readl(CLASS_PHY1_UDP_PKTS + off2));
9869 +
9870 + len += sprintf(buf + len, " err\n");
9871 + len += sprintf(buf + len,
9872 + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
9873 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
9874 + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
9875 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
9876 + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
9877 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
9878 +
9879 + return len;
9880 +}
9881 +
9882 +/* qm_read_drop_stat
9883 + * This function is used to read the drop statistics from the TMU
9884 + * hw drop counter. Since the hw counter is always cleared after
9885 + * reading, this function maintains the previous drop count, and
9886 + * adds the new value to it. That value can be retrieved by
9887 + * passing a pointer to it with the total_drops arg.
9888 + *
9889 + * @param tmu TMU number (0 - 3)
9890 + * @param queue queue number (0 - 15)
9891 + * @param total_drops pointer to location to store total drops (or NULL)
9892 + * @param do_reset if TRUE, clear total drops after updating
9893 + */
9894 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
9895 +{
9896 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
9897 + u32 val;
9898 +
9899 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
9900 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
9901 + val = readl(TMU_TEQ_DROP_STAT);
9902 + qtotal[tmu][queue] += val;
9903 + if (total_drops)
9904 + *total_drops = qtotal[tmu][queue];
9905 + if (do_reset)
9906 + qtotal[tmu][queue] = 0;
9907 + return val;
9908 +}
9909 +
9910 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
9911 +{
9912 + ssize_t len = 0;
9913 + u32 drops;
9914 +
9915 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
9916 +
9917 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
9918 +
9919 + /* Select queue */
9920 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
9921 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
9922 +
9923 + len += sprintf(buf + len,
9924 + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
9925 + drops, readl(TMU_TEQ_TRANS_STAT),
9926 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
9927 + readl(TMU_LLM_QUE_DROPCNT));
9928 +
9929 + return len;
9930 +}
9931 +
9932 +static ssize_t tmu_queues(char *buf, int tmu)
9933 +{
9934 + ssize_t len = 0;
9935 + int queue;
9936 +
9937 + for (queue = 0; queue < 16; queue++)
9938 + len += tmu_queue_stats(buf + len, tmu, queue);
9939 +
9940 + return len;
9941 +}
9942 +
9943 +static ssize_t block_version(char *buf, void *addr)
9944 +{
9945 + ssize_t len = 0;
9946 + u32 val;
9947 +
9948 + val = readl(addr);
9949 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
9950 + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
9951 +
9952 + return len;
9953 +}
9954 +
9955 +static ssize_t bmu(char *buf, int id, void *base)
9956 +{
9957 + ssize_t len = 0;
9958 +
9959 + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
9960 +
9961 + len += block_version(buf + len, base + BMU_VERSION);
9962 +
9963 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
9964 + BMU_BUF_SIZE)));
9965 + len += sprintf(buf + len, " buf count: %x\n", readl(base +
9966 + BMU_BUF_CNT));
9967 + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
9968 + BMU_REM_BUF_CNT));
9969 + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
9970 + BMU_CURR_BUF_CNT));
9971 + len += sprintf(buf + len, " free err: %x\n", readl(base +
9972 + BMU_FREE_ERR_ADDR));
9973 +
9974 + return len;
9975 +}
9976 +
9977 +static ssize_t gpi(char *buf, int id, void *base)
9978 +{
9979 + ssize_t len = 0;
9980 + u32 val;
9981 +
9982 + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
9983 + len += block_version(buf + len, base + GPI_VERSION);
9984 +
9985 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
9986 + GPI_FIFO_STATUS));
9987 + val = readl(base + GPI_FIFO_DEBUG);
9988 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
9989 + 0x3f);
9990 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
9991 + 0x3f);
9992 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
9993 + 0x1ff);
9994 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
9995 + 0x1ff);
9996 + len += sprintf(buf + len, " overrun: %x\n", readl(base +
9997 + GPI_OVERRUN_DROPCNT));
9998 +
9999 + return len;
10000 +}
10001 +
10002 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10003 + const char *buf, size_t count)
10004 +{
10005 +	if (kstrtoul(buf, 0, &class_do_clear))
10006 +		return -EINVAL;
10007 +	return count;
10007 +}
10008 +
10009 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10010 + char *buf)
10011 +{
10012 + ssize_t len = 0;
10013 + int id;
10014 + u32 val;
10015 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10016 +
10017 + len += block_version(buf + len, CLASS_VERSION);
10018 +
10019 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10020 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10021 +
10022 + val = readl(CLASS_PE0_DEBUG + id * 4);
10023 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10024 +
10025 + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10026 + class_do_clear);
10027 + }
10028 + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10029 + cpumon->class_usage_pct);
10030 +
10031 + len += sprintf(buf + len, "pe status: 0x%x\n",
10032 + readl(CLASS_PE_STATUS));
10033 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
10034 + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10035 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
10036 + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10037 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
10038 +
10039 + len += class_phy_stats(buf + len, 0);
10040 + len += class_phy_stats(buf + len, 1);
10041 + len += class_phy_stats(buf + len, 2);
10042 + len += class_phy_stats(buf + len, 3);
10043 +
10044 + return len;
10045 +}
10046 +
10047 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10048 + const char *buf, size_t count)
10049 +{
10050 +	if (kstrtoul(buf, 0, &tmu_do_clear))
10051 +		return -EINVAL;
10052 +	return count;
10052 +}
10053 +
10054 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10055 + char *buf)
10056 +{
10057 + ssize_t len = 0;
10058 + int id;
10059 + u32 val;
10060 +
10061 + len += block_version(buf + len, TMU_VERSION);
10062 +
10063 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10064 + if (id == TMU2_ID)
10065 + continue;
10066 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10067 +
10068 + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10069 + tmu_do_clear);
10070 + }
10071 +
10072 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
10073 + len += sprintf(buf + len, "inq fifo cnt: %x\n",
10074 + readl(TMU_PHY_INQ_FIFO_CNT));
10075 + val = readl(TMU_INQ_STAT);
10076 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
10077 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
10078 +
10079 + return len;
10080 +}
10081 +
10082 +static unsigned long drops_do_clear;
10083 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10084 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10085 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10086 +#endif
10087 +
10088 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10089 + "ICC",
10090 + "Host Pkt Error",
10091 + "Rx Error",
10092 + "IPsec Outbound",
10093 + "IPsec Inbound",
10094 + "EXPT IPsec Error",
10095 + "Reassembly",
10096 + "Fragmenter",
10097 + "NAT-T",
10098 + "Socket",
10099 + "Multicast",
10100 + "NAT-PT",
10101 + "Tx Disabled",
10102 +};
10103 +
10104 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10105 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10106 + "IPsec Outbound",
10107 + "IPsec Inbound",
10108 + "IPsec Rate Limiter",
10109 + "Fragmenter",
10110 + "Socket",
10111 + "Tx Disabled",
10112 + "Rx Error",
10113 +};
10114 +#endif
10115 +
10116 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10117 + const char *buf, size_t count)
10118 +{
10119 +	if (kstrtoul(buf, 0, &drops_do_clear))
10120 +		return -EINVAL;
10121 +	return count;
10121 +}
10122 +
10123 +static u32 tmu_drops[4][16];
10124 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10125 + char *buf)
10126 +{
10127 + ssize_t len = 0;
10128 + int id, dropnum;
10129 + int tmu, queue;
10130 + u32 val;
10131 + u32 dmem_addr;
10132 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10133 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10134 +
10135 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
10136 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10137 + if (drops_do_clear)
10138 + pe_sync_stop(ctrl, (1 << id));
10139 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10140 + dropnum++) {
10141 +			/* counters assumed consecutive 32-bit words in DMEM */
10142 +			dmem_addr = CLASS_DM_DROP_CNTR + dropnum * 4;
10142 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10143 + class_drop_counter[dropnum] += val;
10144 + num_class_drops += val;
10145 + if (drops_do_clear)
10146 + pe_dmem_write(id, 0, dmem_addr, 4);
10147 + }
10148 + if (drops_do_clear)
10149 + pe_start(ctrl, (1 << id));
10150 + }
10151 +
10152 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10153 + if (drops_do_clear)
10154 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10155 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10156 +		/* counters assumed consecutive 32-bit words in DMEM */
10157 +		dmem_addr = UTIL_DM_DROP_CNTR + dropnum * 4;
10157 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10158 + util_drop_counter[dropnum] = val;
10159 + num_util_drops += val;
10160 + if (drops_do_clear)
10161 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10162 + }
10163 + if (drops_do_clear)
10164 + pe_start(ctrl, (1 << UTIL_ID));
10165 +#endif
10166 + for (tmu = 0; tmu < 4; tmu++) {
10167 + for (queue = 0; queue < 16; queue++) {
10168 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10169 + drops_do_clear);
10170 + num_tmu_drops += tmu_drops[tmu][queue];
10171 + }
10172 + }
10173 +
10174 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10175 + len += sprintf(buf + len, "No PE drops\n\n");
10176 +
10177 + if (num_class_drops > 0) {
10178 + len += sprintf(buf + len, "Class PE drops --\n");
10179 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10180 + dropnum++) {
10181 + if (class_drop_counter[dropnum] > 0)
10182 + len += sprintf(buf + len, " %s: %d\n",
10183 + class_drop_description[dropnum],
10184 + class_drop_counter[dropnum]);
10185 + }
10186 + len += sprintf(buf + len, "\n");
10187 + }
10188 +
10189 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10190 + if (num_util_drops > 0) {
10191 + len += sprintf(buf + len, "Util PE drops --\n");
10192 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10193 + if (util_drop_counter[dropnum] > 0)
10194 + len += sprintf(buf + len, " %s: %d\n",
10195 + util_drop_description[dropnum],
10196 + util_drop_counter[dropnum]);
10197 + }
10198 + len += sprintf(buf + len, "\n");
10199 + }
10200 +#endif
10201 + if (num_tmu_drops > 0) {
10202 + len += sprintf(buf + len, "TMU drops --\n");
10203 + for (tmu = 0; tmu < 4; tmu++) {
10204 + for (queue = 0; queue < 16; queue++) {
10205 + if (tmu_drops[tmu][queue] > 0)
10206 +					len += sprintf(buf + len,
10207 +						       " TMU%d-Q%d: %d\n",
10208 +						       tmu, queue,
10209 +						       tmu_drops[tmu][queue]);
10209 + }
10210 + }
10211 + len += sprintf(buf + len, "\n");
10212 + }
10213 +
10214 + return len;
10215 +}
10216 +
10217 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
10218 + *attr, char *buf)
10219 +{
10220 + return tmu_queues(buf, 0);
10221 +}
10222 +
10223 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
10224 + *attr, char *buf)
10225 +{
10226 + return tmu_queues(buf, 1);
10227 +}
10228 +
10229 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
10230 + *attr, char *buf)
10231 +{
10232 + return tmu_queues(buf, 2);
10233 +}
10234 +
10235 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
10236 + *attr, char *buf)
10237 +{
10238 + return tmu_queues(buf, 3);
10239 +}
10240 +
10241 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10242 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10243 + const char *buf, size_t count)
10244 +{
10245 +	if (kstrtoul(buf, 0, &util_do_clear))
10246 +		return -EINVAL;
10247 +	return count;
10247 +}
10248 +
10249 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10250 + char *buf)
10251 +{
10252 + ssize_t len = 0;
10253 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10254 +
10255 + len += block_version(buf + len, UTIL_VERSION);
10256 +
10257 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10258 + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10259 + util_do_clear);
10260 + pe_start(ctrl, (1 << UTIL_ID));
10261 +
10262 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
10263 + len += sprintf(buf + len, "max buf cnt: %x\n",
10264 + readl(UTIL_MAX_BUF_CNT));
10265 + len += sprintf(buf + len, "tsq max cnt: %x\n",
10266 + readl(UTIL_TSQ_MAX_CNT));
10267 +
10268 + return len;
10269 +}
10270 +#endif
10271 +
10272 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10273 + char *buf)
10274 +{
10275 + ssize_t len = 0;
10276 +
10277 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10278 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10279 +
10280 + return len;
10281 +}
10282 +
10283 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10284 + char *buf)
10285 +{
10286 + ssize_t len = 0;
10287 +
10288 + len += sprintf(buf + len, "hif:\n ");
10289 + len += block_version(buf + len, HIF_VERSION);
10290 +
10291 + len += sprintf(buf + len, " tx curr bd: %x\n",
10292 + readl(HIF_TX_CURR_BD_ADDR));
10293 + len += sprintf(buf + len, " tx status: %x\n",
10294 + readl(HIF_TX_STATUS));
10295 + len += sprintf(buf + len, " tx dma status: %x\n",
10296 + readl(HIF_TX_DMA_STATUS));
10297 +
10298 + len += sprintf(buf + len, " rx curr bd: %x\n",
10299 + readl(HIF_RX_CURR_BD_ADDR));
10300 + len += sprintf(buf + len, " rx status: %x\n",
10301 + readl(HIF_RX_STATUS));
10302 + len += sprintf(buf + len, " rx dma status: %x\n",
10303 + readl(HIF_RX_DMA_STATUS));
10304 +
10305 + len += sprintf(buf + len, "hif nocopy:\n ");
10306 + len += block_version(buf + len, HIF_NOCPY_VERSION);
10307 +
10308 + len += sprintf(buf + len, " tx curr bd: %x\n",
10309 + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10310 + len += sprintf(buf + len, " tx status: %x\n",
10311 + readl(HIF_NOCPY_TX_STATUS));
10312 + len += sprintf(buf + len, " tx dma status: %x\n",
10313 + readl(HIF_NOCPY_TX_DMA_STATUS));
10314 +
10315 + len += sprintf(buf + len, " rx curr bd: %x\n",
10316 + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10317 + len += sprintf(buf + len, " rx status: %x\n",
10318 + readl(HIF_NOCPY_RX_STATUS));
10319 + len += sprintf(buf + len, " rx dma status: %x\n",
10320 + readl(HIF_NOCPY_RX_DMA_STATUS));
10321 +
10322 + return len;
10323 +}
10324 +
10325 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10326 + char *buf)
10327 +{
10328 + ssize_t len = 0;
10329 +
10330 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10331 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10332 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10333 +
10334 + return len;
10335 +}
10336 +
10337 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
10338 + *attr, char *buf)
10339 +{
10340 + ssize_t len = 0;
10341 + struct pfe_memmon *memmon = &pfe->memmon;
10342 +
10343 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10344 + memmon->kernel_memory_allocated,
10345 + (memmon->kernel_memory_allocated + 1023) / 1024);
10346 +
10347 + return len;
10348 +}
10349 +
10350 +#ifdef HIF_NAPI_STATS
10351 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
10352 + struct device_attribute *attr,
10353 + char *buf)
10354 +{
10355 + struct platform_device *pdev = to_platform_device(dev);
10356 + struct pfe *pfe = platform_get_drvdata(pdev);
10357 + ssize_t len = 0;
10358 +
10359 + len += sprintf(buf + len, "sched: %u\n",
10360 + pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
10361 + len += sprintf(buf + len, "poll: %u\n",
10362 + pfe->hif.napi_counters[NAPI_POLL_COUNT]);
10363 + len += sprintf(buf + len, "packet: %u\n",
10364 + pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
10365 + len += sprintf(buf + len, "budget: %u\n",
10366 + pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
10367 + len += sprintf(buf + len, "desc: %u\n",
10368 + pfe->hif.napi_counters[NAPI_DESC_COUNT]);
10369 + len += sprintf(buf + len, "full: %u\n",
10370 + pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
10371 +
10372 + return len;
10373 +}
10374 +
10375 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
10376 + struct device_attribute *attr,
10377 + const char *buf, size_t count)
10378 +{
10379 + struct platform_device *pdev = to_platform_device(dev);
10380 + struct pfe *pfe = platform_get_drvdata(pdev);
10381 +
10382 + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
10383 +
10384 + return count;
10385 +}
10386 +
10387 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
10388 + pfe_set_hif_napi_stats);
10389 +#endif
10390 +
10391 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
10392 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
10393 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10394 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
10395 +#endif
10396 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
10397 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
10398 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
10399 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
10400 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
10401 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
10402 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
10403 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
10404 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
10405 +
10406 +int pfe_sysfs_init(struct pfe *pfe)
10407 +{
10408 + if (device_create_file(pfe->dev, &dev_attr_class))
10409 + goto err_class;
10410 +
10411 + if (device_create_file(pfe->dev, &dev_attr_tmu))
10412 + goto err_tmu;
10413 +
10414 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10415 + if (device_create_file(pfe->dev, &dev_attr_util))
10416 + goto err_util;
10417 +#endif
10418 +
10419 + if (device_create_file(pfe->dev, &dev_attr_bmu))
10420 + goto err_bmu;
10421 +
10422 + if (device_create_file(pfe->dev, &dev_attr_hif))
10423 + goto err_hif;
10424 +
10425 + if (device_create_file(pfe->dev, &dev_attr_gpi))
10426 + goto err_gpi;
10427 +
10428 + if (device_create_file(pfe->dev, &dev_attr_drops))
10429 + goto err_drops;
10430 +
10431 + if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
10432 + goto err_tmu0_queues;
10433 +
10434 + if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
10435 + goto err_tmu1_queues;
10436 +
10437 + if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
10438 + goto err_tmu2_queues;
10439 +
10440 + if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
10441 + goto err_tmu3_queues;
10442 +
10443 + if (device_create_file(pfe->dev, &dev_attr_pfemem))
10444 + goto err_pfemem;
10445 +
10446 +#ifdef HIF_NAPI_STATS
10447 + if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
10448 + goto err_hif_napi_stats;
10449 +#endif
10450 +
10451 + return 0;
10452 +
10453 +#ifdef HIF_NAPI_STATS
10454 +err_hif_napi_stats:
10455 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10456 +#endif
10457 +
10458 +err_pfemem:
10459 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10460 +
10461 +err_tmu3_queues:
10462 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10463 +
10464 +err_tmu2_queues:
10465 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10466 +
10467 +err_tmu1_queues:
10468 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10469 +
10470 +err_tmu0_queues:
10471 + device_remove_file(pfe->dev, &dev_attr_drops);
10472 +
10473 +err_drops:
10474 + device_remove_file(pfe->dev, &dev_attr_gpi);
10475 +
10476 +err_gpi:
10477 + device_remove_file(pfe->dev, &dev_attr_hif);
10478 +
10479 +err_hif:
10480 + device_remove_file(pfe->dev, &dev_attr_bmu);
10481 +
10482 +err_bmu:
10483 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10484 + device_remove_file(pfe->dev, &dev_attr_util);
10485 +
10486 +err_util:
10487 +#endif
10488 + device_remove_file(pfe->dev, &dev_attr_tmu);
10489 +
10490 +err_tmu:
10491 + device_remove_file(pfe->dev, &dev_attr_class);
10492 +
10493 +err_class:
10494 + return -1;
10495 +}
10496 +
10497 +void pfe_sysfs_exit(struct pfe *pfe)
10498 +{
10499 +#ifdef HIF_NAPI_STATS
10500 + device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
10501 +#endif
10502 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10503 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10504 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10505 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10506 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10507 + device_remove_file(pfe->dev, &dev_attr_drops);
10508 + device_remove_file(pfe->dev, &dev_attr_gpi);
10509 + device_remove_file(pfe->dev, &dev_attr_hif);
10510 + device_remove_file(pfe->dev, &dev_attr_bmu);
10511 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10512 + device_remove_file(pfe->dev, &dev_attr_util);
10513 +#endif
10514 + device_remove_file(pfe->dev, &dev_attr_tmu);
10515 + device_remove_file(pfe->dev, &dev_attr_class);
10516 +}
10517 --- /dev/null
10518 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
10519 @@ -0,0 +1,29 @@
10520 +/*
10521 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10522 + * Copyright 2017 NXP
10523 + *
10524 + * This program is free software; you can redistribute it and/or modify
10525 + * it under the terms of the GNU General Public License as published by
10526 + * the Free Software Foundation; either version 2 of the License, or
10527 + * (at your option) any later version.
10528 + *
10529 + * This program is distributed in the hope that it will be useful,
10530 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10531 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10532 + * GNU General Public License for more details.
10533 + *
10534 + * You should have received a copy of the GNU General Public License
10535 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
10536 + */
10537 +
10538 +#ifndef _PFE_SYSFS_H_
10539 +#define _PFE_SYSFS_H_
10540 +
10541 +#include <linux/proc_fs.h>
10542 +
10543 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
10544 +
10545 +int pfe_sysfs_init(struct pfe *pfe);
10546 +void pfe_sysfs_exit(struct pfe *pfe);
10547 +
10548 +#endif /* _PFE_SYSFS_H_ */