[openwrt/openwrt.git] / target / linux / layerscape / patches-4.9 / 706-fsl_ppfe-support-layercape.patch
1 From 8b7935a883d42187716fe486c83352f24d01ddcd Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 19 Oct 2017 12:48:19 +0800
4 Subject: [PATCH] fsl_ppfe: support layerscape
5
6 This is an integrated patch for layerscape pfe support.
7
8 Calvin Johnson <calvin.johnson@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/staging/fsl_ppfe/Kconfig | 20 +
12 drivers/staging/fsl_ppfe/Makefile | 19 +
13 drivers/staging/fsl_ppfe/TODO | 2 +
14 drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
15 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
16 .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 289 +++
17 .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
18 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
19 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
20 .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
21 .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
22 .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
23 drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
24 drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
25 drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
26 drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
27 drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
28 drivers/staging/fsl_ppfe/pfe_eth.c | 2434 ++++++++++++++++++++
29 drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
30 drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
31 drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
32 drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++++
33 drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++++
34 drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
35 drivers/staging/fsl_ppfe/pfe_hif_lib.c | 601 +++++
36 drivers/staging/fsl_ppfe/pfe_hif_lib.h | 239 ++
37 drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
38 drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
39 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 394 ++++
40 drivers/staging/fsl_ppfe/pfe_mod.c | 141 ++
41 drivers/staging/fsl_ppfe/pfe_mod.h | 112 +
42 drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
43 drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 +++++++
44 drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
45 34 files changed, 10366 insertions(+)
46 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
47 create mode 100644 drivers/staging/fsl_ppfe/Makefile
48 create mode 100644 drivers/staging/fsl_ppfe/TODO
49 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
50 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
51 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
52 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
53 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
54 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
55 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
56 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
57 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
58 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
59 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
60 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
61 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
62 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
63 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
64 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
65 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
66 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
67 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
68 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
69 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
70 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
71 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
72 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
73 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
74 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
75 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
76 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
77 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
78 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
79 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
80
81 diff --git a/drivers/staging/fsl_ppfe/Kconfig b/drivers/staging/fsl_ppfe/Kconfig
82 new file mode 100644
83 index 00000000..e4096435
84 --- /dev/null
85 +++ b/drivers/staging/fsl_ppfe/Kconfig
86 @@ -0,0 +1,20 @@
87 +#
88 +# Freescale Programmable Packet Forwarding Engine driver
89 +#
90 +config FSL_PPFE
91 + bool "Freescale PPFE Driver"
92 + default n
93 + ---help---
94 + The Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
95 + It provides two high-performance Ethernet interfaces.
96 + This driver initializes, programs and controls the PPFE.
97 + Use this driver to enable network connectivity on LS1012A platforms.
98 +
99 +if FSL_PPFE
100 +
101 +config FSL_PPFE_UTIL_DISABLED
102 + bool "Disable PPFE UTIL Processor Engine"
103 + ---help---
104 + Enable the UTIL PE only if required.
105 +
106 +endif # FSL_PPFE
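
For reference, these options land in the kernel configuration as CONFIG_FSL_PPFE and CONFIG_FSL_PPFE_UTIL_DISABLED. A plausible .config fragment for an LS1012A build that keeps the UTIL PE enabled would be the following (illustrative only, not part of the patch):

CONFIG_FSL_PPFE=y
# CONFIG_FSL_PPFE_UTIL_DISABLED is not set
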
107 diff --git a/drivers/staging/fsl_ppfe/Makefile b/drivers/staging/fsl_ppfe/Makefile
108 new file mode 100644
109 index 00000000..07cd351b
110 --- /dev/null
111 +++ b/drivers/staging/fsl_ppfe/Makefile
112 @@ -0,0 +1,19 @@
113 +#
114 +# Makefile for Freescale PPFE driver
115 +#
116 +
117 +ccflags-y += -I$(src)/include -I$(src)
118 +
119 +obj-m += pfe.o
120 +
121 +pfe-y += pfe_mod.o \
122 + pfe_hw.o \
123 + pfe_firmware.o \
124 + pfe_ctrl.o \
125 + pfe_hif.o \
126 + pfe_hif_lib.o \
127 + pfe_eth.o \
128 + pfe_sysfs.o \
129 + pfe_debugfs.o \
130 + pfe_ls1012a_platform.o \
131 + pfe_hal.o
132 diff --git a/drivers/staging/fsl_ppfe/TODO b/drivers/staging/fsl_ppfe/TODO
133 new file mode 100644
134 index 00000000..43c48ccd
135 --- /dev/null
136 +++ b/drivers/staging/fsl_ppfe/TODO
137 @@ -0,0 +1,2 @@
138 +TODO:
139 + - provide pfe pe monitoring support
140 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus.h b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
141 new file mode 100644
142 index 00000000..04503d28
143 --- /dev/null
144 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
145 @@ -0,0 +1,78 @@
146 +/*
147 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
148 + * Copyright 2017 NXP
149 + *
150 + * This program is free software; you can redistribute it and/or modify
151 + * it under the terms of the GNU General Public License as published by
152 + * the Free Software Foundation; either version 2 of the License, or
153 + * (at your option) any later version.
154 + *
155 + * This program is distributed in the hope that it will be useful,
156 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
157 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
158 + * GNU General Public License for more details.
159 + *
160 + * You should have received a copy of the GNU General Public License
161 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
162 + */
163 +
164 +#ifndef _CBUS_H_
165 +#define _CBUS_H_
166 +
167 +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
168 +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
169 +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
170 +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
171 +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
172 +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
173 +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
174 +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
175 +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
176 +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
177 +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
178 +#define LMEM_SIZE 0x10000
179 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
180 +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
181 +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
182 +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
183 +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
184 +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
185 +
186 +/*
187 + * PE memory access through CSR:
188 + * XXX_MEM_ACCESS_ADDR register bit definitions.
189 + */
190 +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
191 +#define PE_MEM_ACCESS_IMEM BIT(15)
192 +#define PE_MEM_ACCESS_DMEM BIT(16)
193 +
194 +/* Byte enables of the internal memory access. These are interpreted in BE. */
195 +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
196 + ({ typeof(size) size_ = (size); \
197 + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
198 +
199 +#include "cbus/emac_mtip.h"
200 +#include "cbus/gpi.h"
201 +#include "cbus/bmu.h"
202 +#include "cbus/hif.h"
203 +#include "cbus/tmu_csr.h"
204 +#include "cbus/class_csr.h"
205 +#include "cbus/hif_nocpy.h"
206 +#include "cbus/util_csr.h"
207 +
208 +/* PFE cores states */
209 +#define CORE_DISABLE 0x00000000
210 +#define CORE_ENABLE 0x00000001
211 +#define CORE_SW_RESET 0x00000002
212 +
213 +/* LMEM defines */
214 +#define LMEM_HDR_SIZE 0x0010
215 +#define LMEM_BUF_SIZE_LN2 0x7
216 +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
217 +
218 +/* DDR defines */
219 +#define DDR_HDR_SIZE 0x0100
220 +#define DDR_BUF_SIZE_LN2 0xb
221 +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
222 +
223 +#endif /* _CBUS_H_ */
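
To make the PE_MEM_ACCESS_* bits above concrete, here is a minimal sketch of composing the command word for a 4-byte write into a PE's DMEM through the CSR interface. It is illustrative only: the helper name and the 0xfffff address mask are assumptions, and the register that actually consumes this word (e.g. CLASS_MEM_ACCESS_ADDR) lives in the per-block headers included above.

/* Sketch, assuming cbus.h is in scope. For offset 0 and size 4,
 * PE_MEM_ACCESS_BYTE_ENABLE(0, 4) yields 0x0f000000 (all four byte
 * enables set in bits 27:24).
 */
static u32 pe_dmem_write_cmd(u32 pe_addr)
{
	return PE_MEM_ACCESS_WRITE |             /* [31]: write, not read */
	       PE_MEM_ACCESS_DMEM |              /* select DMEM, not IMEM */
	       PE_MEM_ACCESS_BYTE_ENABLE(0, 4) | /* full 32-bit access */
	       (pe_addr & 0xfffff);              /* PE-internal address */
}
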
224 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
225 new file mode 100644
226 index 00000000..87738ca3
227 --- /dev/null
228 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
229 @@ -0,0 +1,55 @@
230 +/*
231 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
232 + * Copyright 2017 NXP
233 + *
234 + * This program is free software; you can redistribute it and/or modify
235 + * it under the terms of the GNU General Public License as published by
236 + * the Free Software Foundation; either version 2 of the License, or
237 + * (at your option) any later version.
238 + *
239 + * This program is distributed in the hope that it will be useful,
240 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
241 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
242 + * GNU General Public License for more details.
243 + *
244 + * You should have received a copy of the GNU General Public License
245 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
246 + */
247 +
248 +#ifndef _BMU_H_
249 +#define _BMU_H_
250 +
251 +#define BMU_VERSION 0x000
252 +#define BMU_CTRL 0x004
253 +#define BMU_UCAST_CONFIG 0x008
254 +#define BMU_UCAST_BASE_ADDR 0x00c
255 +#define BMU_BUF_SIZE 0x010
256 +#define BMU_BUF_CNT 0x014
257 +#define BMU_THRES 0x018
258 +#define BMU_INT_SRC 0x020
259 +#define BMU_INT_ENABLE 0x024
260 +#define BMU_ALLOC_CTRL 0x030
261 +#define BMU_FREE_CTRL 0x034
262 +#define BMU_FREE_ERR_ADDR 0x038
263 +#define BMU_CURR_BUF_CNT 0x03c
264 +#define BMU_MCAST_CNT 0x040
265 +#define BMU_MCAST_ALLOC_CTRL 0x044
266 +#define BMU_REM_BUF_CNT 0x048
267 +#define BMU_LOW_WATERMARK 0x050
268 +#define BMU_HIGH_WATERMARK 0x054
269 +#define BMU_INT_MEM_ACCESS 0x100
270 +
271 +struct BMU_CFG {
272 + unsigned long baseaddr;
273 + u32 count;
274 + u32 size;
275 + u32 low_watermark;
276 + u32 high_watermark;
277 +};
278 +
279 +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
280 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
281 +
282 +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
283 +
284 +#endif /* _BMU_H_ */
285 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
286 new file mode 100644
287 index 00000000..e4dadff5
288 --- /dev/null
289 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
290 @@ -0,0 +1,289 @@
291 +/*
292 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
293 + * Copyright 2017 NXP
294 + *
295 + * This program is free software; you can redistribute it and/or modify
296 + * it under the terms of the GNU General Public License as published by
297 + * the Free Software Foundation; either version 2 of the License, or
298 + * (at your option) any later version.
299 + *
300 + * This program is distributed in the hope that it will be useful,
301 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
302 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
303 + * GNU General Public License for more details.
304 + *
305 + * You should have received a copy of the GNU General Public License
306 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
307 + */
308 +
309 +#ifndef _CLASS_CSR_H_
310 +#define _CLASS_CSR_H_
311 +
312 +/* @file class_csr.h.
313 + * class_csr - block containing all the classifier control and status registers.
314 + * Mapped on CBUS and accessible from all PEs and the ARM.
315 + */
316 +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
317 +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
318 +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
319 +
320 +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
321 +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
322 +
323 +/* LMEM header size for the Classifier block. Data in the LMEM
324 + * is written from this offset.
325 + */
326 +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
327 +
328 +/* DDR header size for the Classifier block. Data in the DDR
329 + * is written from this offset.
330 + */
331 +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
332 +
333 +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
334 +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
335 +
336 +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
337 +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
338 +
339 +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
340 +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
341 +
342 +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
343 +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
344 +
345 +
346 +/* @name Class PE memory access. Allows external PEs and the host to
347 + * read/write PMEM/DMEM memory ranges for each classifier PE.
348 + */
349 +/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
350 + * See XXX_MEM_ACCESS_ADDR for details.
351 + */
352 +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
353 +
354 +/* Internal Memory Access Write Data [31:0] */
355 +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
356 +
357 +/* Internal Memory Access Read Data [31:0] */
358 +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
359 +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
360 +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
361 +
362 +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
363 +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
364 +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
365 +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
366 +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
367 +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
368 +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
369 +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
370 +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
371 +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
372 +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
373 +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
374 +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
375 +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
376 +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
377 +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
378 +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
379 +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
380 +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
381 +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
382 +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
383 +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
384 +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
385 +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
386 +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
387 +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
388 +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
389 +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
390 +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
391 +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
392 +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
393 +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
394 +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
395 +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
396 +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
397 +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
398 +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
399 +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
400 +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
401 +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
402 +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
403 +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
404 +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
405 +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
406 +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
407 +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
408 +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
409 +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
410 +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
411 +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
412 +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
413 +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
414 +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
415 +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
416 +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
417 +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
418 +
419 +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
420 +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
421 +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
422 +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
423 +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
424 +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
425 +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
426 +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
427 +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
428 +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
429 +
430 +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
431 +
432 +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
433 +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
434 +
435 +/* (route_entry_size[9:0], route_hash_size[23:16];
436 + * the hash size is actually ln2(size))
437 + */
438 +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
439 +
440 +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
441 +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
442 +
443 +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
444 +
445 +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
446 +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
447 +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
448 +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
449 +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
450 +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
451 +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
452 +
453 +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
454 +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
455 +/* bits 31:24 of the PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
456 +
457 +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
458 +
459 +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
460 +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
461 +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
462 +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
463 +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
464 +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
465 +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
466 +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
467 +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
468 +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
469 +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
470 +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
471 +
472 +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
473 +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
474 +
475 +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
476 +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
477 +
478 +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
479 +
480 +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
481 +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
482 +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
483 +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
484 +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
485 +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
486 +
487 +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
488 +
489 +/* CLASS defines */
490 +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
491 +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
492 +
493 +/* Can be configured */
494 +#define CLASS_PBUF0_BASE_ADDR 0x000
495 +/* Can be configured */
496 +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
497 +/* Can be configured */
498 +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
499 +/* Can be configured */
500 +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
501 +
502 +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
503 + CLASS_PBUF_HEADER_OFFSET)
504 +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
505 + CLASS_PBUF_HEADER_OFFSET)
506 +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
507 + CLASS_PBUF_HEADER_OFFSET)
508 +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
509 + CLASS_PBUF_HEADER_OFFSET)
510 +
511 +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
512 + CLASS_PBUF0_BASE_ADDR)
513 +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
514 + CLASS_PBUF2_BASE_ADDR)
515 +
516 +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
517 + CLASS_PBUF0_HEADER_BASE_ADDR)
518 +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
519 + CLASS_PBUF2_HEADER_BASE_ADDR)
520 +
521 +#define CLASS_ROUTE_SIZE 128
522 +#define CLASS_MAX_ROUTE_SIZE 256
523 +#define CLASS_ROUTE_HASH_BITS 20
524 +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
525 +
526 +/* Can be configured */
527 +#define CLASS_ROUTE0_BASE_ADDR 0x400
528 +/* Can be configured */
529 +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
530 +/* Can be configured */
531 +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
532 +/* Can be configured */
533 +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
534 +
535 +#define CLASS_SA_SIZE 128
536 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
537 +/* not used */
538 +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
539 +/* not used */
540 +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
541 +/* not used */
542 +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
543 +
544 +/* general-purpose free dmem buffer, last portion of 2K dmem pbuf */
545 +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
546 + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
547 +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
548 + CLASS_SA_SIZE))
549 +
550 +#define TWO_LEVEL_ROUTE BIT(0)
551 +#define PHYNO_IN_HASH BIT(1)
552 +#define HW_ROUTE_FETCH BIT(3)
553 +#define HW_BRIDGE_FETCH BIT(5)
554 +#define IP_ALIGNED BIT(6)
555 +#define ARC_HIT_CHECK_EN BIT(7)
556 +#define CLASS_TOE BIT(11)
557 +#define HASH_NORMAL (0 << 12)
558 +#define HASH_CRC_PORT BIT(12)
559 +#define HASH_CRC_IP (2 << 12)
560 +#define HASH_CRC_PORT_IP (3 << 12)
561 +#define QB2BUS_LE BIT(15)
562 +
563 +#define TCP_CHKSUM_DROP BIT(0)
564 +#define UDP_CHKSUM_DROP BIT(1)
565 +#define IPV4_CHKSUM_DROP BIT(9)
566 +
567 +/* CLASS_HIF_PARSE bits */
568 +#define HIF_PKT_CLASS_EN BIT(0)
569 +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
570 +
571 +struct class_cfg {
572 + u32 toe_mode;
573 + unsigned long route_table_baseaddr;
574 + u32 route_table_hash_bits;
575 + u32 pe_sys_clk_ratio;
576 + u32 resume;
577 +};
578 +
579 +#endif /* _CLASS_CSR_H_ */
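
As a sketch of how the packed-field helpers in this header combine with the sizes from cbus.h, the values a driver might program into CLASS_HDR_SIZE and CLASS_ROUTE_HASH_ENTRY_SIZE could be built as below. Only the value composition is shown; the register writes are omitted and the function name is hypothetical.

static void class_cfg_values_example(u32 *hdr, u32 *route)
{
	/* LMEM_HDR_SIZE and DDR_HDR_SIZE come from cbus.h. */
	*hdr = CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) |
	       CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE);

	/* 128-byte route entries, 2^20 hash buckets (defaults above). */
	*route = CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
		 CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS);
}
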
580 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
581 new file mode 100644
582 index 00000000..9c5d7919
583 --- /dev/null
584 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
585 @@ -0,0 +1,242 @@
586 +/*
587 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
588 + * Copyright 2017 NXP
589 + *
590 + * This program is free software; you can redistribute it and/or modify
591 + * it under the terms of the GNU General Public License as published by
592 + * the Free Software Foundation; either version 2 of the License, or
593 + * (at your option) any later version.
594 + *
595 + * This program is distributed in the hope that it will be useful,
596 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
597 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
598 + * GNU General Public License for more details.
599 + *
600 + * You should have received a copy of the GNU General Public License
601 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
602 + */
603 +
604 +#ifndef _EMAC_H_
605 +#define _EMAC_H_
606 +
607 +#include <linux/ethtool.h>
608 +
609 +#define EMAC_IEVENT_REG 0x004
610 +#define EMAC_IMASK_REG 0x008
611 +#define EMAC_R_DES_ACTIVE_REG 0x010
612 +#define EMAC_X_DES_ACTIVE_REG 0x014
613 +#define EMAC_ECNTRL_REG 0x024
614 +#define EMAC_MII_DATA_REG 0x040
615 +#define EMAC_MII_CTRL_REG 0x044
616 +#define EMAC_MIB_CTRL_STS_REG 0x064
617 +#define EMAC_RCNTRL_REG 0x084
618 +#define EMAC_TCNTRL_REG 0x0C4
619 +#define EMAC_PHY_ADDR_LOW 0x0E4
620 +#define EMAC_PHY_ADDR_HIGH 0x0E8
621 +#define EMAC_GAUR 0x120
622 +#define EMAC_GALR 0x124
623 +#define EMAC_TFWR_STR_FWD 0x144
624 +#define EMAC_RX_SECTION_FULL 0x190
625 +#define EMAC_RX_SECTION_EMPTY 0x194
626 +#define EMAC_TX_SECTION_EMPTY 0x1A0
627 +#define EMAC_TRUNC_FL 0x1B0
628 +
629 +#define RMON_T_DROP 0x200 /* Count of frames not counted correctly */
630 +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
631 +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
632 +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
633 +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
634 +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
635 +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
636 +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
637 +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
638 +#define RMON_T_COL 0x224 /* RMON TX collision count */
639 +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
640 +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
641 +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
642 +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
643 +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
644 +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
645 +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
646 +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
647 +#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
648 +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
649 +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
650 +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
651 +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
652 +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
653 +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
654 +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
655 +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
656 +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
657 +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
658 +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
659 +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
660 +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
661 +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
662 +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
663 +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
664 +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
665 +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
666 +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
667 +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
668 +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
669 +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
670 +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
671 +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
672 +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
673 +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
674 +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
675 +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
676 +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
677 +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
678 +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
679 +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
680 +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
681 +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
682 +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
683 +
684 +#define EMAC_SMAC_0_0 0x500 /* Supplemental MAC Address 0 (RW). */
685 +#define EMAC_SMAC_0_1 0x504 /* Supplemental MAC Address 0 (RW). */
686 +
687 +/* GEMAC definitions and settings */
688 +
689 +#define EMAC_PORT_0 0
690 +#define EMAC_PORT_1 1
691 +
692 +/* GEMAC Bit definitions */
693 +#define EMAC_IEVENT_HBERR 0x80000000
694 +#define EMAC_IEVENT_BABR 0x40000000
695 +#define EMAC_IEVENT_BABT 0x20000000
696 +#define EMAC_IEVENT_GRA 0x10000000
697 +#define EMAC_IEVENT_TXF 0x08000000
698 +#define EMAC_IEVENT_TXB 0x04000000
699 +#define EMAC_IEVENT_RXF 0x02000000
700 +#define EMAC_IEVENT_RXB 0x01000000
701 +#define EMAC_IEVENT_MII 0x00800000
702 +#define EMAC_IEVENT_EBERR 0x00400000
703 +#define EMAC_IEVENT_LC 0x00200000
704 +#define EMAC_IEVENT_RL 0x00100000
705 +#define EMAC_IEVENT_UN 0x00080000
706 +
707 +#define EMAC_IMASK_HBERR 0x80000000
708 +#define EMAC_IMASK_BABR 0x40000000
709 +#define EMAC_IMASKT_BABT 0x20000000
710 +#define EMAC_IMASK_GRA 0x10000000
711 +#define EMAC_IMASKT_TXF 0x08000000
712 +#define EMAC_IMASK_TXB 0x04000000
713 +#define EMAC_IMASKT_RXF 0x02000000
714 +#define EMAC_IMASK_RXB 0x01000000
715 +#define EMAC_IMASK_MII 0x00800000
716 +#define EMAC_IMASK_EBERR 0x00400000
717 +#define EMAC_IMASK_LC 0x00200000
718 +#define EMAC_IMASKT_RL 0x00100000
719 +#define EMAC_IMASK_UN 0x00080000
720 +
721 +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
722 +#define EMAC_RCNTRL_LOOP 0x00000001
723 +#define EMAC_RCNTRL_DRT 0x00000002
724 +#define EMAC_RCNTRL_MII_MODE 0x00000004
725 +#define EMAC_RCNTRL_PROM 0x00000008
726 +#define EMAC_RCNTRL_BC_REJ 0x00000010
727 +#define EMAC_RCNTRL_FCE 0x00000020
728 +#define EMAC_RCNTRL_RGMII 0x00000040
729 +#define EMAC_RCNTRL_SGMII 0x00000080
730 +#define EMAC_RCNTRL_RMII 0x00000100
731 +#define EMAC_RCNTRL_RMII_10T 0x00000200
732 +#define EMAC_RCNTRL_CRC_FWD 0x00004000
733 +
734 +#define EMAC_TCNTRL_GTS 0x00000001
735 +#define EMAC_TCNTRL_HBC 0x00000002
736 +#define EMAC_TCNTRL_FDEN 0x00000004
737 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
738 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
739 +
740 +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
741 +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
742 +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
743 +#define EMAC_ECNTRL_SLEEP 0x00000008
744 +#define EMAC_ECNTRL_SPEED 0x00000020
745 +#define EMAC_ECNTRL_DBSWAP 0x00000100
746 +
747 +#define EMAC_X_WMRK_STRFWD 0x00000100
748 +
749 +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
750 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
751 +
752 +#define EMAC_RX_SECTION_EMPTY_V 0x00010006
753 +/*
754 + * The possible operating speeds of the MAC, currently supporting 10, 100 and
755 + * 1000Mb modes.
756 + */
757 +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
758 +
759 +/* MII-related definitions */
760 +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
761 +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
762 +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */
763 +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
764 +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */
765 +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
766 +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
767 +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
768 +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
769 +
770 +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
771 +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
772 +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
773 +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
774 +
775 +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
776 + EMAC_MII_DATA_RA_SHIFT)
777 +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
778 + EMAC_MII_DATA_PA_SHIFT)
779 +#define EMAC_MII_DATA(v) ((v) & 0xffff)
780 +
781 +#define EMAC_MII_SPEED_SHIFT 1
782 +#define EMAC_HOLDTIME_SHIFT 8
783 +#define EMAC_HOLDTIME_MASK 0x7
784 +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
785 + EMAC_HOLDTIME_SHIFT)
786 +
787 +/*
788 + * The Address organisation for the MAC device. All addresses are split into
789 + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
790 + * the address and the other field holds the high-order bits - this may be 16 bits
791 + * in the case of MAC addresses, or 32 bits for the hash address.
792 + * In terms of memory storage, the first item (bottom) is assumed to be at a
793 + * lower address location than 'top'. i.e. top should be at address location of
794 + * 'bottom' + 4 bytes.
795 + */
796 +struct pfe_mac_addr {
797 + u32 bottom; /* Lower 32-bits of address. */
798 + u32 top; /* Upper 32-bits of address. */
799 +};
800 +
801 +/*
802 + * The following is the organisation of the address filters section of the MAC
803 + * registers. The Cadence MAC contains four possible specific address match
804 + * addresses, if an incoming frame corresponds to any one of these four
805 + * addresses then the frame will be copied to memory.
806 + * It is not necessary for all four of the address match registers to be
807 + * programmed, this is application dependent.
808 + */
809 +struct spec_addr {
810 + struct pfe_mac_addr one; /* Specific address register 1. */
811 + struct pfe_mac_addr two; /* Specific address register 2. */
812 + struct pfe_mac_addr three; /* Specific address register 3. */
813 + struct pfe_mac_addr four; /* Specific address register 4. */
814 +};
815 +
816 +struct gemac_cfg {
817 + u32 mode;
818 + u32 speed;
819 + u32 duplex;
820 +};
821 +
822 +/* EMAC Hash size */
823 +#define EMAC_HASH_REG_BITS 64
824 +
825 +#define EMAC_SPEC_ADDR_MAX 4
826 +
827 +#endif /* _EMAC_H_ */
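
The bottom/top comment above fully determines how a MAC address is split across the two register words. A sketch of the packing follows; the byte ordering is a reading of "lower 32-bits" first and should be checked against pfe_eth.c.

static struct pfe_mac_addr pfe_pack_mac_example(const u8 mac[6])
{
	struct pfe_mac_addr addr;

	/* Lower 32 bits of the address go into 'bottom'... */
	addr.bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		      ((u32)mac[3] << 24);
	/* ...and the remaining 16 bits into the low half of 'top'. */
	addr.top = mac[4] | (mac[5] << 8);
	return addr;
}
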
828 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
829 new file mode 100644
830 index 00000000..7b295830
831 --- /dev/null
832 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
833 @@ -0,0 +1,86 @@
834 +/*
835 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
836 + * Copyright 2017 NXP
837 + *
838 + * This program is free software; you can redistribute it and/or modify
839 + * it under the terms of the GNU General Public License as published by
840 + * the Free Software Foundation; either version 2 of the License, or
841 + * (at your option) any later version.
842 + *
843 + * This program is distributed in the hope that it will be useful,
844 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
845 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
846 + * GNU General Public License for more details.
847 + *
848 + * You should have received a copy of the GNU General Public License
849 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
850 + */
851 +
852 +#ifndef _GPI_H_
853 +#define _GPI_H_
854 +
855 +#define GPI_VERSION 0x00
856 +#define GPI_CTRL 0x04
857 +#define GPI_RX_CONFIG 0x08
858 +#define GPI_HDR_SIZE 0x0c
859 +#define GPI_BUF_SIZE 0x10
860 +#define GPI_LMEM_ALLOC_ADDR 0x14
861 +#define GPI_LMEM_FREE_ADDR 0x18
862 +#define GPI_DDR_ALLOC_ADDR 0x1c
863 +#define GPI_DDR_FREE_ADDR 0x20
864 +#define GPI_CLASS_ADDR 0x24
865 +#define GPI_DRX_FIFO 0x28
866 +#define GPI_TRX_FIFO 0x2c
867 +#define GPI_INQ_PKTPTR 0x30
868 +#define GPI_DDR_DATA_OFFSET 0x34
869 +#define GPI_LMEM_DATA_OFFSET 0x38
870 +#define GPI_TMLF_TX 0x4c
871 +#define GPI_DTX_ASEQ 0x50
872 +#define GPI_FIFO_STATUS 0x54
873 +#define GPI_FIFO_DEBUG 0x58
874 +#define GPI_TX_PAUSE_TIME 0x5c
875 +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
876 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
877 +#define GPI_TOE_CHKSUM_EN 0x68
878 +#define GPI_OVERRUN_DROPCNT 0x6c
879 +#define GPI_CSR_MTIP_PAUSE_REG 0x74
880 +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
881 +#define GPI_CSR_RX_CNT 0x7c
882 +#define GPI_CSR_TX_CNT 0x80
883 +#define GPI_CSR_DEBUG1 0x84
884 +#define GPI_CSR_DEBUG2 0x88
885 +
886 +struct gpi_cfg {
887 + u32 lmem_rtry_cnt;
888 + u32 tmlf_txthres;
889 + u32 aseq_len;
890 + u32 mtip_pause_reg;
891 +};
892 +
893 +/* GPI common defines */
894 +#define GPI_LMEM_BUF_EN 0x1
895 +#define GPI_DDR_BUF_EN 0x1
896 +
897 +/* EGPI 1 defines */
898 +#define EGPI1_LMEM_RTRY_CNT 0x40
899 +#define EGPI1_TMLF_TXTHRES 0xBC
900 +#define EGPI1_ASEQ_LEN 0x50
901 +
902 +/* EGPI 2 defines */
903 +#define EGPI2_LMEM_RTRY_CNT 0x40
904 +#define EGPI2_TMLF_TXTHRES 0xBC
905 +#define EGPI2_ASEQ_LEN 0x40
906 +
907 +/* EGPI 3 defines */
908 +#define EGPI3_LMEM_RTRY_CNT 0x40
909 +#define EGPI3_TMLF_TXTHRES 0xBC
910 +#define EGPI3_ASEQ_LEN 0x40
911 +
912 +/* HGPI defines */
913 +#define HGPI_LMEM_RTRY_CNT 0x40
914 +#define HGPI_TMLF_TXTHRES 0xBC
915 +#define HGPI_ASEQ_LEN 0x40
916 +
917 +#define EGPI_PAUSE_TIME 0x000007D0
918 +#define EGPI_PAUSE_ENABLE 0x40000000
919 +#endif /* _GPI_H_ */
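
The per-instance defines above map one-to-one onto struct gpi_cfg. For example, a configuration table entry for EGPI1 might look like the sketch below; .mtip_pause_reg is left unset because its proper value is not derivable from this header alone and is established elsewhere in the driver.

static const struct gpi_cfg egpi1_cfg_example = {
	.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT, /* LMEM allocation retries */
	.tmlf_txthres  = EGPI1_TMLF_TXTHRES,  /* TMLF TX threshold */
	.aseq_len      = EGPI1_ASEQ_LEN,      /* abort sequence length */
};
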
920 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
921 new file mode 100644
922 index 00000000..71cf81a7
923 --- /dev/null
924 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
925 @@ -0,0 +1,100 @@
926 +/*
927 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
928 + * Copyright 2017 NXP
929 + *
930 + * This program is free software; you can redistribute it and/or modify
931 + * it under the terms of the GNU General Public License as published by
932 + * the Free Software Foundation; either version 2 of the License, or
933 + * (at your option) any later version.
934 + *
935 + * This program is distributed in the hope that it will be useful,
936 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
937 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
938 + * GNU General Public License for more details.
939 + *
940 + * You should have received a copy of the GNU General Public License
941 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
942 + */
943 +
944 +#ifndef _HIF_H_
945 +#define _HIF_H_
946 +
947 +/* @file hif.h.
948 + * hif - PFE HIF block control and status registers.
949 + * Mapped on CBUS and accessible from all PEs and the ARM.
950 + */
951 +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
952 +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
953 +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
954 +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
955 +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
956 +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
957 +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
958 +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
959 +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
960 +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
961 +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
962 +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
963 +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
964 +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
965 +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
966 +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
967 +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
968 +
969 +/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
970 +#define HIF_INT BIT(0)
971 +#define HIF_RXBD_INT BIT(1)
972 +#define HIF_RXPKT_INT BIT(2)
973 +#define HIF_TXBD_INT BIT(3)
974 +#define HIF_TXPKT_INT BIT(4)
975 +
976 +/* HIF_TX_CTRL bits */
977 +#define HIF_CTRL_DMA_EN BIT(0)
978 +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
979 +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
980 +
981 +/* HIF_RX_STATUS bits */
982 +#define BDP_CSR_RX_DMA_ACTV BIT(16)
983 +
984 +/* HIF_INT_ENABLE bits */
985 +#define HIF_INT_EN BIT(0)
986 +#define HIF_RXBD_INT_EN BIT(1)
987 +#define HIF_RXPKT_INT_EN BIT(2)
988 +#define HIF_TXBD_INT_EN BIT(3)
989 +#define HIF_TXPKT_INT_EN BIT(4)
990 +
991 +/* HIF_POLL_CTRL bits */
992 +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
993 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
994 +
995 +/* HIF_INT_COAL bits */
996 +#define HIF_INT_COAL_ENABLE BIT(31)
997 +
998 +/* Buffer descriptor control bits */
999 +#define BD_CTRL_BUFLEN_MASK 0x3fff
1000 +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
1001 +#define BD_CTRL_CBD_INT_EN BIT(16)
1002 +#define BD_CTRL_PKT_INT_EN BIT(17)
1003 +#define BD_CTRL_LIFM BIT(18)
1004 +#define BD_CTRL_LAST_BD BIT(19)
1005 +#define BD_CTRL_DIR BIT(20)
1006 +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
1007 +#define BD_CTRL_PKT_XFER BIT(24)
1008 +#define BD_CTRL_DESC_EN BIT(31)
1009 +#define BD_CTRL_PARSE_DISABLE BIT(25)
1010 +#define BD_CTRL_BRFETCH_DISABLE BIT(26)
1011 +#define BD_CTRL_RTFETCH_DISABLE BIT(27)
1012 +
1013 +/* Buffer descriptor status bits */
1014 +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
1015 +#define BD_STATUS_DIR_PROC_ID BIT(16)
1016 +#define BD_STATUS_CONN_ID_EN BIT(17)
1017 +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
1018 +#define BD_STATUS_LE_DATA BIT(21)
1019 +#define BD_STATUS_CHKSUM_EN BIT(22)
1020 +
1021 +/* HIF Buffer descriptor status bits */
1022 +#define DIR_PROC_ID BIT(16)
1023 +#define PROC_ID(id) ((id) << 18)
1024 +
1025 +#endif /* _HIF_H_ */
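
To illustrate the BD_CTRL_* bits, here is a sketch of composing the ctrl word for the final RX buffer descriptor in a ring. Which bits a real ring setup uses should be checked against pfe_hif.c, and the 2048-byte length is just an example value.

static u32 hif_rx_last_bd_ctrl_example(void)
{
	return BD_CTRL_DESC_EN |    /* descriptor valid / hardware-owned */
	       BD_CTRL_PKT_INT_EN | /* raise HIF_RXPKT_INT per packet */
	       BD_CTRL_LIFM |       /* last fragment in frame */
	       BD_CTRL_LAST_BD |    /* wrap: last BD in the ring */
	       BD_BUF_LEN(2048);    /* length, masked to bits [13:0] */
}
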
1026 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1027 new file mode 100644
1028 index 00000000..3d4d43ce
1029 --- /dev/null
1030 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1031 @@ -0,0 +1,50 @@
1032 +/*
1033 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1034 + * Copyright 2017 NXP
1035 + *
1036 + * This program is free software; you can redistribute it and/or modify
1037 + * it under the terms of the GNU General Public License as published by
1038 + * the Free Software Foundation; either version 2 of the License, or
1039 + * (at your option) any later version.
1040 + *
1041 + * This program is distributed in the hope that it will be useful,
1042 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1043 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1044 + * GNU General Public License for more details.
1045 + *
1046 + * You should have received a copy of the GNU General Public License
1047 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1048 + */
1049 +
1050 +#ifndef _HIF_NOCPY_H_
1051 +#define _HIF_NOCPY_H_
1052 +
1053 +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
1054 +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
1055 +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
1056 +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
1057 +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
1058 +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
1059 +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
1060 +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
1061 +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
1062 +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
1063 +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
1064 +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
1065 +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
1066 +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
1067 +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
1068 +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
1069 +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
1070 +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
1071 +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
1072 +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
1073 +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
1074 +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
1075 +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
1076 +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
1077 +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
1078 +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
1079 +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
1080 +
1081 +#endif /* _HIF_NOCPY_H_ */
1082 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1083 new file mode 100644
1084 index 00000000..05f3d681
1085 --- /dev/null
1086 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1087 @@ -0,0 +1,168 @@
1088 +/*
1089 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1090 + * Copyright 2017 NXP
1091 + *
1092 + * This program is free software; you can redistribute it and/or modify
1093 + * it under the terms of the GNU General Public License as published by
1094 + * the Free Software Foundation; either version 2 of the License, or
1095 + * (at your option) any later version.
1096 + *
1097 + * This program is distributed in the hope that it will be useful,
1098 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1099 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1100 + * GNU General Public License for more details.
1101 + *
1102 + * You should have received a copy of the GNU General Public License
1103 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1104 + */
1105 +
1106 +#ifndef _TMU_CSR_H_
1107 +#define _TMU_CSR_H_
1108 +
1109 +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
1110 +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
1111 +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
1112 +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
1113 +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
1114 +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
1115 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1116 +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
1117 +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
1118 +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
1119 +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
1120 +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
1121 +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
1122 +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
1123 +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
1124 +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
1125 +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
1126 +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
1127 +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
1128 +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
1129 +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
1130 +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
1131 +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
1132 +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
1133 +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
1134 +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
1135 +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
1136 +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
1137 +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
1138 +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
1139 +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
1140 +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
1141 +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
1142 +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
1143 +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
1144 +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
1145 +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
1146 +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
1147 +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
1148 +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
1149 +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
1150 +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
1151 +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
1152 +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
1153 +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
1154 +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
1155 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1156 + * This is a global Enable for all schedulers in PHY0
1157 + */
1158 +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
1159 +
1160 +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
1161 +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
1162 +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
1163 +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
1164 +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
1165 +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
1166 +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
1167 +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
1168 +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
1169 +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
1170 +
1171 +/* [31] Mem Access Command: 0 = internal memory read, 1 = internal memory
1172 + * write. [27:24] Byte enables of the internal memory access. [23:0] Address
1173 + * of the internal memory. This address is used to access both the PM and DM
1174 + * of all the PEs.
1175 + */
1176 +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
1177 +
1178 +/* Internal Memory Access Write Data */
1179 +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
1180 +/* Internal Memory Access Read Data. The commands are blocked
1181 + * at the mem_access only
1182 + */
1183 +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
1184 +
1185 +/* [31:0] PHY0 in queue address (must be initialized with one of the
1186 + * xxx_INQ_PKTPTR cbus addresses)
1187 + */
1188 +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
1189 +/* [31:0] PHY1 in queue address (must be initialized with one of the
1190 + * xxx_INQ_PKTPTR cbus addresses)
1191 + */
1192 +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
1193 +/* [31:0] PHY2 in queue address (must be initialized with one of the
1194 + * xxx_INQ_PKTPTR cbus addresses)
1195 + */
1196 +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
1197 +/* [31:0] PHY3 in queue address (must be initialized with one of the
1198 + * xxx_INQ_PKTPTR cbus addresses)
1199 + */
1200 +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
1201 +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
1202 +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
1203 +
1204 +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
1205 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1206 +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
1207 +
1208 +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
1209 +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
1210 +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
1211 +/* [31:0] PHY4 in queue address (must be initialized with one of the
1212 + * xxx_INQ_PKTPTR cbus addresses)
1213 + */
1214 +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
1215 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1216 + * This is a global Enable for all schedulers in PHY1
1217 + */
1218 +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
1219 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1220 + * This is a global Enable for all schedulers in PHY2
1221 + */
1222 +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
1223 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1224 + * This is a global Enable for all schedulers in PHY3
1225 + */
1226 +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
1227 +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
1228 +/* [31:0] PHY5 in queue address (must be initialized with one of the
1229 + * xxx_INQ_PKTPTR cbus addresses)
1230 + */
1231 +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
1232 +
1233 +#define SW_RESET BIT(0) /* Global software reset */
1234 +#define INQ_RESET BIT(2)
1235 +#define TEQ_RESET BIT(3)
1236 +#define TDQ_RESET BIT(4)
1237 +#define PE_RESET BIT(5)
1238 +#define MEM_INIT BIT(6)
1239 +#define MEM_INIT_DONE BIT(7)
1240 +#define LLM_INIT BIT(8)
1241 +#define LLM_INIT_DONE BIT(9)
1242 +#define ECC_MEM_INIT_DONE BIT(10)
1243 +
1244 +struct tmu_cfg {
1245 + u32 pe_sys_clk_ratio;
1246 + unsigned long llm_base_addr;
1247 + u32 llm_queue_len;
1248 +};
1249 +
1250 +/* Not HW related: pfe_ctrl / pfe common defines */
1251 +#define DEFAULT_MAX_QDEPTH 80
1252 +#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host tx QoS */
1253 +#define DEFAULT_TMU3_QDEPTH 127
1254 +
1255 +#endif /* _TMU_CSR_H_ */
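
The TMU_MEM_ACCESS_ADDR layout above ([31] write flag, [27:24] byte enables, [23:0] address) matches the PE_MEM_ACCESS_* bits from cbus.h, so a 32-bit TMU DMEM write can be sketched as below. This assumes, as the rest of the driver arranges, that CBUS_BASE_ADDR resolves to the ioremapped CBUS base so the register macros are usable as addresses; the WDATA-before-command ordering is likewise an assumption.

#include <linux/io.h> /* writel() */

static void tmu_dmem_write32_example(u32 pe_addr, u32 val)
{
	/* Stage the data word first... */
	writel(val, (void __iomem *)TMU_MEM_ACCESS_WDATA);
	/* ...then issue the write command with all byte enables set. */
	writel(PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
	       PE_MEM_ACCESS_BYTE_ENABLE(0, 4) | (pe_addr & 0xffffff),
	       (void __iomem *)TMU_MEM_ACCESS_ADDR);
}
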
1256 diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1257 new file mode 100644
1258 index 00000000..ae623cda
1259 --- /dev/null
1260 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1261 @@ -0,0 +1,61 @@
1262 +/*
1263 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1264 + * Copyright 2017 NXP
1265 + *
1266 + * This program is free software; you can redistribute it and/or modify
1267 + * it under the terms of the GNU General Public License as published by
1268 + * the Free Software Foundation; either version 2 of the License, or
1269 + * (at your option) any later version.
1270 + *
1271 + * This program is distributed in the hope that it will be useful,
1272 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1273 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1274 + * GNU General Public License for more details.
1275 + *
1276 + * You should have received a copy of the GNU General Public License
1277 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1278 + */
1279 +
1280 +#ifndef _UTIL_CSR_H_
1281 +#define _UTIL_CSR_H_
1282 +
1283 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1284 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1285 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1286 +
1287 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1288 +
1289 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1290 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1291 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1292 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1293 +
1294 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1295 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1296 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1297 +
1298 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1299 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1300 +
1301 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1302 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1303 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1304 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1305 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1306 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1307 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1308 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1309 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1310 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1311 +
1312 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1313 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1314 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1315 +
1316 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1317 +
1318 +struct util_cfg {
1319 + u32 pe_sys_clk_ratio;
1320 +};
1321 +
1322 +#endif /* _UTIL_CSR_H_ */
1323 diff --git a/drivers/staging/fsl_ppfe/include/pfe/pfe.h b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1324 new file mode 100644
1325 index 00000000..d93ae4c6
1326 --- /dev/null
1327 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1328 @@ -0,0 +1,372 @@
1329 +/*
1330 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1331 + * Copyright 2017 NXP
1332 + *
1333 + * This program is free software; you can redistribute it and/or modify
1334 + * it under the terms of the GNU General Public License as published by
1335 + * the Free Software Foundation; either version 2 of the License, or
1336 + * (at your option) any later version.
1337 + *
1338 + * This program is distributed in the hope that it will be useful,
1339 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1340 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1341 + * GNU General Public License for more details.
1342 + *
1343 + * You should have received a copy of the GNU General Public License
1344 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1345 + */
1346 +
1347 +#ifndef _PFE_H_
1348 +#define _PFE_H_
1349 +
1350 +#include "cbus.h"
1351 +
1352 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1353 +/*
1354 + * Only valid for mem access register interface
1355 + */
1356 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1357 +#define CLASS_DMEM_SIZE 0x00002000
1358 +#define CLASS_IMEM_SIZE 0x00008000
1359 +
1360 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1361 +/*
1362 + * Only valid for mem access register interface
1363 + */
1364 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1365 +#define TMU_DMEM_SIZE 0x00000800
1366 +#define TMU_IMEM_SIZE 0x00002000
1367 +
1368 +#define UTIL_DMEM_BASE_ADDR 0x00000000
1369 +#define UTIL_DMEM_SIZE 0x00002000
1370 +
1371 +#define PE_LMEM_BASE_ADDR 0xc3010000
1372 +#define PE_LMEM_SIZE 0x8000
1373 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1374 +
1375 +#define DMEM_BASE_ADDR 0x00000000
1376 +#define DMEM_SIZE 0x2000 /* TMU has less... */
1377 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
1378 +
1379 +#define PMEM_BASE_ADDR 0x00010000
1380 +#define PMEM_SIZE 0x8000 /* TMU has less... */
1381 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
1382 +
1383 +/* These check memory ranges from PE point of view/memory map */
1384 +#define IS_DMEM(addr, len) \
1385 + ({ typeof(addr) addr_ = (addr); \
1386 + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
1387 + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1388 +
1389 +#define IS_PMEM(addr, len) \
1390 + ({ typeof(addr) addr_ = (addr); \
1391 + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
1392 + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1393 +
1394 +#define IS_PE_LMEM(addr, len) \
1395 + ({ typeof(addr) addr_ = (addr); \
1396 + ((unsigned long)(addr_) >= \
1397 + PE_LMEM_BASE_ADDR) && \
1398 + (((unsigned long)(addr_) + \
1399 + (len)) <= PE_LMEM_END); })
1400 +
1401 +#define IS_PFE_LMEM(addr, len) \
1402 + ({ typeof(addr) addr_ = (addr); \
1403 + ((unsigned long)(addr_) >= \
1404 + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
1405 + (((unsigned long)(addr_) + (len)) <= \
1406 + CBUS_VIRT_TO_PFE(LMEM_END)); })
1407 +
1408 +#define __IS_PHYS_DDR(addr, len) \
1409 + ({ typeof(addr) addr_ = (addr); \
1410 + ((unsigned long)(addr_) >= \
1411 + DDR_PHYS_BASE_ADDR) && \
1412 + (((unsigned long)(addr_) + (len)) <= \
1413 + DDR_PHYS_END); })
1414 +
1415 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
1416 +
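+/*
+ * Usage sketch (illustrative, not part of the driver): validate a
+ * PE-visible destination range with the checks above before a DMEM copy,
+ * e.g. in a firmware loader. pe_id, dmem_dst, src and len are hypothetical.
+ *
+ *	if (!IS_DMEM(dmem_dst, len))
+ *		return -EINVAL;
+ *	pe_dmem_memcpy_to32(pe_id, dmem_dst, src, len);
+ */
+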
1417 +/*
1418 + * If using a run-time virtual address for the cbus base address use this code
1419 + */
1420 +extern void *cbus_base_addr;
1421 +extern void *ddr_base_addr;
1422 +extern unsigned long ddr_phys_base_addr;
1423 +extern unsigned int ddr_size;
1424 +
1425 +#define CBUS_BASE_ADDR cbus_base_addr
1426 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
1427 +#define DDR_BASE_ADDR ddr_base_addr
1428 +#define DDR_SIZE ddr_size
1429 +
1430 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1431 +
1432 +#define LS1012A_PFE_RESET_WA /*
1433 + * PFE doesn't have a global reset and
1434 + * re-init must take care of a few things
1435 + * to make the PFE functional after reset
1436 + */
1437 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
1438 + * as seen by PE's.
1439 + */
1440 +/* CBUS physical base address as seen by PE's. */
1441 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
1442 +
1443 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
1444 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
1445 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1446 + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1447 +/* Translates to PFE address map */
1448 +
1449 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1450 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1451 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1452 +
1453 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
1454 + PFE_CBUS_PHYS_BASE_ADDR)
1455 +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
1456 + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
1457 +
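+/*
+ * Round-trip sketch (illustrative): translating a host virtual DDR pointer
+ * into the PFE address map and back with the macros above. This assumes the
+ * PFE DDR window sits in the upper 2GB, as the 0x80000000 masks imply;
+ * 'vaddr' is hypothetical.
+ *
+ *	unsigned long pfe_addr = DDR_VIRT_TO_PFE(vaddr);
+ *	void *back = DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(pfe_addr));
+ *	WARN_ON(back != vaddr);
+ */
+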
1458 +/* The code below is used by the host-side QoS control driver */
1459 +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
1460 + * pe's
1461 + */
1462 +
1463 +enum {
1464 + CLASS0_ID = 0,
1465 + CLASS1_ID,
1466 + CLASS2_ID,
1467 + CLASS3_ID,
1468 + CLASS4_ID,
1469 + CLASS5_ID,
1470 + TMU0_ID,
1471 + TMU1_ID,
1472 + TMU2_ID,
1473 + TMU3_ID,
1474 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1475 + UTIL_ID,
1476 +#endif
1477 + MAX_PE
1478 +};
1479 +
1480 +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1481 + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1482 + BIT(CLASS4_ID) | BIT(CLASS5_ID))
1483 +#define CLASS_MAX_ID CLASS5_ID
1484 +
1485 +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1486 + BIT(TMU3_ID))
1487 +
1488 +#define TMU_MAX_ID TMU3_ID
1489 +
1490 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1491 +#define UTIL_MASK BIT(UTIL_ID)
1492 +#endif
1493 +
1494 +struct pe_status {
1495 + u32 cpu_state;
1496 + u32 activity_counter;
1497 + u32 rx;
1498 + union {
1499 + u32 tx;
1500 + u32 tmu_qstatus;
1501 + };
1502 + u32 drop;
1503 +#if defined(CFG_PE_DEBUG)
1504 + u32 debug_indicator;
1505 + u32 debug[16];
1506 +#endif
1507 +} __aligned(16);
1508 +
1509 +struct pe_sync_mailbox {
1510 + u32 stop;
1511 + u32 stopped;
1512 +};
1513 +
1514 +/* Drop counter definitions */
1515 +
1516 +#define CLASS_NUM_DROP_COUNTERS 13
1517 +#define UTIL_NUM_DROP_COUNTERS 8
1518 +
1519 +/* PE information.
1520 + * Structure containing PE's specific information. It is used to create
1521 + * generic C functions common to all PE's.
1522 + * Before using the library functions this structure needs to be initialized
1523 + * with the different registers virtual addresses
1524 + * (according to the ARM MMU mapping). The default initialization supports a
1525 + * virtual == physical mapping.
1526 + */
1527 +struct pe_info {
1528 + u32 dmem_base_addr; /* PE's dmem base address */
1529 + u32 pmem_base_addr; /* PE's pmem base address */
1530 + u32 pmem_size; /* PE's pmem size */
1531 +
1532 + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1533 + * address
1534 + */
1535 + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
1536 + * address
1537 + */
1538 + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1539 + * address
1540 + */
1541 +};
1542 +
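+/*
+ * Illustrative initialization (not from the driver): a pe_info entry for
+ * CLASS0 under the default virtual == physical mapping described above.
+ * The CLASS_MEM_ACCESS_* register names are assumed from class_csr.h.
+ *
+ *	static struct pe_info class0_info = {
+ *		.dmem_base_addr = CLASS_DMEM_BASE_ADDR(CLASS0_ID),
+ *		.pmem_base_addr = CLASS_IMEM_BASE_ADDR(CLASS0_ID),
+ *		.pmem_size = CLASS_IMEM_SIZE,
+ *		.mem_access_wdata = (void *)CLASS_MEM_ACCESS_WDATA,
+ *		.mem_access_addr = (void *)CLASS_MEM_ACCESS_ADDR,
+ *		.mem_access_rdata = (void *)CLASS_MEM_ACCESS_RDATA,
+ *	};
+ */
+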
1543 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1544 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1545 +
1546 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1547 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1548 +
1549 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1550 +
1551 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1552 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1553 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1554 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1555 +void class_bus_write(u32 val, u32 addr, u8 size);
1556 +u32 class_bus_read(u32 addr, u8 size);
1557 +
1558 +#define class_bus_readl(addr) class_bus_read(addr, 4)
1559 +#define class_bus_readw(addr) class_bus_read(addr, 2)
1560 +#define class_bus_readb(addr) class_bus_read(addr, 1)
1561 +
1562 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
1563 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
1564 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
1565 +
1566 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
1567 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
1568 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
1569 +
1570 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
1571 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
1572 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
1573 +
1574 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1575 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1576 + struct device *dev);
1577 +
1578 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1579 + unsigned int ddr_size);
1580 +void bmu_init(void *base, struct BMU_CFG *cfg);
1581 +void bmu_reset(void *base);
1582 +void bmu_enable(void *base);
1583 +void bmu_disable(void *base);
1584 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1585 +
1586 +/*
1587 + * An enumerated type for loopback values. This can be one of three values:
1588 + * no loopback (normal operation), local loopback (through the internal
1589 + * loopback module of the MAC), or PHY loopback (through the external PHY).
1590 + */
1591 +#ifndef __MAC_LOOP_ENUM__
1592 +#define __MAC_LOOP_ENUM__
1593 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1594 +#endif
1595 +
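+/*
+ * Usage sketch (illustrative): put a GEMAC into local loopback for a
+ * self-test, then restore normal operation via gemac_set_loop() below.
+ *
+ *	gemac_set_loop(base, LB_LOCAL);
+ *	... run test traffic ...
+ *	gemac_set_loop(base, LB_NONE);
+ */
+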
1596 +void gemac_init(void *base, void *config);
1597 +void gemac_disable_rx_checksum_offload(void *base);
1598 +void gemac_enable_rx_checksum_offload(void *base);
1599 +void gemac_set_mdc_div(void *base, int mdc_div);
1600 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1601 +void gemac_set_duplex(void *base, int duplex);
1602 +void gemac_set_mode(void *base, int mode);
1603 +void gemac_enable(void *base);
1604 +void gemac_tx_disable(void *base);
1605 +void gemac_tx_enable(void *base);
1606 +void gemac_disable(void *base);
1607 +void gemac_reset(void *base);
1608 +void gemac_set_address(void *base, struct spec_addr *addr);
1609 +struct spec_addr gemac_get_address(void *base);
1610 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1611 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1612 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1613 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1614 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1615 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1616 + unsigned int entry_index);
1617 +void gemac_clear_laddr1(void *base);
1618 +void gemac_clear_laddr2(void *base);
1619 +void gemac_clear_laddr3(void *base);
1620 +void gemac_clear_laddr4(void *base);
1621 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1622 +struct pfe_mac_addr gemac_get_hash(void *base);
1623 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1624 +struct pfe_mac_addr gem_get_laddr1(void *base);
1625 +struct pfe_mac_addr gem_get_laddr2(void *base);
1626 +struct pfe_mac_addr gem_get_laddr3(void *base);
1627 +struct pfe_mac_addr gem_get_laddr4(void *base);
1628 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1629 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1630 +void gemac_allow_broadcast(void *base);
1631 +void gemac_no_broadcast(void *base);
1632 +void gemac_enable_1536_rx(void *base);
1633 +void gemac_disable_1536_rx(void *base);
1634 +void gemac_enable_rx_jmb(void *base);
1635 +void gemac_disable_rx_jmb(void *base);
1636 +void gemac_enable_stacked_vlan(void *base);
1637 +void gemac_disable_stacked_vlan(void *base);
1638 +void gemac_enable_pause_rx(void *base);
1639 +void gemac_disable_pause_rx(void *base);
1640 +void gemac_enable_copy_all(void *base);
1641 +void gemac_disable_copy_all(void *base);
1642 +void gemac_set_bus_width(void *base, int width);
1643 +void gemac_set_wol(void *base, u32 wol_conf);
1644 +
1645 +void gpi_init(void *base, struct gpi_cfg *cfg);
1646 +void gpi_reset(void *base);
1647 +void gpi_enable(void *base);
1648 +void gpi_disable(void *base);
1649 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1650 +
1651 +void class_init(struct class_cfg *cfg);
1652 +void class_reset(void);
1653 +void class_enable(void);
1654 +void class_disable(void);
1655 +void class_set_config(struct class_cfg *cfg);
1656 +
1657 +void tmu_reset(void);
1658 +void tmu_init(struct tmu_cfg *cfg);
1659 +void tmu_enable(u32 pe_mask);
1660 +void tmu_disable(u32 pe_mask);
1661 +u32 tmu_qstatus(u32 if_id);
1662 +u32 tmu_pkts_processed(u32 if_id);
1663 +
1664 +void util_init(struct util_cfg *cfg);
1665 +void util_reset(void);
1666 +void util_enable(void);
1667 +void util_disable(void);
1668 +
1669 +void hif_init(void);
1670 +void hif_tx_enable(void);
1671 +void hif_tx_disable(void);
1672 +void hif_rx_enable(void);
1673 +void hif_rx_disable(void);
1674 +
1675 +/* Get Chip Revision level
1676 + *
1677 + */
1678 +static inline unsigned int CHIP_REVISION(void)
1679 +{
1680 + /* For LS1012A, always return 1 */
1681 + return 1;
1682 +}
1683 +
1684 +/* Start HIF rx DMA
1685 + *
1686 + */
1687 +static inline void hif_rx_dma_start(void)
1688 +{
1689 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1690 +}
1691 +
1692 +/* Start HIF tx DMA
1693 + *
1694 + */
1695 +static inline void hif_tx_dma_start(void)
1696 +{
1697 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1698 +}
1699 +
1700 +#endif /* _PFE_H_ */
1701 diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.c b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1702 new file mode 100644
1703 index 00000000..dfa8547c
1704 --- /dev/null
1705 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1706 @@ -0,0 +1,238 @@
1707 +/*
1708 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1709 + * Copyright 2017 NXP
1710 + *
1711 + * This program is free software; you can redistribute it and/or modify
1712 + * it under the terms of the GNU General Public License as published by
1713 + * the Free Software Foundation; either version 2 of the License, or
1714 + * (at your option) any later version.
1715 + *
1716 + * This program is distributed in the hope that it will be useful,
1717 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1718 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1719 + * GNU General Public License for more details.
1720 + *
1721 + * You should have received a copy of the GNU General Public License
1722 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1723 + */
1724 +
1725 +#include <linux/kernel.h>
1726 +#include <linux/sched.h>
1727 +#include <linux/module.h>
1728 +#include <linux/list.h>
1729 +#include <linux/kthread.h>
1730 +
1731 +#include "pfe_mod.h"
1732 +#include "pfe_ctrl.h"
1733 +
1734 +#define TIMEOUT_MS 1000
1735 +
1736 +int relax(unsigned long end)
1737 +{
1738 + if (time_after(jiffies, end)) {
1739 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
1740 + return -1;
1741 +
1742 + if (need_resched())
1743 + schedule();
1744 + }
1745 +
1746 + return 0;
1747 +}
1748 +
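+/*
+ * Usage sketch (illustrative): relax() is meant to be called inside a
+ * polling loop with a jiffies deadline, yielding the CPU while waiting and
+ * returning -1 once TIMEOUT_MS has elapsed past 'end'. condition_met() is
+ * hypothetical.
+ *
+ *	unsigned long end = jiffies + 2;
+ *
+ *	while (!condition_met()) {
+ *		if (relax(end) < 0)
+ *			return -ETIMEDOUT;
+ *	}
+ */
+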
1749 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
1750 +{
1751 + int id;
1752 +
1753 + mutex_lock(&ctrl->mutex);
1754 +
1755 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
1756 + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
1757 +
1758 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1759 + if (id == TMU2_ID)
1760 + continue;
1761 + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
1762 + }
1763 +
1764 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1765 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
1766 +#endif
1767 + mutex_unlock(&ctrl->mutex);
1768 +}
1769 +
1770 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
1771 +{
1772 + int pe_mask = CLASS_MASK | TMU_MASK;
1773 +
1774 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1775 + pe_mask |= UTIL_MASK;
1776 +#endif
1777 + mutex_lock(&ctrl->mutex);
1778 + pe_start(&pfe->ctrl, pe_mask);
1779 + mutex_unlock(&ctrl->mutex);
1780 +}
1781 +
1782 +/* PE sync stop.
1783 + * Stops packet processing for a list of PE's (specified using a bitmask).
1784 + * The caller must hold ctrl->mutex.
1785 + *
1786 + * @param ctrl Control context
1787 + * @param pe_mask Mask of PE id's to stop
1788 + *
1789 + */
1790 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
1791 +{
1792 + struct pe_sync_mailbox *mbox;
1793 + int pe_stopped = 0;
1794 + unsigned long end = jiffies + 2;
1795 + int i;
1796 +
1797 + pe_mask &= 0x2FF; /* Exclude Util + TMU2 */
1798 +
1799 + for (i = 0; i < MAX_PE; i++)
1800 + if (pe_mask & (1 << i)) {
1801 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1802 +
1803 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
1804 + long)&mbox->stop, 4);
1805 + }
1806 +
1807 + while (pe_stopped != pe_mask) {
1808 + for (i = 0; i < MAX_PE; i++)
1809 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1810 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1811 +
1812 + if (pe_dmem_read(i, (unsigned
1813 + long)&mbox->stopped, 4) &
1814 + cpu_to_be32(0x1))
1815 + pe_stopped |= (1 << i);
1816 + }
1817 +
1818 + if (relax(end) < 0)
1819 + goto err;
1820 + }
1821 +
1822 + return 0;
1823 +
1824 +err:
1825 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1826 +
1827 + for (i = 0; i < MAX_PE; i++)
1828 + if (pe_mask & (1 << i)) {
1829 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1830 +
1831 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1832 + long)&mbox->stop, 4);
1833 + }
1834 +
1835 + return -EIO;
1836 +}
1837 +
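+/*
+ * Typical pairing (illustrative): stop a set of PEs, update shared state,
+ * then restart them, all under ctrl->mutex as required above.
+ *
+ *	mutex_lock(&ctrl->mutex);
+ *	if (!pe_sync_stop(ctrl, CLASS_MASK)) {
+ *		... update DMEM / shared tables ...
+ *		pe_start(ctrl, CLASS_MASK);
+ *	}
+ *	mutex_unlock(&ctrl->mutex);
+ */
+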
1838 +/* PE start.
1839 + * Starts packet processing for a list of PE's (specified using a bitmask).
1840 + * The caller must hold ctrl->mutex.
1841 + *
1842 + * @param ctrl Control context
1843 + * @param pe_mask Mask of PE id's to start
1844 + *
1845 + */
1846 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
1847 +{
1848 + struct pe_sync_mailbox *mbox;
1849 + int i;
1850 +
1851 + for (i = 0; i < MAX_PE; i++)
1852 + if (pe_mask & (1 << i)) {
1853 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1854 +
1855 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1856 + long)&mbox->stop, 4);
1857 + }
1858 +}
1859 +
1860 +/* This function ensures all PEs are put into the idle state */
1861 +int pe_reset_all(struct pfe_ctrl *ctrl)
1862 +{
1863 + struct pe_sync_mailbox *mbox;
1864 + int pe_stopped = 0;
1865 + unsigned long end = jiffies + 2;
1866 + int i;
1867 + int pe_mask = CLASS_MASK | TMU_MASK;
1868 +
1869 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1870 + pe_mask |= UTIL_MASK;
1871 +#endif
1872 +
1873 + for (i = 0; i < MAX_PE; i++)
1874 + if (pe_mask & (1 << i)) {
1875 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1876 +
1877 + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
1878 + long)&mbox->stop, 4);
1879 + }
1880 +
1881 + while (pe_stopped != pe_mask) {
1882 + for (i = 0; i < MAX_PE; i++)
1883 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1884 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1885 +
1886 + if (pe_dmem_read(i, (unsigned long)
1887 + &mbox->stopped, 4) &
1888 + cpu_to_be32(0x1))
1889 + pe_stopped |= (1 << i);
1890 + }
1891 +
1892 + if (relax(end) < 0)
1893 + goto err;
1894 + }
1895 +
1896 + return 0;
1897 +
1898 +err:
1899 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1900 + return -EIO;
1901 +}
1902 +
1903 +int pfe_ctrl_init(struct pfe *pfe)
1904 +{
1905 + struct pfe_ctrl *ctrl = &pfe->ctrl;
1906 + int id;
1907 +
1908 + pr_info("%s\n", __func__);
1909 +
1910 + mutex_init(&ctrl->mutex);
1911 + spin_lock_init(&ctrl->lock);
1912 +
1913 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
1914 + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
1915 + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
1916 + }
1917 +
1918 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1919 + if (id == TMU2_ID)
1920 + continue;
1921 + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
1922 + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
1923 + }
1924 +
1925 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1926 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
1927 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
1928 +#endif
1929 +
1930 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
1931 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
1932 + ROUTE_TABLE_BASEADDR;
1933 +
1934 + ctrl->dev = pfe->dev;
1935 +
1936 + pr_info("%s finished\n", __func__);
1937 +
1938 + return 0;
1939 +}
1940 +
1941 +void pfe_ctrl_exit(struct pfe *pfe)
1942 +{
1943 + pr_info("%s\n", __func__);
1944 +}
1945 diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.h b/drivers/staging/fsl_ppfe/pfe_ctrl.h
1946 new file mode 100644
1947 index 00000000..22115c76
1948 --- /dev/null
1949 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
1950 @@ -0,0 +1,112 @@
1951 +/*
1952 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1953 + * Copyright 2017 NXP
1954 + *
1955 + * This program is free software; you can redistribute it and/or modify
1956 + * it under the terms of the GNU General Public License as published by
1957 + * the Free Software Foundation; either version 2 of the License, or
1958 + * (at your option) any later version.
1959 + *
1960 + * This program is distributed in the hope that it will be useful,
1961 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1962 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1963 + * GNU General Public License for more details.
1964 + *
1965 + * You should have received a copy of the GNU General Public License
1966 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1967 + */
1968 +
1969 +#ifndef _PFE_CTRL_H_
1970 +#define _PFE_CTRL_H_
1971 +
1972 +#include <linux/dmapool.h>
1973 +
1974 +#include "pfe_mod.h"
1975 +#include "pfe/pfe.h"
1976 +
1977 +#define DMA_BUF_SIZE_128 0x80 /* enough for 1 conntrack */
1978 +#define DMA_BUF_SIZE_256 0x100
1979 +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
1980 +#define DMA_BUF_SIZE_512 0x200
1981 +/* 512-byte DMA buffers used by the RTP relay feature */
1982 +#define DMA_BUF_MIN_ALIGNMENT 8
1983 +#define DMA_BUF_BOUNDARY (4 * 1024)
1984 +/* bursts cannot cross a 4k boundary */
1985 +
1986 +#define CMD_TX_ENABLE 0x0501
1987 +#define CMD_TX_DISABLE 0x0502
1988 +
1989 +#define CMD_RX_LRO 0x0011
1990 +#define CMD_PKTCAP_ENABLE 0x0d01
1991 +#define CMD_QM_EXPT_RATE 0x020c
1992 +
1993 +#define CLASS_DM_SH_STATIC (0x800)
1994 +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
1995 +#define CLASS_DM_SYNC_MBOX (0x808)
1996 +#define CLASS_DM_MSG_MBOX (0x810)
1997 +#define CLASS_DM_DROP_CNTR (0x820)
1998 +#define CLASS_DM_RESUME (0x854)
1999 +#define CLASS_DM_PESTATUS (0x860)
2000 +
2001 +#define TMU_DM_SH_STATIC (0x80)
2002 +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
2003 +#define TMU_DM_SYNC_MBOX (0x88)
2004 +#define TMU_DM_MSG_MBOX (0x90)
2005 +#define TMU_DM_RESUME (0xA0)
2006 +#define TMU_DM_PESTATUS (0xB0)
2007 +#define TMU_DM_CONTEXT (0x300)
2008 +#define TMU_DM_TX_TRANS (0x480)
2009 +
2010 +#define UTIL_DM_SH_STATIC (0x0)
2011 +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
2012 +#define UTIL_DM_SYNC_MBOX (0x8)
2013 +#define UTIL_DM_MSG_MBOX (0x10)
2014 +#define UTIL_DM_DROP_CNTR (0x20)
2015 +#define UTIL_DM_RESUME (0x40)
2016 +#define UTIL_DM_PESTATUS (0x50)
2017 +
2018 +struct pfe_ctrl {
2019 + struct mutex mutex; /* to serialize pfe control access */
2020 + spinlock_t lock;
2021 +
2022 + void *dma_pool;
2023 + void *dma_pool_512;
2024 + void *dma_pool_128;
2025 +
2026 + struct device *dev;
2027 +
2028 + void *hash_array_baseaddr; /*
2029 + * Virtual base address of
2030 + * the conntrack hash array
2031 + */
2032 + unsigned long hash_array_phys_baseaddr; /*
2033 + * Physical base address of
2034 + * the conntrack hash array
2035 + */
2036 +
2037 + int (*event_cb)(u16, u16, u16*);
2038 +
2039 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
2040 + * Sync mailbox PFE
2041 + * internal address,
2042 + * initialized
2043 + * when parsing elf images
2044 + */
2045 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2046 + * Msg mailbox PFE internal
2047 + * address, initialized
2048 + * when parsing elf images
2049 + */
2050 + unsigned int sys_clk; /* AXI clock value, in KHz */
2051 +};
2052 +
2053 +int pfe_ctrl_init(struct pfe *pfe);
2054 +void pfe_ctrl_exit(struct pfe *pfe);
2055 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2056 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2057 +int pe_reset_all(struct pfe_ctrl *ctrl);
2058 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2059 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2060 +int relax(unsigned long end);
2061 +
2062 +#endif /* _PFE_CTRL_H_ */
2063 diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.c b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2064 new file mode 100644
2065 index 00000000..4156610d
2066 --- /dev/null
2067 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2068 @@ -0,0 +1,111 @@
2069 +/*
2070 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2071 + * Copyright 2017 NXP
2072 + *
2073 + * This program is free software; you can redistribute it and/or modify
2074 + * it under the terms of the GNU General Public License as published by
2075 + * the Free Software Foundation; either version 2 of the License, or
2076 + * (at your option) any later version.
2077 + *
2078 + * This program is distributed in the hope that it will be useful,
2079 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2080 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2081 + * GNU General Public License for more details.
2082 + *
2083 + * You should have received a copy of the GNU General Public License
2084 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2085 + */
2086 +
2087 +#include <linux/module.h>
2088 +#include <linux/debugfs.h>
2089 +#include <linux/platform_device.h>
2090 +
2091 +#include "pfe_mod.h"
2092 +
2093 +static int dmem_show(struct seq_file *s, void *unused)
2094 +{
2095 + u32 dmem_addr, val;
2096 + int id = (long int)s->private;
2097 + int i;
2098 +
2099 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2100 + seq_printf(s, "%04x:", dmem_addr);
2101 +
2102 + for (i = 0; i < 8; i++) {
2103 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2104 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2105 + (val >> 8) & 0xff, (val >> 16) & 0xff,
2106 + (val >> 24) & 0xff);
2107 + }
2108 +
2109 + seq_puts(s, "\n");
2110 + }
2111 +
2112 + return 0;
2113 +}
2114 +
2115 +static int dmem_open(struct inode *inode, struct file *file)
2116 +{
2117 + return single_open(file, dmem_show, inode->i_private);
2118 +}
2119 +
2120 +static const struct file_operations dmem_fops = {
2121 + .open = dmem_open,
2122 + .read = seq_read,
2123 + .llseek = seq_lseek,
2124 + .release = single_release,
2125 +};
2126 +
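+/*
+ * Usage note (illustrative): once the files below are registered, each PE's
+ * data memory can be dumped from userspace, 32 bytes per line, e.g.:
+ *
+ *	cat /sys/kernel/debug/pfe/pe0_dmem
+ */
+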
2127 +int pfe_debugfs_init(struct pfe *pfe)
2128 +{
2129 + struct dentry *d;
2130 +
2131 + pr_info("%s\n", __func__);
2132 +
2133 + pfe->dentry = debugfs_create_dir("pfe", NULL);
2134 + if (IS_ERR_OR_NULL(pfe->dentry))
2135 + goto err_dir;
2136 +
2137 + d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
2138 + &dmem_fops);
2139 + if (IS_ERR_OR_NULL(d))
2140 + goto err_pe;
2141 +
2142 + d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
2143 + &dmem_fops);
2144 + if (IS_ERR_OR_NULL(d))
2145 + goto err_pe;
2146 +
2147 + d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
2148 + &dmem_fops);
2149 + if (IS_ERR_OR_NULL(d))
2150 + goto err_pe;
2151 +
2152 + d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
2153 + &dmem_fops);
2154 + if (IS_ERR_OR_NULL(d))
2155 + goto err_pe;
2156 +
2157 + d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
2158 + &dmem_fops);
2159 + if (IS_ERR_OR_NULL(d))
2160 + goto err_pe;
2161 +
2162 + d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
2163 + &dmem_fops);
2164 + if (IS_ERR_OR_NULL(d))
2165 + goto err_pe;
2166 +
2167 + return 0;
2168 +
2169 +err_pe:
2170 + debugfs_remove_recursive(pfe->dentry);
2171 +
2172 +err_dir:
2173 + return -1;
2174 +}
2175 +
2176 +void pfe_debugfs_exit(struct pfe *pfe)
2177 +{
2178 + debugfs_remove_recursive(pfe->dentry);
2179 +}
2180 diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.h b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2181 new file mode 100644
2182 index 00000000..301d9fc2
2183 --- /dev/null
2184 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2185 @@ -0,0 +1,25 @@
2186 +/*
2187 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2188 + * Copyright 2017 NXP
2189 + *
2190 + * This program is free software; you can redistribute it and/or modify
2191 + * it under the terms of the GNU General Public License as published by
2192 + * the Free Software Foundation; either version 2 of the License, or
2193 + * (at your option) any later version.
2194 + *
2195 + * This program is distributed in the hope that it will be useful,
2196 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2197 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2198 + * GNU General Public License for more details.
2199 + *
2200 + * You should have received a copy of the GNU General Public License
2201 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2202 + */
2203 +
2204 +#ifndef _PFE_DEBUGFS_H_
2205 +#define _PFE_DEBUGFS_H_
2206 +
2207 +int pfe_debugfs_init(struct pfe *pfe);
2208 +void pfe_debugfs_exit(struct pfe *pfe);
2209 +
2210 +#endif /* _PFE_DEBUGFS_H_ */
2211 diff --git a/drivers/staging/fsl_ppfe/pfe_eth.c b/drivers/staging/fsl_ppfe/pfe_eth.c
2212 new file mode 100644
2213 index 00000000..02cd7c52
2214 --- /dev/null
2215 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2216 @@ -0,0 +1,2434 @@
2217 +/*
2218 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2219 + * Copyright 2017 NXP
2220 + *
2221 + * This program is free software; you can redistribute it and/or modify
2222 + * it under the terms of the GNU General Public License as published by
2223 + * the Free Software Foundation; either version 2 of the License, or
2224 + * (at your option) any later version.
2225 + *
2226 + * This program is distributed in the hope that it will be useful,
2227 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2228 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2229 + * GNU General Public License for more details.
2230 + *
2231 + * You should have received a copy of the GNU General Public License
2232 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2233 + */
2234 +
2235 +/* @pfe_eth.c
2236 + * Ethernet driver to handle the exception path for PFE.
2237 + * - uses HIF functions to send/receive packets.
2238 + * - uses ctrl function to start/stop interfaces.
2239 + * - uses direct register accesses to control phy operation.
2240 + */
2241 +#include <linux/version.h>
2242 +#include <linux/kernel.h>
2243 +#include <linux/interrupt.h>
2244 +#include <linux/dma-mapping.h>
2245 +#include <linux/dmapool.h>
2246 +#include <linux/netdevice.h>
2247 +#include <linux/etherdevice.h>
2248 +#include <linux/ethtool.h>
2249 +#include <linux/mii.h>
2250 +#include <linux/phy.h>
2251 +#include <linux/timer.h>
2252 +#include <linux/hrtimer.h>
2253 +#include <linux/platform_device.h>
2254 +
2255 +#include <net/ip.h>
2256 +#include <net/sock.h>
2257 +
2258 +#include <linux/io.h>
2259 +#include <asm/irq.h>
2260 +#include <linux/delay.h>
2261 +#include <linux/regmap.h>
2262 +#include <linux/i2c.h>
2263 +
2264 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2265 +#include <net/netfilter/nf_conntrack.h>
2266 +#endif
2267 +
2268 +#include "pfe_mod.h"
2269 +#include "pfe_eth.h"
2270 +
2271 +static void *cbus_emac_base[3];
2272 +static void *cbus_gpi_base[3];
2273 +
2274 +/* Forward Declaration */
2275 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2276 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2277 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2278 + from_tx, int n_desc);
2279 +
2280 +unsigned int gemac_regs[] = {
2281 + 0x0004, /* Interrupt event */
2282 + 0x0008, /* Interrupt mask */
2283 + 0x0024, /* Ethernet control */
2284 + 0x0064, /* MIB Control/Status */
2285 + 0x0084, /* Receive control/status */
2286 + 0x00C4, /* Transmit control */
2287 + 0x00E4, /* Physical address low */
2288 + 0x00E8, /* Physical address high */
2289 + 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
2290 + 0x0190, /* Receive FIFO Section Full Threshold */
2291 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
2292 + 0x01B0, /* Frame Truncation Length */
2293 +};
2294 +
2295 +/********************************************************************/
2296 +/* SYSFS INTERFACE */
2297 +/********************************************************************/
2298 +
2299 +#ifdef PFE_ETH_NAPI_STATS
2300 +/*
2301 + * pfe_eth_show_napi_stats
2302 + */
2303 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2304 + struct device_attribute *attr,
2305 + char *buf)
2306 +{
2307 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2308 + ssize_t len = 0;
2309 +
2310 + len += sprintf(buf + len, "sched: %u\n",
2311 + priv->napi_counters[NAPI_SCHED_COUNT]);
2312 + len += sprintf(buf + len, "poll: %u\n",
2313 + priv->napi_counters[NAPI_POLL_COUNT]);
2314 + len += sprintf(buf + len, "packet: %u\n",
2315 + priv->napi_counters[NAPI_PACKET_COUNT]);
2316 + len += sprintf(buf + len, "budget: %u\n",
2317 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2318 + len += sprintf(buf + len, "desc: %u\n",
2319 + priv->napi_counters[NAPI_DESC_COUNT]);
2320 +
2321 + return len;
2322 +}
2323 +
2324 +/*
2325 + * pfe_eth_set_napi_stats
2326 + */
2327 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2328 + struct device_attribute *attr,
2329 + const char *buf, size_t count)
2330 +{
2331 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2332 +
2333 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2334 +
2335 + return count;
2336 +}
2337 +#endif
2338 +#ifdef PFE_ETH_TX_STATS
2339 +/* pfe_eth_show_tx_stats
2340 + *
2341 + */
2342 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2343 + struct device_attribute *attr,
2344 + char *buf)
2345 +{
2346 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2347 + ssize_t len = 0;
2348 + int i;
2349 +
2350 + len += sprintf(buf + len, "TX queues stats:\n");
2351 +
2352 + for (i = 0; i < emac_txq_cnt; i++) {
2353 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2354 + i);
2355 +
2356 + len += sprintf(buf + len, "\n");
2357 + __netif_tx_lock_bh(tx_queue);
2358 +
2359 + hif_tx_lock(&pfe->hif);
2360 + len += sprintf(buf + len,
2361 + "Queue %2d : credits = %10d\n"
2362 + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2363 + len += sprintf(buf + len,
2364 + " tx packets = %10d\n"
2365 + , pfe->tmu_credit.tx_packets[priv->id][i]);
2366 + hif_tx_unlock(&pfe->hif);
2367 +
2368 + /* Don't output additional stats if the queue was never used */
2369 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
2370 + goto skip;
2371 +
2372 + len += sprintf(buf + len,
2373 + " clean_fail = %10d\n"
2374 + , priv->clean_fail[i]);
2375 + len += sprintf(buf + len,
2376 + " stop_queue = %10d\n"
2377 + , priv->stop_queue_total[i]);
2378 + len += sprintf(buf + len,
2379 + " stop_queue_hif = %10d\n"
2380 + , priv->stop_queue_hif[i]);
2381 + len += sprintf(buf + len,
2382 + " stop_queue_hif_client = %10d\n"
2383 + , priv->stop_queue_hif_client[i]);
2384 + len += sprintf(buf + len,
2385 + " stop_queue_credit = %10d\n"
2386 + , priv->stop_queue_credit[i]);
2387 +skip:
2388 + __netif_tx_unlock_bh(tx_queue);
2389 + }
2390 + return len;
2391 +}
2392 +
2393 +/* pfe_eth_set_tx_stats
2394 + *
2395 + */
2396 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2397 + struct device_attribute *attr,
2398 + const char *buf, size_t count)
2399 +{
2400 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2401 + int i;
2402 +
2403 + for (i = 0; i < emac_txq_cnt; i++) {
2404 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2405 + i);
2406 +
2407 + __netif_tx_lock_bh(tx_queue);
2408 + priv->clean_fail[i] = 0;
2409 + priv->stop_queue_total[i] = 0;
2410 + priv->stop_queue_hif[i] = 0;
2411 + priv->stop_queue_hif_client[i] = 0;
2412 + priv->stop_queue_credit[i] = 0;
2413 + __netif_tx_unlock_bh(tx_queue);
2414 + }
2415 +
2416 + return count;
2417 +}
2418 +#endif
2419 +/* pfe_eth_show_txavail
2420 + *
2421 + */
2422 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2423 + struct device_attribute *attr,
2424 + char *buf)
2425 +{
2426 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2427 + ssize_t len = 0;
2428 + int i;
2429 +
2430 + for (i = 0; i < emac_txq_cnt; i++) {
2431 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2432 + i);
2433 +
2434 + __netif_tx_lock_bh(tx_queue);
2435 +
2436 + len += sprintf(buf + len, "%d",
2437 + hif_lib_tx_avail(&priv->client, i));
2438 +
2439 + __netif_tx_unlock_bh(tx_queue);
2440 +
2441 + if (i == (emac_txq_cnt - 1))
2442 + len += sprintf(buf + len, "\n");
2443 + else
2444 + len += sprintf(buf + len, " ");
2445 + }
2446 +
2447 + return len;
2448 +}
2449 +
2450 +/* pfe_eth_show_default_priority
2451 + *
2452 + */
2453 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2454 + struct device_attribute *attr,
2455 + char *buf)
2456 +{
2457 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2458 + unsigned long flags;
2459 + int rc;
2460 +
2461 + spin_lock_irqsave(&priv->lock, flags);
2462 + rc = sprintf(buf, "%d\n", priv->default_priority);
2463 + spin_unlock_irqrestore(&priv->lock, flags);
2464 +
2465 + return rc;
2466 +}
2467 +
2468 +/* pfe_eth_set_default_priority
2469 + *
2470 + */
2471 +
2472 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2473 + struct device_attribute *attr,
2474 + const char *buf, size_t count)
2475 +{
2476 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2477 + unsigned long flags;
+ unsigned long priority;
2478 +
+ /* kstrtoul() returns an error code, not the parsed value */
+ if (kstrtoul(buf, 0, &priority))
+ return -EINVAL;
+
2479 + spin_lock_irqsave(&priv->lock, flags);
2480 + priv->default_priority = priority;
2481 + spin_unlock_irqrestore(&priv->lock, flags);
2482 +
2483 + return count;
2484 +}
2485 +
2486 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2487 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2488 + pfe_eth_set_default_priority);
2489 +
2490 +#ifdef PFE_ETH_NAPI_STATS
2491 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2492 + pfe_eth_set_napi_stats);
2493 +#endif
2494 +
2495 +#ifdef PFE_ETH_TX_STATS
2496 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2497 + pfe_eth_set_tx_stats);
2498 +#endif
2499 +
2500 +/*
2501 + * pfe_eth_sysfs_init
2502 + *
2503 + */
2504 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2505 +{
2506 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2507 + int err;
2508 +
2509 + /* Initialize the default values */
2510 +
2511 + /*
2512 + * By default, packets without conntrack will use this default high
2513 + * priority queue
2514 + */
2515 + priv->default_priority = 15;
2516 +
2517 + /* Create our sysfs files */
2518 + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2519 + if (err) {
2520 + netdev_err(ndev,
2521 + "failed to create default_priority sysfs files\n");
2522 + goto err_priority;
2523 + }
2524 +
2525 + err = device_create_file(&ndev->dev, &dev_attr_txavail);
2526 + if (err) {
2527 + netdev_err(ndev,
2528 + "failed to create default_priority sysfs files\n");
2529 + goto err_txavail;
2530 + }
2531 +
2532 +#ifdef PFE_ETH_NAPI_STATS
2533 + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2534 + if (err) {
2535 + netdev_err(ndev, "failed to create napi stats sysfs files\n");
2536 + goto err_napi;
2537 + }
2538 +#endif
2539 +
2540 +#ifdef PFE_ETH_TX_STATS
2541 + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2542 + if (err) {
2543 + netdev_err(ndev, "failed to create tx stats sysfs files\n");
2544 + goto err_tx;
2545 + }
2546 +#endif
2547 +
2548 + return 0;
2549 +
2550 +#ifdef PFE_ETH_TX_STATS
2551 +err_tx:
2552 +#endif
2553 +#ifdef PFE_ETH_NAPI_STATS
2554 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2555 +
2556 +err_napi:
2557 +#endif
2558 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2559 +
2560 +err_txavail:
2561 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2562 +
2563 +err_priority:
2564 + return -1;
2565 +}
2566 +
2567 +/* pfe_eth_sysfs_exit
2568 + *
2569 + */
2570 +void pfe_eth_sysfs_exit(struct net_device *ndev)
2571 +{
2572 +#ifdef PFE_ETH_TX_STATS
2573 + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
2574 +#endif
2575 +
2576 +#ifdef PFE_ETH_NAPI_STATS
2577 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2578 +#endif
2579 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2580 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2581 +}
2582 +
2583 +/*************************************************************************/
2584 +/* ETHTOOL INTERFACE */
2585 +/*************************************************************************/
2586 +
2587 +/* MTIP GEMAC */
2588 +static const struct fec_stat {
2589 + char name[ETH_GSTRING_LEN];
2590 + u16 offset;
2591 +} fec_stats[] = {
2592 + /* RMON TX */
2593 + { "tx_dropped", RMON_T_DROP },
2594 + { "tx_packets", RMON_T_PACKETS },
2595 + { "tx_broadcast", RMON_T_BC_PKT },
2596 + { "tx_multicast", RMON_T_MC_PKT },
2597 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
2598 + { "tx_undersize", RMON_T_UNDERSIZE },
2599 + { "tx_oversize", RMON_T_OVERSIZE },
2600 + { "tx_fragment", RMON_T_FRAG },
2601 + { "tx_jabber", RMON_T_JAB },
2602 + { "tx_collision", RMON_T_COL },
2603 + { "tx_64byte", RMON_T_P64 },
2604 + { "tx_65to127byte", RMON_T_P65TO127 },
2605 + { "tx_128to255byte", RMON_T_P128TO255 },
2606 + { "tx_256to511byte", RMON_T_P256TO511 },
2607 + { "tx_512to1023byte", RMON_T_P512TO1023 },
2608 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2609 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2610 + { "tx_octets", RMON_T_OCTETS },
2611 +
2612 + /* IEEE TX */
2613 + { "IEEE_tx_drop", IEEE_T_DROP },
2614 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2615 + { "IEEE_tx_1col", IEEE_T_1COL },
2616 + { "IEEE_tx_mcol", IEEE_T_MCOL },
2617 + { "IEEE_tx_def", IEEE_T_DEF },
2618 + { "IEEE_tx_lcol", IEEE_T_LCOL },
2619 + { "IEEE_tx_excol", IEEE_T_EXCOL },
2620 + { "IEEE_tx_macerr", IEEE_T_MACERR },
2621 + { "IEEE_tx_cserr", IEEE_T_CSERR },
2622 + { "IEEE_tx_sqe", IEEE_T_SQE },
2623 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2624 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2625 +
2626 + /* RMON RX */
2627 + { "rx_packets", RMON_R_PACKETS },
2628 + { "rx_broadcast", RMON_R_BC_PKT },
2629 + { "rx_multicast", RMON_R_MC_PKT },
2630 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
2631 + { "rx_undersize", RMON_R_UNDERSIZE },
2632 + { "rx_oversize", RMON_R_OVERSIZE },
2633 + { "rx_fragment", RMON_R_FRAG },
2634 + { "rx_jabber", RMON_R_JAB },
2635 + { "rx_64byte", RMON_R_P64 },
2636 + { "rx_65to127byte", RMON_R_P65TO127 },
2637 + { "rx_128to255byte", RMON_R_P128TO255 },
2638 + { "rx_256to511byte", RMON_R_P256TO511 },
2639 + { "rx_512to1023byte", RMON_R_P512TO1023 },
2640 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2641 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2642 + { "rx_octets", RMON_R_OCTETS },
2643 +
2644 + /* IEEE RX */
2645 + { "IEEE_rx_drop", IEEE_R_DROP },
2646 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2647 + { "IEEE_rx_crc", IEEE_R_CRC },
2648 + { "IEEE_rx_align", IEEE_R_ALIGN },
2649 + { "IEEE_rx_macerr", IEEE_R_MACERR },
2650 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2651 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2652 +};
2653 +
2654 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
2655 + *stats, u64 *data)
2656 +{
2657 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2658 + int i;
2659 +
2660 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2661 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
2662 +}
2663 +
2664 +static void pfe_eth_gstrings(struct net_device *netdev,
2665 + u32 stringset, u8 *data)
2666 +{
2667 + int i;
2668 +
2669 + switch (stringset) {
2670 + case ETH_SS_STATS:
2671 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2672 + memcpy(data + i * ETH_GSTRING_LEN,
2673 + fec_stats[i].name, ETH_GSTRING_LEN);
2674 + break;
2675 + }
2676 +}
2677 +
2678 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
2679 +{
2680 + switch (sset) {
2681 + case ETH_SS_STATS:
2682 + return ARRAY_SIZE(fec_stats);
2683 + default:
2684 + return -EOPNOTSUPP;
2685 + }
2686 +}
2687 +
2688 +/*
2689 + * pfe_eth_gemac_reglen - Return the length of the register structure.
2690 + *
2691 + */
2692 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
2693 +{
2694 + pr_info("%s()\n", __func__);
2695 + return (sizeof(gemac_regs) / sizeof(u32));
2696 +}
2697 +
2698 +/*
2699 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
2700 + *
2701 + */
2702 +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
2703 + *regs, void *regbuf)
2704 +{
2705 + int i;
2706 +
2707 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2708 + u32 *buf = (u32 *)regbuf;
2709 +
2710 + pr_info("%s()\n", __func__);
2711 + for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
2712 + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
2713 +}
2714 +
2715 +/*
2716 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
2717 + *
2718 + */
2719 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2720 +{
2721 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2722 +
2723 + if (wol->wolopts & ~WAKE_MAGIC)
2724 + return -EOPNOTSUPP;
2725 +
2726 + /* for MTIP we store wol->wolopts */
2727 + priv->wol = wol->wolopts;
2728 +
2729 + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2730 +
2731 + return 0;
2732 +}
2733 +
2734 +/*
2735 + *
2736 + * pfe_eth_get_wol - Get the WoL options.
2737 + *
2738 + */
2739 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
2740 + *wol)
2741 +{
2742 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2743 +
2744 + wol->supported = WAKE_MAGIC;
2745 + wol->wolopts = 0;
2746 +
2747 + if (priv->wol & WAKE_MAGIC)
2748 + wol->wolopts = WAKE_MAGIC;
2749 +
2750 + memset(&wol->sopass, 0, sizeof(wol->sopass));
2751 +}
2752 +
2753 +/*
2754 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
2755 + *
2756 + */
2757 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
2758 + *drvinfo)
2759 +{
2760 + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2761 + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
2762 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2763 + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
2764 +}
2765 +
2766 +/*
2767 + * pfe_eth_set_settings - Used to send commands to PHY.
2768 + *
2769 + */
2770 +static int pfe_eth_set_settings(struct net_device *ndev,
2771 + const struct ethtool_link_ksettings *cmd)
2772 +{
2773 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2774 + struct phy_device *phydev = priv->phydev;
2775 +
2776 + if (!phydev)
2777 + return -ENODEV;
2778 +
2779 + return phy_ethtool_ksettings_set(phydev, cmd);
2780 +}
2781 +
2782 +/*
2783 + * pfe_eth_get_settings - Return the current settings in the
2784 + * ethtool_link_ksettings structure.
2785 + *
2786 + */
2787 +static int pfe_eth_get_settings(struct net_device *ndev,
2788 + struct ethtool_link_ksettings *cmd)
2789 +{
2790 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2791 + struct phy_device *phydev = priv->phydev;
2792 +
2793 + if (!phydev)
2794 + return -ENODEV;
2795 +
2796 + return phy_ethtool_ksettings_get(phydev, cmd);
2797 +}
2798 +
2799 +/*
2800 + * pfe_eth_get_msglevel - Gets the debug message mask.
2801 + *
2802 + */
2803 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
2804 +{
2805 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2806 +
2807 + return priv->msg_enable;
2808 +}
2809 +
2810 +/*
2811 + * pfe_eth_set_msglevel - Sets the debug message mask.
2812 + *
2813 + */
2814 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
2815 +{
2816 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2817 +
2818 + priv->msg_enable = data;
2819 +}
2820 +
2821 +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
2822 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
2823 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
2824 + HIF_RX_COAL_CLKS_PER_USEC)
2825 +
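+/*
+ * Worked example (illustrative): with a 250 MHz AXI clock, sys_clk is
+ * 250000 (kHz), so HIF_RX_COAL_CLKS_PER_USEC is 250 and a request for
+ * rx_coalesce_usecs = 100 programs 25000 clock ticks into HIF_INT_COAL.
+ */
+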
2826 +/*
2827 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
2828 + *
2829 + */
2830 +static int pfe_eth_set_coalesce(struct net_device *ndev,
2831 + struct ethtool_coalesce *ec)
2832 +{
2833 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
2834 + return -EINVAL;
2835 +
2836 + if (!ec->rx_coalesce_usecs) {
2837 + writel(0, HIF_INT_COAL);
2838 + return 0;
2839 + }
2840 +
2841 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
2842 + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2843 +
2844 + return 0;
2845 +}
2846 +
2847 +/*
2848 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
2849 + *
2850 + */
2851 +static int pfe_eth_get_coalesce(struct net_device *ndev,
2852 + struct ethtool_coalesce *ec)
2853 +{
2854 + int reg_val = readl(HIF_INT_COAL);
2855 +
2856 + if (reg_val & HIF_INT_COAL_ENABLE)
2857 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
2858 + HIF_RX_COAL_CLKS_PER_USEC;
2859 + else
2860 + ec->rx_coalesce_usecs = 0;
2861 +
2862 + return 0;
2863 +}
2864 +
2865 +/*
2866 + * pfe_eth_set_pauseparam - Sets pause parameters
2867 + *
2868 + */
2869 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
2870 + struct ethtool_pauseparam *epause)
2871 +{
2872 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2873 +
2874 + if (epause->tx_pause != epause->rx_pause) {
2875 + netdev_info(ndev,
2876 + "hardware only support enable/disable both tx and rx\n");
2877 + return -EINVAL;
2878 + }
2879 +
2880 + priv->pause_flag = 0;
2881 + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
2882 + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
2883 +
2884 + if (epause->rx_pause || epause->autoneg) {
2885 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
2886 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
2887 + EGPI_PAUSE_ENABLE),
2888 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2889 + if (priv->phydev) {
2890 + priv->phydev->supported |= ADVERTISED_Pause |
2891 + ADVERTISED_Asym_Pause;
2892 + priv->phydev->advertising |= ADVERTISED_Pause |
2893 + ADVERTISED_Asym_Pause;
2894 + }
2895 + } else {
2896 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
2897 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
2898 + ~EGPI_PAUSE_ENABLE),
2899 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2900 + if (priv->phydev) {
2901 + priv->phydev->supported &= ~(ADVERTISED_Pause |
2902 + ADVERTISED_Asym_Pause);
2903 + priv->phydev->advertising &= ~(ADVERTISED_Pause |
2904 + ADVERTISED_Asym_Pause);
2905 + }
2906 + }
2907 +
2908 + return 0;
2909 +}
2910 +
2911 +/*
2912 + * pfe_eth_get_pauseparam - Gets pause parameters
2913 + *
2914 + */
2915 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
2916 + struct ethtool_pauseparam *epause)
2917 +{
2918 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2919 +
2920 + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
2921 + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
2922 + epause->rx_pause = epause->tx_pause;
2923 +}
2924 +
2925 +/*
2926 + * pfe_eth_get_hash
2927 + */
2928 +#define PFE_HASH_BITS 6 /* #bits in hash */
2929 +#define CRC32_POLY 0xEDB88320
2930 +
2931 +static int pfe_eth_get_hash(u8 *addr)
2932 +{
2933 + unsigned int i, bit, data, crc, hash;
2934 +
2935 + /* calculate crc32 value of mac address */
2936 + crc = 0xffffffff;
2937 +
2938 + for (i = 0; i < 6; i++) {
2939 + data = addr[i];
2940 + for (bit = 0; bit < 8; bit++, data >>= 1) {
2941 + crc = (crc >> 1) ^
2942 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
2943 + }
2944 + }
2945 +
2946 + /*
2947 + * only upper 6 bits (PFE_HASH_BITS) are used
2948 + * which point to specific bit in the hash registers
2949 + */
2950 + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
2951 +
2952 + return hash;
2953 +}
2954 +
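+/*
+ * Usage sketch (illustrative): map a multicast address to its bit position
+ * in the 64-bit GEMAC hash, e.g. when building the hash registers.
+ *
+ *	u8 mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ *	int bit = pfe_eth_get_hash(mc_addr);
+ *
+ * The returned value is in [0, 63], selecting one bit across the two
+ * 32-bit hash registers.
+ */
+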
2955 +const struct ethtool_ops pfe_ethtool_ops = {
2956 + .get_drvinfo = pfe_eth_get_drvinfo,
2957 + .get_regs_len = pfe_eth_gemac_reglen,
2958 + .get_regs = pfe_eth_gemac_get_regs,
2959 + .get_link = ethtool_op_get_link,
2960 + .get_wol = pfe_eth_get_wol,
2961 + .set_wol = pfe_eth_set_wol,
2962 + .set_pauseparam = pfe_eth_set_pauseparam,
2963 + .get_pauseparam = pfe_eth_get_pauseparam,
2964 + .get_strings = pfe_eth_gstrings,
2965 + .get_sset_count = pfe_eth_stats_count,
2966 + .get_ethtool_stats = pfe_eth_fill_stats,
2967 + .get_msglevel = pfe_eth_get_msglevel,
2968 + .set_msglevel = pfe_eth_set_msglevel,
2969 + .set_coalesce = pfe_eth_set_coalesce,
2970 + .get_coalesce = pfe_eth_get_coalesce,
2971 + .get_link_ksettings = pfe_eth_get_settings,
2972 + .set_link_ksettings = pfe_eth_set_settings,
2973 +};
2974 +
2975 +/* pfe_eth_mdio_reset
2976 + */
2977 +int pfe_eth_mdio_reset(struct mii_bus *bus)
2978 +{
2979 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2980 + u32 phy_speed;
2981 +
2982 + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
2983 +
2984 + mutex_lock(&bus->mdio_lock);
2985 +
2986 + /*
2987 + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2988 + *
2989 + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2990 + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
2991 + */
2992 + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
2993 + << EMAC_MII_SPEED_SHIFT);
2994 + phy_speed |= EMAC_HOLDTIME(0x5);
2995 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
2996 +
2997 + mutex_unlock(&bus->mdio_lock);
2998 +
2999 + return 0;
3000 +}
3001 +
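+/*
+ * Worked example (illustrative): assuming a 250 MHz reference (sys_clk =
+ * 250000 kHz), DIV_ROUND_UP(250000000, 4000000) = 63, giving an MDC of
+ * roughly 250 MHz / (63 * 2) ~= 1.98 MHz, safely below the 2.5 MHz limit.
+ */
+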
3002 +/* pfe_eth_gemac_phy_timeout
3003 + *
3004 + */
3005 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
3006 +{
3007 + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
3008 + EMAC_IEVENT_MII)) {
3009 + if (timeout-- <= 0)
3010 + return -1;
3011 + usleep_range(10, 20);
3012 + }
3013 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
3014 + return 0;
3015 +}
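+
+/*
+ * With EMAC_MDIO_TIMEOUT (1000) iterations and a 10-20us sleep in each,
+ * the MDIO callers below wait at most roughly 10-20ms for EMAC_IEVENT_MII.
+ */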
3016 +
3017 +static int pfe_eth_mdio_mux(u8 muxval)
3018 +{
3019 + struct i2c_adapter *a;
3020 + struct i2c_msg msg;
3021 + unsigned char buf[2];
3022 + int ret;
3023 +
3024 + a = i2c_get_adapter(0);
3025 + if (!a)
3026 + return -ENODEV;
3027 +
3028 +	/* write the mux value to register 0x54 of the I2C device at 0x66 */
3029 + buf[0] = 0x54; /* reg number */
3030 + buf[1] = (muxval << 6) | 0x3; /* data */
3031 + msg.addr = 0x66;
3032 + msg.buf = buf;
3033 + msg.len = 2;
3034 + msg.flags = 0;
3035 + ret = i2c_transfer(a, &msg, 1);
3036 + i2c_put_adapter(a);
3037 + if (ret != 1)
3038 + return -ENODEV;
3039 + return 0;
3040 +}
3041 +
3042 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
3043 + int dev_addr, int regnum)
3044 +{
3045 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3046 +
3047 + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
3048 + EMAC_MII_DATA_RA(dev_addr) |
3049 + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
3050 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3051 +
3052 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3053 + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
3054 + __func__);
3055 + return -1;
3056 + }
3057 +
3058 + return 0;
3059 +}
3060 +
3061 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3062 + u16 value)
3063 +{
3064 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3065 +
3066 +	/* To access external PHYs on the QDS board, the MDIO mux must be configured */
3067 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3068 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3069 +
3070 + if (regnum & MII_ADDR_C45) {
3071 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3072 + regnum & 0xffff);
3073 + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3074 + EMAC_MII_DATA_PA(mii_id) |
3075 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3076 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3077 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3078 + } else {
3079 + /* start a write op */
3080 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3081 + EMAC_MII_DATA_PA(mii_id) |
3082 + EMAC_MII_DATA_RA(regnum) |
3083 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3084 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3085 + }
3086 +
3087 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3088 + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
3089 + __func__);
3090 + return -1;
3091 + }
3092 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3093 + mii_id, regnum, value);
3094 +
3095 + return 0;
3096 +}
3097 +
3098 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3099 +{
3100 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3101 + u16 value = 0;
3102 +
3103 +	/* To access external PHYs on the QDS board, the MDIO mux must be configured */
3104 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3105 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3106 +
3107 + if (regnum & MII_ADDR_C45) {
3108 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3109 + regnum & 0xffff);
3110 + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3111 + EMAC_MII_DATA_PA(mii_id) |
3112 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3113 + EMAC_MII_DATA_TA,
3114 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3115 + } else {
3116 + /* start a read op */
3117 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3118 + EMAC_MII_DATA_PA(mii_id) |
3119 + EMAC_MII_DATA_RA(regnum) |
3120 + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
3121 + EMAC_MII_DATA_REG);
3122 + }
3123 +
3124 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3125 + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
3126 + return -1;
3127 + }
3128 +
3129 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
3130 + EMAC_MII_DATA_REG));
3131 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3132 + mii_id, regnum, value);
3133 + return value;
3134 +}
3135 +
3136 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
3137 + struct ls1012a_mdio_platform_data *minfo)
3138 +{
3139 + struct mii_bus *bus;
3140 + int rc;
3141 +
3142 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3143 + pr_info("%s\n", __func__);
3144 +
3145 + bus = mdiobus_alloc();
3146 + if (!bus) {
3147 + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
3148 + rc = -ENOMEM;
3149 + goto err0;
3150 + }
3151 +
3152 + bus->name = "ls1012a MDIO Bus";
3153 + bus->read = &pfe_eth_mdio_read;
3154 + bus->write = &pfe_eth_mdio_write;
3155 + bus->reset = &pfe_eth_mdio_reset;
3156 + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
3157 + bus->priv = priv;
3158 +
3159 + bus->phy_mask = minfo->phy_mask;
3160 + priv->mdc_div = minfo->mdc_div;
3161 +
3162 + if (!priv->mdc_div)
3163 + priv->mdc_div = 64;
3164 +
3165 + bus->irq[0] = minfo->irq[0];
3166 +
3167 + bus->parent = priv->pfe->dev;
3168 +
3169 + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
3170 + __func__, priv->mdc_div, bus->phy_mask);
3171 + rc = mdiobus_register(bus);
3172 + if (rc) {
3173 + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
3174 + bus->name);
3175 + goto err1;
3176 + }
3177 +
3178 + priv->mii_bus = bus;
3179 + pfe_eth_mdio_reset(bus);
3180 +
3181 + return 0;
3182 +
3183 +err1:
3184 + mdiobus_free(bus);
3185 +err0:
3186 + return rc;
3187 +}
3188 +
3189 +/* pfe_eth_mdio_exit
3190 + */
3191 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
3192 +{
3193 + if (!bus)
3194 + return;
3195 +
3196 +	netif_info((struct pfe_eth_priv_s *)bus->priv, drv,
3197 +		   ((struct pfe_eth_priv_s *)bus->priv)->ndev, "%s\n", __func__);
3198 +
3199 + mdiobus_unregister(bus);
3200 + mdiobus_free(bus);
3201 +}
3202 +
3203 +/* pfe_get_phydev_speed
3204 + */
3205 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3206 +{
3207 + switch (phydev->speed) {
3208 + case 10:
3209 + return SPEED_10M;
3210 + case 100:
3211 + return SPEED_100M;
3212 + case 1000:
3213 + default:
3214 + return SPEED_1000M;
3215 + }
3216 +}
3217 +
3218 +/* pfe_set_rgmii_speed
3219 + */
3220 +#define RGMIIPCR 0x434
3221 +/* RGMIIPCR bit definitions*/
3222 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
3223 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
3224 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
3225 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
3226 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
3227 +
3228 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3229 +{
3230 + u32 rgmii_pcr;
3231 +
3232 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3233 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3234 +
3235 + switch (phydev->speed) {
3236 + case 10:
3237 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3238 + break;
3239 + case 1000:
3240 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3241 + break;
3242 + case 100:
3243 + default:
3244 + /* Default is 100M */
3245 + break;
3246 + }
3247 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3248 +}
3249 +
3250 +/* pfe_get_phydev_duplex
3251 + */
3252 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3253 +{
3254 +	/* Half duplex is not supported; always report full duplex */
3255 + return DUPLEX_FULL;
3256 +}
3257 +
3258 +/* pfe_eth_adjust_link
3259 + */
3260 +static void pfe_eth_adjust_link(struct net_device *ndev)
3261 +{
3262 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3263 + unsigned long flags;
3264 + struct phy_device *phydev = priv->phydev;
3265 + int new_state = 0;
3266 +
3267 + netif_info(priv, drv, ndev, "%s\n", __func__);
3268 +
3269 + spin_lock_irqsave(&priv->lock, flags);
3270 +
3271 + if (phydev->link) {
3272 + /*
3273 + * Now we make sure that we can be in full duplex mode.
3274 + * If not, we operate in half-duplex mode.
3275 + */
3276 + if (phydev->duplex != priv->oldduplex) {
3277 + new_state = 1;
3278 + gemac_set_duplex(priv->EMAC_baseaddr,
3279 + pfe_get_phydev_duplex(phydev));
3280 + priv->oldduplex = phydev->duplex;
3281 + }
3282 +
3283 + if (phydev->speed != priv->oldspeed) {
3284 + new_state = 1;
3285 + gemac_set_speed(priv->EMAC_baseaddr,
3286 + pfe_get_phydev_speed(phydev));
3287 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
3288 + pfe_set_rgmii_speed(phydev);
3289 + priv->oldspeed = phydev->speed;
3290 + }
3291 +
3292 + if (!priv->oldlink) {
3293 + new_state = 1;
3294 + priv->oldlink = 1;
3295 + }
3296 +
3297 + } else if (priv->oldlink) {
3298 + new_state = 1;
3299 + priv->oldlink = 0;
3300 + priv->oldspeed = 0;
3301 + priv->oldduplex = -1;
3302 + }
3303 +
3304 + if (new_state && netif_msg_link(priv))
3305 + phy_print_status(phydev);
3306 +
3307 + spin_unlock_irqrestore(&priv->lock, flags);
3308 +}
3309 +
3310 +/* pfe_phy_exit
3311 + */
3312 +static void pfe_phy_exit(struct net_device *ndev)
3313 +{
3314 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3315 +
3316 + netif_info(priv, drv, ndev, "%s\n", __func__);
3317 +
3318 + phy_disconnect(priv->phydev);
3319 + priv->phydev = NULL;
3320 +}
3321 +
3322 +/* pfe_eth_stop
3323 + */
3324 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3325 +{
3326 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3327 +
3328 + netif_info(priv, drv, ndev, "%s\n", __func__);
3329 +
3330 + if (wake) {
3331 + gemac_tx_disable(priv->EMAC_baseaddr);
3332 + } else {
3333 + gemac_disable(priv->EMAC_baseaddr);
3334 + gpi_disable(priv->GPI_baseaddr);
3335 +
3336 + if (priv->phydev)
3337 + phy_stop(priv->phydev);
3338 + }
3339 +}
3340 +
3341 +/* pfe_eth_start
3342 + */
3343 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3344 +{
3345 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3346 +
3347 + if (priv->phydev)
3348 + phy_start(priv->phydev);
3349 +
3350 + gpi_enable(priv->GPI_baseaddr);
3351 + gemac_enable(priv->EMAC_baseaddr);
3352 +
3353 + return 0;
3354 +}
3355 +
3356 +/*
3357 + * Configure the on-chip SerDes through MDIO
3358 + */
3359 +static void ls1012a_configure_serdes(struct net_device *ndev)
3360 +{
3361 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
3362 + int sgmii_2500 = 0;
3363 + struct mii_bus *bus = priv->mii_bus;
3364 +
3365 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500)
3366 + sgmii_2500 = 1;
3367 +
3368 + netif_info(priv, drv, ndev, "%s\n", __func__);
3369 + /* PCS configuration done with corresponding GEMAC */
3370 +
3371 + pfe_eth_mdio_read(bus, 0, 0);
3372 + pfe_eth_mdio_read(bus, 0, 1);
3373 +
3374 +	/* These settings were provided by the validation team */
3375 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
3376 + if (sgmii_2500) {
3377 + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
3378 + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
3379 + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
3380 + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
3381 + } else {
3382 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
3383 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
3384 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
3385 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
3386 + }
3387 +
3388 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
3389 +}
3390 +
3391 +/*
3392 + * pfe_phy_init
3393 + *
3394 + */
3395 +static int pfe_phy_init(struct net_device *ndev)
3396 +{
3397 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3398 + struct phy_device *phydev;
3399 + char phy_id[MII_BUS_ID_SIZE + 3];
3400 + char bus_id[MII_BUS_ID_SIZE];
3401 + phy_interface_t interface;
3402 +
3403 + priv->oldlink = 0;
3404 + priv->oldspeed = 0;
3405 + priv->oldduplex = -1;
3406 +
3407 + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3408 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3409 + priv->einfo->phy_id);
3410 +
3411 + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3412 + interface = priv->einfo->mii_config;
3413 + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3414 + (interface == PHY_INTERFACE_MODE_SGMII_2500)) {
3415 +		/* Configure the SGMII PCS */
3416 + if (pfe->scfg) {
3417 +			/* Configure MDIO from the SerDes */
3418 + regmap_write(pfe->scfg, 0x484, 0x00000000);
3419 + }
3420 + ls1012a_configure_serdes(ndev);
3421 + }
3422 +
3423 + if (pfe->scfg) {
3424 +		/* Configure MDIO from the pad */
3425 + regmap_write(pfe->scfg, 0x484, 0x80000000);
3426 + }
3427 +
3431 + pr_info("%s interface %x\n", __func__, interface);
3432 + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
3433 +
3434 + if (IS_ERR(phydev)) {
3435 + netdev_err(ndev, "phy_connect() failed\n");
3436 + return PTR_ERR(phydev);
3437 + }
3438 +
3439 + priv->phydev = phydev;
3440 + phydev->irq = PHY_POLL;
3441 +
3442 + return 0;
3443 +}
3444 +
3445 +/* pfe_gemac_init
3446 + */
3447 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3448 +{
3449 + struct gemac_cfg cfg;
3450 +
3451 + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3452 +
3453 + cfg.speed = SPEED_1000M;
3454 + cfg.duplex = DUPLEX_FULL;
3455 +
3456 + gemac_set_config(priv->EMAC_baseaddr, &cfg);
3457 + gemac_allow_broadcast(priv->EMAC_baseaddr);
3458 + gemac_enable_1536_rx(priv->EMAC_baseaddr);
3459 + gemac_enable_rx_jmb(priv->EMAC_baseaddr);
3460 + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3461 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3462 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3463 +
3464 +	/* GEM will perform checksum verification */
3465 + if (priv->ndev->features & NETIF_F_RXCSUM)
3466 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3467 + else
3468 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3469 +
3470 + return 0;
3471 +}
3472 +
3473 +/* pfe_eth_event_handler
3474 + */
3475 +static int pfe_eth_event_handler(void *data, int event, int qno)
3476 +{
3477 + struct pfe_eth_priv_s *priv = data;
3478 +
3479 + switch (event) {
3480 + case EVENT_RX_PKT_IND:
3481 +
3482 + if (qno == 0) {
3483 + if (napi_schedule_prep(&priv->high_napi)) {
3484 + netif_info(priv, intr, priv->ndev,
3485 + "%s: schedule high prio poll\n"
3486 + , __func__);
3487 +
3488 +#ifdef PFE_ETH_NAPI_STATS
3489 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3490 +#endif
3491 +
3492 + __napi_schedule(&priv->high_napi);
3493 + }
3494 + } else if (qno == 1) {
3495 + if (napi_schedule_prep(&priv->low_napi)) {
3496 + netif_info(priv, intr, priv->ndev,
3497 + "%s: schedule low prio poll\n"
3498 + , __func__);
3499 +
3500 +#ifdef PFE_ETH_NAPI_STATS
3501 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3502 +#endif
3503 + __napi_schedule(&priv->low_napi);
3504 + }
3505 + } else if (qno == 2) {
3506 + if (napi_schedule_prep(&priv->lro_napi)) {
3507 + netif_info(priv, intr, priv->ndev,
3508 + "%s: schedule lro prio poll\n"
3509 + , __func__);
3510 +
3511 +#ifdef PFE_ETH_NAPI_STATS
3512 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3513 +#endif
3514 + __napi_schedule(&priv->lro_napi);
3515 + }
3516 + }
3517 +
3518 + break;
3519 +
3520 + case EVENT_TXDONE_IND:
3521 + pfe_eth_flush_tx(priv);
3522 + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
3523 + break;
3524 + case EVENT_HIGH_RX_WM:
3525 + default:
3526 + break;
3527 + }
3528 +
3529 + return 0;
3530 +}
3531 +
3532 +/* pfe_eth_open
3533 + */
3534 +static int pfe_eth_open(struct net_device *ndev)
3535 +{
3536 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3537 + struct hif_client_s *client;
3538 + int rc;
3539 +
3540 + netif_info(priv, ifup, ndev, "%s\n", __func__);
3541 +
3542 + /* Register client driver with HIF */
3543 + client = &priv->client;
3544 + memset(client, 0, sizeof(*client));
3545 + client->id = PFE_CL_GEM0 + priv->id;
3546 + client->tx_qn = emac_txq_cnt;
3547 + client->rx_qn = EMAC_RXQ_CNT;
3548 + client->priv = priv;
3549 + client->pfe = priv->pfe;
3550 + client->event_handler = pfe_eth_event_handler;
3551 +
3552 + client->tx_qsize = EMAC_TXQ_DEPTH;
3553 + client->rx_qsize = EMAC_RXQ_DEPTH;
3554 +
3555 + rc = hif_lib_client_register(client);
3556 + if (rc) {
3557 + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
3558 + __func__, client->id);
3559 + goto err0;
3560 + }
3561 +
3562 + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
3563 + client);
3564 +
3565 + pfe_gemac_init(priv);
3566 +
3567 + if (!is_valid_ether_addr(ndev->dev_addr)) {
3568 + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
3569 + rc = -EADDRNOTAVAIL;
3570 + goto err1;
3571 + }
3572 +
3573 + gemac_set_laddrN(priv->EMAC_baseaddr,
3574 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3575 +
3576 + napi_enable(&priv->high_napi);
3577 + napi_enable(&priv->low_napi);
3578 + napi_enable(&priv->lro_napi);
3579 +
3580 + rc = pfe_eth_start(priv);
3581 +
3582 + netif_tx_wake_all_queues(ndev);
3583 +
3584 + return rc;
3585 +
3586 +err1:
3587 + hif_lib_client_unregister(&priv->client);
3588 +
3589 +err0:
3590 + return rc;
3591 +}
3592 +
3593 +/*
3594 + * pfe_eth_shutdown
3595 + */
3596 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
3597 +{
3598 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3599 + int i, qstatus;
3600 + unsigned long next_poll = jiffies + 1, end = jiffies +
3601 + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3602 + int tx_pkts, prv_tx_pkts;
3603 +
3604 + netif_info(priv, ifdown, ndev, "%s\n", __func__);
3605 +
3606 + for (i = 0; i < emac_txq_cnt; i++)
3607 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
3608 +
3609 + netif_tx_stop_all_queues(ndev);
3610 +
3611 + do {
3612 + tx_pkts = 0;
3613 + pfe_eth_flush_tx(priv);
3614 +
3615 + for (i = 0; i < emac_txq_cnt; i++)
3616 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
3617 +
3618 + if (tx_pkts) {
3619 +			/* Don't wait forever; break if we cross the max timeout */
3620 + if (time_after(jiffies, end)) {
3621 + pr_err(
3622 + "(%s)Tx is not complete after %dmsec\n",
3623 + ndev->name, TX_POLL_TIMEOUT_MS);
3624 + break;
3625 + }
3626 +
3627 + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
3628 + , __func__, ndev->name, tx_pkts);
3629 + if (need_resched())
3630 + schedule();
3631 + }
3632 +
3633 + } while (tx_pkts);
3634 +
3635 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3636 +
3637 + prv_tx_pkts = tmu_pkts_processed(priv->id);
3638 + /*
3639 +	 * Wait until the TMU has transmitted all pending packets.
3640 +	 * Poll tmu_qstatus and the TMU packet counter once per jiffy and
3641 +	 * consider the TMU busy as long as its queue is non-empty or it is
3642 +	 * still processing packets.
3643 + */
3644 + while (1) {
3645 + if (time_after(jiffies, next_poll)) {
3646 + tx_pkts = tmu_pkts_processed(priv->id);
3647 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
3648 +
3649 + if (!qstatus && (tx_pkts == prv_tx_pkts))
3650 + break;
3651 + /* Don't wait forever, break if we cross max
3652 + * timeout(TX_POLL_TIMEOUT_MS)
3653 + */
3654 + if (time_after(jiffies, end)) {
3655 + pr_err("TMU%d is busy after %dmsec\n",
3656 + priv->id, TX_POLL_TIMEOUT_MS);
3657 + break;
3658 + }
3659 + prv_tx_pkts = tx_pkts;
3660 + next_poll++;
3661 + }
3662 + if (need_resched())
3663 + schedule();
3664 + }
3665 +	/* Wait a little longer for any remaining packets to finish transmitting */
3666 + next_poll = jiffies + 1;
3667 + while (1) {
3668 + if (time_after(jiffies, next_poll))
3669 + break;
3670 + if (need_resched())
3671 + schedule();
3672 + }
3673 +
3674 + pfe_eth_stop(ndev, wake);
3675 +
3676 + napi_disable(&priv->lro_napi);
3677 + napi_disable(&priv->low_napi);
3678 + napi_disable(&priv->high_napi);
3679 +
3680 + hif_lib_client_unregister(&priv->client);
3681 +
3682 + return 0;
3683 +}
3684 +
3685 +/* pfe_eth_close
3686 + *
3687 + */
3688 +static int pfe_eth_close(struct net_device *ndev)
3689 +{
3690 + pfe_eth_shutdown(ndev, 0);
3691 +
3692 + return 0;
3693 +}
3694 +
3695 +/* pfe_eth_suspend
3696 + *
3697 + * return value : 1 if netdevice is configured to wakeup system
3698 + * 0 otherwise
3699 + */
3700 +int pfe_eth_suspend(struct net_device *ndev)
3701 +{
3702 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3703 + int retval = 0;
3704 +
3705 + if (priv->wol) {
3706 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
3707 + retval = 1;
3708 + }
3709 + pfe_eth_shutdown(ndev, priv->wol);
3710 +
3711 + return retval;
3712 +}
3713 +
3714 +/* pfe_eth_resume
3715 + *
3716 + */
3717 +int pfe_eth_resume(struct net_device *ndev)
3718 +{
3719 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3720 +
3721 + if (priv->wol)
3722 + gemac_set_wol(priv->EMAC_baseaddr, 0);
3723 + gemac_tx_enable(priv->EMAC_baseaddr);
3724 +
3725 + return pfe_eth_open(ndev);
3726 +}
3727 +
3728 +/* pfe_eth_get_queuenum
3729 + */
3730 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
3731 + *skb)
3732 +{
3733 + int queuenum = 0;
3734 + unsigned long flags;
3735 +
3736 + /* Get the Fast Path queue number */
3737 + /*
3738 + * Use conntrack mark (if conntrack exists), then packet mark (if any),
3739 + * then fallback to default
3740 + */
3741 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3742 + if (skb->nfct) {
3743 + enum ip_conntrack_info cinfo;
3744 + struct nf_conn *ct;
3745 +
3746 + ct = nf_ct_get(skb, &cinfo);
3747 +
3748 + if (ct) {
3749 + u32 connmark;
3750 +
3751 + connmark = ct->mark;
3752 +
3753 + if ((connmark & 0x80000000) && priv->id != 0)
3754 + connmark >>= 16;
3755 +
3756 + queuenum = connmark & EMAC_QUEUENUM_MASK;
3757 + }
3758 + } else {/* continued after #endif ... */
3759 +#endif
3760 + if (skb->mark) {
3761 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
3762 + } else {
3763 + spin_lock_irqsave(&priv->lock, flags);
3764 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
3765 + spin_unlock_irqrestore(&priv->lock, flags);
3766 + }
3767 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3768 + }
3769 +#endif
3770 + return queuenum;
3771 +}
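+
+/*
+ * Example (assuming the default emac_txq_cnt of EMAC_TXQ_CNT, i.e. 16):
+ * EMAC_QUEUENUM_MASK is 0xf, so a conntrack or skb mark of 0x12 selects
+ * TX queue 2; conntrack marks with bit 31 set use bits 16-19 instead on
+ * any GEMAC other than 0.
+ */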
3772 +
3773 +/* pfe_eth_might_stop_tx
3774 + *
3775 + */
3776 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
3777 + struct netdev_queue *tx_queue,
3778 + unsigned int n_desc,
3779 + unsigned int n_segs)
3780 +{
3781 + ktime_t kt;
3782 +
3783 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
3784 + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
3785 + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
3786 +#ifdef PFE_ETH_TX_STATS
3787 + if (__hif_tx_avail(&pfe->hif) < n_desc) {
3788 + priv->stop_queue_hif[queuenum]++;
3789 + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
3790 + priv->stop_queue_hif_client[queuenum]++;
3791 + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
3792 + n_segs) {
3793 + priv->stop_queue_credit[queuenum]++;
3794 + }
3795 + priv->stop_queue_total[queuenum]++;
3796 +#endif
3797 + netif_tx_stop_queue(tx_queue);
3798 +
3799 + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
3800 + NSEC_PER_MSEC);
3801 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
3802 + HRTIMER_MODE_REL);
3803 + return -1;
3804 + } else {
3805 + return 0;
3806 + }
3807 +}
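+
+/*
+ * When the queue is stopped here, the hrtimer armed above re-wakes it via
+ * pfe_eth_fast_tx_timeout() after LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS
+ * (3 ms), so short descriptor shortages recover quickly.
+ */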
3808 +
3809 +#define SA_MAX_OP 2
3810 +/* pfe_hif_send_packet
3811 + *
3812 + * At this level if TX fails we drop the packet
3813 + */
3814 +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
3815 + *priv, int queuenum)
3816 +{
3817 + struct skb_shared_info *sh = skb_shinfo(skb);
3818 + unsigned int nr_frags;
3819 + u32 ctrl = 0;
3820 +
3821 + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
3822 +
3823 + if (skb_is_gso(skb)) {
3824 + priv->stats.tx_dropped++;
3825 + return;
3826 + }
3827 +
3828 + if (skb->ip_summed == CHECKSUM_PARTIAL)
3829 + ctrl = HIF_CTRL_TX_CHECKSUM;
3830 +
3831 + nr_frags = sh->nr_frags;
3832 +
3833 + if (nr_frags) {
3834 + skb_frag_t *f;
3835 + int i;
3836 +
3837 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3838 + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
3839 + skb);
3840 +
3841 + for (i = 0; i < nr_frags - 1; i++) {
3842 + f = &sh->frags[i];
3843 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3844 + skb_frag_address(f),
3845 + skb_frag_size(f),
3846 + 0x0, 0x0, skb);
3847 + }
3848 +
3849 + f = &sh->frags[i];
3850 +
3851 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3852 + skb_frag_address(f), skb_frag_size(f),
3853 + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
3854 + skb);
3855 +
3856 + netif_info(priv, tx_queued, priv->ndev,
3857 + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
3858 + __func__, skb, nr_frags, skb->len);
3859 + } else {
3860 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3861 + skb->len, ctrl, HIF_FIRST_BUFFER |
3862 + HIF_LAST_BUFFER | HIF_DATA_VALID,
3863 + skb);
3864 + netif_info(priv, tx_queued, priv->ndev,
3865 + "%s: pkt sent successfully skb:%p len:%d\n",
3866 + __func__, skb, skb->len);
3867 + }
3868 + hif_tx_dma_start();
3869 + priv->stats.tx_packets++;
3870 + priv->stats.tx_bytes += skb->len;
3871 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
3872 +}
3873 +
3874 +/* pfe_eth_flush_txQ
3875 + */
3876 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
3877 + from_tx, int n_desc)
3878 +{
3879 + struct sk_buff *skb;
3880 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3881 + tx_q_num);
3882 + unsigned int flags;
3883 +
3884 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3885 +
3886 + if (!from_tx)
3887 + __netif_tx_lock_bh(tx_queue);
3888 +
3889 + /* Clean HIF and client queue */
3890 + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
3891 + tx_q_num, &flags,
3892 + HIF_TX_DESC_NT))) {
3893 + if (flags & HIF_DATA_VALID)
3894 + dev_kfree_skb_any(skb);
3895 + }
3896 + if (!from_tx)
3897 + __netif_tx_unlock_bh(tx_queue);
3898 +}
3899 +
3900 +/* pfe_eth_flush_tx
3901 + */
3902 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
3903 +{
3904 + int ii;
3905 +
3906 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3907 +
3908 + for (ii = 0; ii < emac_txq_cnt; ii++)
3909 + pfe_eth_flush_txQ(priv, ii, 0, 0);
3910 +}
3911 +
3912 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
3913 + *n_segs)
3914 +{
3915 + struct skb_shared_info *sh = skb_shinfo(skb);
3916 +
3917 + /* Scattered data */
3918 + if (sh->nr_frags) {
3919 + *n_desc = sh->nr_frags + 1;
3920 + *n_segs = 1;
3921 + /* Regular case */
3922 + } else {
3923 + *n_desc = 1;
3924 + *n_segs = 1;
3925 + }
3926 +}
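+
+/*
+ * Example: a linear skb needs one HIF descriptor, while an skb carrying
+ * three page fragments needs four (header plus one per fragment); either
+ * way it counts as a single segment for TX credit accounting.
+ */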
3927 +
3928 +/* pfe_eth_send_packet
3929 + */
3930 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
3931 +{
3932 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3933 + int tx_q_num = skb_get_queue_mapping(skb);
3934 +	unsigned int n_desc, n_segs;
3935 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3936 + tx_q_num);
3937 +
3938 + netif_info(priv, tx_queued, ndev, "%s\n", __func__);
3939 +
3940 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
3941 + sizeof(unsigned long)))) {
3942 + netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
3943 + __func__);
3944 +
3945 + if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
3946 + long)), 0, GFP_ATOMIC)) {
3947 + /* No need to re-transmit, no way to recover*/
3948 + kfree_skb(skb);
3949 + priv->stats.tx_dropped++;
3950 + return NETDEV_TX_OK;
3951 + }
3952 + }
3953 +
3954 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
3955 +
3956 + hif_tx_lock(&pfe->hif);
3957 + if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
3958 + n_segs))) {
3959 +#ifdef PFE_ETH_TX_STATS
3960 + if (priv->was_stopped[tx_q_num]) {
3961 + priv->clean_fail[tx_q_num]++;
3962 + priv->was_stopped[tx_q_num] = 0;
3963 + }
3964 +#endif
3965 + hif_tx_unlock(&pfe->hif);
3966 + return NETDEV_TX_BUSY;
3967 + }
3968 +
3969 + pfe_hif_send_packet(skb, priv, tx_q_num);
3970 +
3971 + hif_tx_unlock(&pfe->hif);
3972 +
3973 + tx_queue->trans_start = jiffies;
3974 +
3975 +#ifdef PFE_ETH_TX_STATS
3976 + priv->was_stopped[tx_q_num] = 0;
3977 +#endif
3978 +
3979 + return NETDEV_TX_OK;
3980 +}
3981 +
3982 +/* pfe_eth_select_queue
3983 + *
3984 + */
3985 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
3986 + void *accel_priv,
3987 + select_queue_fallback_t fallback)
3988 +{
3989 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3990 +
3991 + return pfe_eth_get_queuenum(priv, skb);
3992 +}
3993 +
3994 +/* pfe_eth_get_stats
3995 + */
3996 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
3997 +{
3998 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3999 +
4000 + netif_info(priv, drv, ndev, "%s\n", __func__);
4001 +
4002 + return &priv->stats;
4003 +}
4004 +
4005 +/* pfe_eth_set_mac_address
4006 + */
4007 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
4008 +{
4009 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4010 + struct sockaddr *sa = addr;
4011 +
4012 + netif_info(priv, drv, ndev, "%s\n", __func__);
4013 +
4014 + if (!is_valid_ether_addr(sa->sa_data))
4015 + return -EADDRNOTAVAIL;
4016 +
4017 + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
4018 +
4019 + gemac_set_laddrN(priv->EMAC_baseaddr,
4020 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
4021 +
4022 + return 0;
4023 +}
4024 +
4025 +/* pfe_eth_enet_addr_byte_mac
4026 + */
4027 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
4028 + struct pfe_mac_addr *enet_addr)
4029 +{
4030 + if (!enet_byte_addr || !enet_addr) {
4031 + return -1;
4032 +
4033 + } else {
4034 + enet_addr->bottom = enet_byte_addr[0] |
4035 + (enet_byte_addr[1] << 8) |
4036 + (enet_byte_addr[2] << 16) |
4037 + (enet_byte_addr[3] << 24);
4038 + enet_addr->top = enet_byte_addr[4] |
4039 + (enet_byte_addr[5] << 8);
4040 + return 0;
4041 + }
4042 +}
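+
+/*
+ * Example: 00:11:22:33:44:55 packs to bottom = 0x33221100 and top = 0x5544,
+ * i.e. the address bytes are packed least-significant-byte first.
+ */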
4043 +
4044 +/* pfe_eth_set_multi
4045 + */
4046 +static void pfe_eth_set_multi(struct net_device *ndev)
4047 +{
4048 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4049 + struct pfe_mac_addr hash_addr; /* hash register structure */
4050 + /* specific mac address register structure */
4051 + struct pfe_mac_addr spec_addr;
4052 + int result; /* index into hash register to set.. */
4053 + int uc_count = 0;
4054 + struct netdev_hw_addr *ha;
4055 +
4056 + if (ndev->flags & IFF_PROMISC) {
4057 + netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4058 +
4059 + priv->promisc = 1;
4060 + gemac_enable_copy_all(priv->EMAC_baseaddr);
4061 + } else {
4062 + priv->promisc = 0;
4063 + gemac_disable_copy_all(priv->EMAC_baseaddr);
4064 + }
4065 +
4066 + /* Enable broadcast frame reception if required. */
4067 + if (ndev->flags & IFF_BROADCAST) {
4068 + gemac_allow_broadcast(priv->EMAC_baseaddr);
4069 + } else {
4070 + netif_info(priv, drv, ndev,
4071 + "disabling broadcast frame reception\n");
4072 +
4073 + gemac_no_broadcast(priv->EMAC_baseaddr);
4074 + }
4075 +
4076 + if (ndev->flags & IFF_ALLMULTI) {
4077 + /* Set the hash to rx all multicast frames */
4078 + hash_addr.bottom = 0xFFFFFFFF;
4079 + hash_addr.top = 0xFFFFFFFF;
4080 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4081 + netdev_for_each_uc_addr(ha, ndev) {
4082 + if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4083 + break;
4084 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4085 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4086 + uc_count + 2);
4087 + uc_count++;
4088 + }
4089 + } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
4090 + u8 *addr;
4091 +
4092 + hash_addr.bottom = 0;
4093 + hash_addr.top = 0;
4094 +
4095 + netdev_for_each_mc_addr(ha, ndev) {
4096 + addr = ha->addr;
4097 +
4098 + netif_info(priv, drv, ndev,
4099 + "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
4100 + addr[0], addr[1], addr[2],
4101 + addr[3], addr[4], addr[5]);
4102 +
4103 + result = pfe_eth_get_hash(addr);
4104 +
4105 + if (result < EMAC_HASH_REG_BITS) {
4106 + if (result < 32)
4107 + hash_addr.bottom |= (1 << result);
4108 + else
4109 + hash_addr.top |= (1 << (result - 32));
4110 + } else {
4111 + break;
4112 + }
4113 + }
4114 +
4115 + uc_count = -1;
4116 + netdev_for_each_uc_addr(ha, ndev) {
4117 + addr = ha->addr;
4118 +
4119 + if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
4120 + netdev_info(ndev,
4121 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4122 + addr[0], addr[1], addr[2],
4123 + addr[3], addr[4], addr[5]);
4124 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4125 + gemac_set_laddrN(priv->EMAC_baseaddr,
4126 + &spec_addr, uc_count + 2);
4127 + } else {
4128 + netif_info(priv, drv, ndev,
4129 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4130 + addr[0], addr[1], addr[2],
4131 + addr[3], addr[4], addr[5]);
4132 +
4133 + result = pfe_eth_get_hash(addr);
4134 + if (result >= EMAC_HASH_REG_BITS) {
4135 + break;
4136 +
4137 + } else {
4138 + if (result < 32)
4139 + hash_addr.bottom |= (1 <<
4140 + result);
4141 + else
4142 + hash_addr.top |= (1 <<
4143 + (result - 32));
4144 + }
4145 + }
4146 + }
4147 +
4148 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4149 + }
4150 +
4151 + if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
4152 + /*
4153 + * Check if there are any specific address HW registers that
4154 + * need to be flushed
4155 + */
4156 + for (uc_count = netdev_uc_count(ndev); uc_count <
4157 + MAX_UC_SPEC_ADDR_REG; uc_count++)
4158 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4159 + }
4160 +
4161 + if (ndev->flags & IFF_LOOPBACK)
4162 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4163 +}
4164 +
4165 +/* pfe_eth_set_features
4166 + */
4167 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
4168 + features)
4169 +{
4170 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4171 + int rc = 0;
4172 +
4173 + if (features & NETIF_F_RXCSUM)
4174 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4175 + else
4176 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4177 + return rc;
4178 +}
4179 +
4180 +/* pfe_eth_fast_tx_timeout
4181 + */
4182 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4183 +{
4184 + struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
4185 + pfe_eth_fast_timer,
4186 + timer);
4187 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4188 + struct pfe_eth_priv_s,
4189 + fast_tx_timeout);
4190 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4191 + fast_tx_timeout->queuenum);
4192 +
4193 + if (netif_tx_queue_stopped(tx_queue)) {
4194 +#ifdef PFE_ETH_TX_STATS
4195 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4196 +#endif
4197 + netif_tx_wake_queue(tx_queue);
4198 + }
4199 +
4200 + return HRTIMER_NORESTART;
4201 +}
4202 +
4203 +/* pfe_eth_fast_tx_timeout_init
4204 + */
4205 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4206 +{
4207 + int i;
4208 +
4209 + for (i = 0; i < emac_txq_cnt; i++) {
4210 + priv->fast_tx_timeout[i].queuenum = i;
4211 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4212 + HRTIMER_MODE_REL);
4213 + priv->fast_tx_timeout[i].timer.function =
4214 + pfe_eth_fast_tx_timeout;
4215 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4216 + }
4217 +}
4218 +
4219 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4220 + struct pfe_eth_priv_s *priv,
4221 + unsigned int qno)
4222 +{
4223 + void *buf_addr;
4224 + unsigned int rx_ctrl;
4225 + unsigned int desc_ctrl = 0;
4226 + struct hif_ipsec_hdr *ipsec_hdr = NULL;
4227 + struct sk_buff *skb;
4228 + struct sk_buff *skb_frag, *skb_frag_last = NULL;
4229 + int length = 0, offset;
4230 +
4231 + skb = priv->skb_inflight[qno];
4232 +
4233 + if (skb) {
4234 + skb_frag_last = skb_shinfo(skb)->frag_list;
4235 + if (skb_frag_last) {
4236 + while (skb_frag_last->next)
4237 + skb_frag_last = skb_frag_last->next;
4238 + }
4239 + }
4240 +
4241 + while (!(desc_ctrl & CL_DESC_LAST)) {
4242 + buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4243 + &offset, &rx_ctrl, &desc_ctrl,
4244 + (void **)&ipsec_hdr);
4245 + if (!buf_addr)
4246 + goto incomplete;
4247 +
4248 +#ifdef PFE_ETH_NAPI_STATS
4249 + priv->napi_counters[NAPI_DESC_COUNT]++;
4250 +#endif
4251 +
4252 + /* First frag */
4253 + if (desc_ctrl & CL_DESC_FIRST) {
4254 + skb = build_skb(buf_addr, 0);
4255 + if (unlikely(!skb))
4256 + goto pkt_drop;
4257 +
4258 + skb_reserve(skb, offset);
4259 + skb_put(skb, length);
4260 + skb->dev = ndev;
4261 +
4262 + if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4263 + HIF_CTRL_RX_CHECKSUMMED))
4264 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4265 + else
4266 + skb_checksum_none_assert(skb);
4267 +
4268 + } else {
4269 + /* Next frags */
4270 + if (unlikely(!skb)) {
4271 + pr_err("%s: NULL skb_inflight\n",
4272 + __func__);
4273 + goto pkt_drop;
4274 + }
4275 +
4276 + skb_frag = build_skb(buf_addr, 0);
4277 +
4278 + if (unlikely(!skb_frag)) {
4279 + kfree(buf_addr);
4280 + goto pkt_drop;
4281 + }
4282 +
4283 + skb_reserve(skb_frag, offset);
4284 + skb_put(skb_frag, length);
4285 +
4286 + skb_frag->dev = ndev;
4287 +
4288 + if (skb_shinfo(skb)->frag_list)
4289 + skb_frag_last->next = skb_frag;
4290 + else
4291 + skb_shinfo(skb)->frag_list = skb_frag;
4292 +
4293 + skb->truesize += skb_frag->truesize;
4294 + skb->data_len += length;
4295 + skb->len += length;
4296 + skb_frag_last = skb_frag;
4297 + }
4298 + }
4299 +
4300 + priv->skb_inflight[qno] = NULL;
4301 + return skb;
4302 +
4303 +incomplete:
4304 + priv->skb_inflight[qno] = skb;
4305 + return NULL;
4306 +
4307 +pkt_drop:
4308 + priv->skb_inflight[qno] = NULL;
4309 +
4310 + if (skb)
4311 + kfree_skb(skb);
4312 + else
4313 + kfree(buf_addr);
4314 +
4315 + priv->stats.rx_errors++;
4316 +
4317 + return NULL;
4318 +}
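+
+/*
+ * Note: build_skb() wraps the HIF receive buffer without copying. Frames
+ * spanning several buffers are chained through skb_shinfo(skb)->frag_list,
+ * and a partially received chain is parked in skb_inflight[] between polls.
+ */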
4319 +
4320 +/* pfe_eth_poll
4321 + */
4322 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4323 + unsigned int qno, int budget)
4324 +{
4325 + struct net_device *ndev = priv->ndev;
4326 + struct sk_buff *skb;
4327 + int work_done = 0;
4328 + unsigned int len;
4329 +
4330 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4331 +
4332 +#ifdef PFE_ETH_NAPI_STATS
4333 + priv->napi_counters[NAPI_POLL_COUNT]++;
4334 +#endif
4335 +
4336 + do {
4337 + skb = pfe_eth_rx_skb(ndev, priv, qno);
4338 +
4339 + if (!skb)
4340 + break;
4341 +
4342 + len = skb->len;
4343 +
4344 + /* Packet will be processed */
4345 + skb->protocol = eth_type_trans(skb, ndev);
4346 +
4347 + netif_receive_skb(skb);
4348 +
4349 + priv->stats.rx_packets++;
4350 + priv->stats.rx_bytes += len;
4351 +
4352 + work_done++;
4353 +
4354 +#ifdef PFE_ETH_NAPI_STATS
4355 + priv->napi_counters[NAPI_PACKET_COUNT]++;
4356 +#endif
4357 +
4358 + } while (work_done < budget);
4359 +
4360 + /*
4361 +	 * If less than the full budget of Rx work was done, exit polling
4362 +	 * mode. No netif_running(dev) check is required here, as this is
4363 +	 * checked in net/core/dev.c (2.6.33.5 kernel specific).
4364 + */
4365 + if (work_done < budget) {
4366 + napi_complete(napi);
4367 +
4368 + hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4369 + qno);
4370 + }
4371 +#ifdef PFE_ETH_NAPI_STATS
4372 + else
4373 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4374 +#endif
4375 +
4376 + return work_done;
4377 +}
4378 +
4379 +/*
4380 + * pfe_eth_lro_poll
4381 + */
4382 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4383 +{
4384 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4385 + lro_napi);
4386 +
4387 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4388 +
4389 + return pfe_eth_poll(priv, napi, 2, budget);
4390 +}
4391 +
4392 +/* pfe_eth_low_poll
4393 + */
4394 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4395 +{
4396 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4397 + low_napi);
4398 +
4399 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4400 +
4401 + return pfe_eth_poll(priv, napi, 1, budget);
4402 +}
4403 +
4404 +/* pfe_eth_high_poll
4405 + */
4406 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4407 +{
4408 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4409 + high_napi);
4410 +
4411 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4412 +
4413 + return pfe_eth_poll(priv, napi, 0, budget);
4414 +}
4415 +
4416 +static const struct net_device_ops pfe_netdev_ops = {
4417 + .ndo_open = pfe_eth_open,
4418 + .ndo_stop = pfe_eth_close,
4419 + .ndo_start_xmit = pfe_eth_send_packet,
4420 + .ndo_select_queue = pfe_eth_select_queue,
4421 + .ndo_get_stats = pfe_eth_get_stats,
4422 + .ndo_set_mac_address = pfe_eth_set_mac_address,
4423 + .ndo_set_rx_mode = pfe_eth_set_multi,
4424 + .ndo_set_features = pfe_eth_set_features,
4425 + .ndo_validate_addr = eth_validate_addr,
4426 +};
4427 +
4428 +/* pfe_eth_init_one
4429 + */
4430 +static int pfe_eth_init_one(struct pfe *pfe, int id)
4431 +{
4432 + struct net_device *ndev = NULL;
4433 + struct pfe_eth_priv_s *priv = NULL;
4434 + struct ls1012a_eth_platform_data *einfo;
4435 + struct ls1012a_mdio_platform_data *minfo;
4436 + struct ls1012a_pfe_platform_data *pfe_info;
4437 + int err;
4438 +
4439 +	/* Extract platform data */
4440 + pfe_info = (struct ls1012a_pfe_platform_data *)
4441 + pfe->dev->platform_data;
4442 + if (!pfe_info) {
4443 + pr_err(
4444 + "%s: pfe missing additional platform data\n"
4445 + , __func__);
4446 + err = -ENODEV;
4447 + goto err0;
4448 + }
4449 +
4450 + einfo = (struct ls1012a_eth_platform_data *)
4451 + pfe_info->ls1012a_eth_pdata;
4452 +
4453 +	/* einfo should never be NULL, but there is no harm in checking */
4454 + if (!einfo) {
4455 + pr_err(
4456 + "%s: pfe missing additional gemacs platform data\n"
4457 + , __func__);
4458 + err = -ENODEV;
4459 + goto err0;
4460 + }
4461 +
4462 + minfo = (struct ls1012a_mdio_platform_data *)
4463 + pfe_info->ls1012a_mdio_pdata;
4464 +
4465 +	/* minfo should never be NULL, but there is no harm in checking */
4466 + if (!minfo) {
4467 + pr_err(
4468 + "%s: pfe missing additional mdios platform data\n",
4469 + __func__);
4470 + err = -ENODEV;
4471 + goto err0;
4472 + }
4473 +
4474 + /* Create an ethernet device instance */
4475 + ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4476 +
4477 + if (!ndev) {
4478 + pr_err("%s: gemac %d device allocation failed\n",
4479 + __func__, einfo[id].gem_id);
4480 + err = -ENOMEM;
4481 + goto err0;
4482 + }
4483 +
4484 + priv = netdev_priv(ndev);
4485 + priv->ndev = ndev;
4486 + priv->id = einfo[id].gem_id;
4487 + priv->pfe = pfe;
4488 +
4489 + SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4490 +
4491 + pfe->eth.eth_priv[id] = priv;
4492 +
4493 + /* Set the info in the priv to the current info */
4494 + priv->einfo = &einfo[id];
4495 + priv->EMAC_baseaddr = cbus_emac_base[id];
4496 + priv->PHY_baseaddr = cbus_emac_base[0];
4497 + priv->GPI_baseaddr = cbus_gpi_base[id];
4498 +
4499 +#define HIF_GEMAC_TMUQ_BASE 6
4500 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
4501 + priv->high_tmu_q = priv->low_tmu_q + 1;
4502 +
4503 + spin_lock_init(&priv->lock);
4504 +
4505 + pfe_eth_fast_tx_timeout_init(priv);
4506 +
4507 +	/* Copy the station address into the dev structure */
4508 + memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
4509 +
4510 + /* Initialize mdio */
4511 + if (minfo[id].enabled) {
4512 + err = pfe_eth_mdio_init(priv, &minfo[id]);
4513 + if (err) {
4514 + netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
4515 + __func__);
4516 + goto err2;
4517 + }
4518 + }
4519 +
4520 + ndev->mtu = 1500;
4521 +
4522 + /* Set MTU limits */
4523 + ndev->min_mtu = ETH_MIN_MTU;
4524 + ndev->max_mtu = JUMBO_FRAME_SIZE;
4525 +
4526 +	/* supported features (checksum offload enabled after validation) */
4527 +	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
4528 +			NETIF_F_IPV6_CSUM | NETIF_F_SG;
4532 +
4533 + /* enabled by default */
4534 + ndev->features = ndev->hw_features;
4535 +
4536 + priv->usr_features = ndev->features;
4537 +
4538 + ndev->netdev_ops = &pfe_netdev_ops;
4539 +
4540 + ndev->ethtool_ops = &pfe_ethtool_ops;
4541 +
4542 + /* Enable basic messages by default */
4543 + priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
4544 + NETIF_MSG_PROBE;
4545 +
4546 + netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
4547 + HIF_RX_POLL_WEIGHT - 16);
4548 + netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
4549 + HIF_RX_POLL_WEIGHT - 16);
4550 + netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
4551 + HIF_RX_POLL_WEIGHT - 16);
4552 +
4553 + err = register_netdev(ndev);
4554 +
4555 + if (err) {
4556 + netdev_err(ndev, "register_netdev() failed\n");
4557 + goto err3;
4558 + }
4559 + device_init_wakeup(&ndev->dev, WAKE_MAGIC);
4560 +
4561 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
4562 + err = pfe_phy_init(ndev);
4563 + if (err) {
4564 + netdev_err(ndev, "%s: pfe_phy_init() failed\n",
4565 + __func__);
4566 + goto err4;
4567 + }
4568 + }
4569 +
4570 + netif_carrier_on(ndev);
4571 +
4572 + /* Create all the sysfs files */
4573 + if (pfe_eth_sysfs_init(ndev))
4574 + goto err4;
4575 +
4576 + netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
4577 + __func__, priv->EMAC_baseaddr);
4578 +
4579 + return 0;
4580 +err4:
4581 + unregister_netdev(ndev);
4582 +err3:
4583 + pfe_eth_mdio_exit(priv->mii_bus);
4584 +err2:
4585 + free_netdev(priv->ndev);
4586 +err0:
4587 + return err;
4588 +}
4589 +
4590 +/* pfe_eth_init
4591 + */
4592 +int pfe_eth_init(struct pfe *pfe)
4593 +{
4594 + int ii = 0;
4595 + int err;
4596 +
4597 + pr_info("%s\n", __func__);
4598 +
4599 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
4600 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
4601 +
4602 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
4603 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
4604 +
4605 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
4606 + err = pfe_eth_init_one(pfe, ii);
4607 + if (err)
4608 + goto err0;
4609 + }
4610 +
4611 + return 0;
4612 +
4613 +err0:
4614 + while (ii--)
4615 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4616 +
4618 + return err;
4619 +}
4620 +
4621 +/* pfe_eth_exit_one
4622 + */
4623 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
4624 +{
4625 + netif_info(priv, probe, priv->ndev, "%s\n", __func__);
4626 +
4627 + pfe_eth_sysfs_exit(priv->ndev);
4628 +
4629 + unregister_netdev(priv->ndev);
4630 +
4631 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
4632 + pfe_phy_exit(priv->ndev);
4633 +
4634 + if (priv->mii_bus)
4635 + pfe_eth_mdio_exit(priv->mii_bus);
4636 +
4637 + free_netdev(priv->ndev);
4638 +}
4639 +
4640 +/* pfe_eth_exit
4641 + */
4642 +void pfe_eth_exit(struct pfe *pfe)
4643 +{
4644 + int ii;
4645 +
4646 + pr_info("%s\n", __func__);
4647 +
4648 + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
4649 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4650 +}
4651 diff --git a/drivers/staging/fsl_ppfe/pfe_eth.h b/drivers/staging/fsl_ppfe/pfe_eth.h
4652 new file mode 100644
4653 index 00000000..721bef3e
4654 --- /dev/null
4655 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
4656 @@ -0,0 +1,184 @@
4657 +/*
4658 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4659 + * Copyright 2017 NXP
4660 + *
4661 + * This program is free software; you can redistribute it and/or modify
4662 + * it under the terms of the GNU General Public License as published by
4663 + * the Free Software Foundation; either version 2 of the License, or
4664 + * (at your option) any later version.
4665 + *
4666 + * This program is distributed in the hope that it will be useful,
4667 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4668 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4669 + * GNU General Public License for more details.
4670 + *
4671 + * You should have received a copy of the GNU General Public License
4672 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4673 + */
4674 +
4675 +#ifndef _PFE_ETH_H_
4676 +#define _PFE_ETH_H_
4677 +#include <linux/kernel.h>
4678 +#include <linux/netdevice.h>
4679 +#include <linux/etherdevice.h>
4680 +#include <linux/ethtool.h>
4681 +#include <linux/mii.h>
4682 +#include <linux/phy.h>
4683 +#include <linux/clk.h>
4684 +#include <linux/interrupt.h>
4685 +#include <linux/time.h>
4686 +
4687 +#define PFE_ETH_NAPI_STATS
4688 +#define PFE_ETH_TX_STATS
4689 +
4690 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
4691 +#define LRO_LEN_COUNT_MAX 32
4692 +#define LRO_NB_COUNT_MAX 32
4693 +
4694 +#define PFE_PAUSE_FLAG_ENABLE 1
4695 +#define PFE_PAUSE_FLAG_AUTONEG 2
4696 +
4697 +/* GEMAC configured by SW */
4698 +/* GEMAC configured by phy lines (not for MII/GMII) */
4699 +
4700 +#define GEMAC_SW_FULL_DUPLEX BIT(9)
4701 +#define GEMAC_SW_SPEED_10M (0 << 12)
4702 +#define GEMAC_SW_SPEED_100M BIT(12)
4703 +#define GEMAC_SW_SPEED_1G (2 << 12)
4704 +
4705 +#define GEMAC_NO_PHY BIT(0)
4706 +
4707 +struct ls1012a_eth_platform_data {
4708 + /* device specific information */
4709 + u32 device_flags;
4710 + char name[16];
4711 +
4712 + /* board specific information */
4713 + u32 mii_config;
4714 + u32 phy_flags;
4715 + u32 gem_id;
4716 + u32 bus_id;
4717 + u32 phy_id;
4718 + u32 mdio_muxval;
4719 + u8 mac_addr[ETH_ALEN];
4720 +};
4721 +
4722 +struct ls1012a_mdio_platform_data {
4723 + int enabled;
4724 + int irq[32];
4725 + u32 phy_mask;
4726 + int mdc_div;
4727 +};
4728 +
4729 +struct ls1012a_pfe_platform_data {
4730 + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
4731 + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
4732 +};
4733 +
4734 +#define NUM_GEMAC_SUPPORT 2
4735 +#define DRV_NAME "pfe-eth"
4736 +#define DRV_VERSION "1.0"
4737 +
4738 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
4739 +#define TX_POLL_TIMEOUT_MS 1000
4740 +
4741 +#define EMAC_TXQ_CNT 16
4742 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
4743 +
4744 +#define JUMBO_FRAME_SIZE 10258
4745 +/*
4746 + * Client Tx queue threshold, for txQ flush condition.
4747 + * It must be smaller than the queue size (in case we ever change it in the
4748 + * future).
4749 + */
4750 +#define HIF_CL_TX_FLUSH_MARK 32
4751 +
4752 +/*
4753 + * Max number of TX resources (HIF descriptors or skbs) that will be released
4754 + * in a single go during batch recycling.
4755 + * Should be lower than the flush mark so the SW can provide the HW with a
4756 + * continuous stream of packets instead of bursts.
4757 + */
4758 +#define TX_FREE_MAX_COUNT 16
4759 +#define EMAC_RXQ_CNT 3
4760 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
4761 +/* make sure clients can receive a full burst of packets */
4762 +#define EMAC_RMON_TXBYTES_POS 0x00
4763 +#define EMAC_RMON_RXBYTES_POS 0x14
4764 +
4765 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
4766 +#define EMAC_MDIO_TIMEOUT 1000
4767 +#define MAX_UC_SPEC_ADDR_REG 31
4768 +
4769 +struct pfe_eth_fast_timer {
4770 + int queuenum;
4771 + struct hrtimer timer;
4772 + void *base;
4773 +};
4774 +
4775 +struct pfe_eth_priv_s {
4776 + struct pfe *pfe;
4777 + struct hif_client_s client;
4778 + struct napi_struct lro_napi;
4779 + struct napi_struct low_napi;
4780 + struct napi_struct high_napi;
4781 + int low_tmu_q;
4782 + int high_tmu_q;
4783 + struct net_device_stats stats;
4784 + struct net_device *ndev;
4785 + int id;
4786 + int promisc;
4787 + unsigned int msg_enable;
4788 + unsigned int usr_features;
4789 +
4790 + spinlock_t lock; /* protect member variables */
4791 + unsigned int event_status;
4792 + int irq;
4793 + void *EMAC_baseaddr;
4794 + /* This points to the EMAC base from where we access PHY */
4795 + void *PHY_baseaddr;
4796 + void *GPI_baseaddr;
4797 + /* PHY stuff */
4798 + struct phy_device *phydev;
4799 + int oldspeed;
4800 + int oldduplex;
4801 + int oldlink;
4802 + /* mdio info */
4803 + int mdc_div;
4804 + struct mii_bus *mii_bus;
4805 + struct clk *gemtx_clk;
4806 + int wol;
4807 + int pause_flag;
4808 +
4809 + int default_priority;
4810 + struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
4811 +
4812 + struct ls1012a_eth_platform_data *einfo;
4813 + struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
4814 +
4815 +#ifdef PFE_ETH_TX_STATS
4816 + unsigned int stop_queue_total[EMAC_TXQ_CNT];
4817 + unsigned int stop_queue_hif[EMAC_TXQ_CNT];
4818 + unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
4819 + unsigned int stop_queue_credit[EMAC_TXQ_CNT];
4820 + unsigned int clean_fail[EMAC_TXQ_CNT];
4821 + unsigned int was_stopped[EMAC_TXQ_CNT];
4822 +#endif
4823 +
4824 +#ifdef PFE_ETH_NAPI_STATS
4825 + unsigned int napi_counters[NAPI_MAX_COUNT];
4826 +#endif
4827 + unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
4828 +};
4829 +
4830 +struct pfe_eth {
4831 + struct pfe_eth_priv_s *eth_priv[3];
4832 +};
4833 +
4834 +int pfe_eth_init(struct pfe *pfe);
4835 +void pfe_eth_exit(struct pfe *pfe);
4836 +int pfe_eth_suspend(struct net_device *dev);
4837 +int pfe_eth_resume(struct net_device *dev);
4838 +int pfe_eth_mdio_reset(struct mii_bus *bus);
4839 +
4840 +#endif /* _PFE_ETH_H_ */
4841 diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.c b/drivers/staging/fsl_ppfe/pfe_firmware.c
4842 new file mode 100644
4843 index 00000000..47462b9f
4844 --- /dev/null
4845 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
4846 @@ -0,0 +1,314 @@
4847 +/*
4848 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4849 + * Copyright 2017 NXP
4850 + *
4851 + * This program is free software; you can redistribute it and/or modify
4852 + * it under the terms of the GNU General Public License as published by
4853 + * the Free Software Foundation; either version 2 of the License, or
4854 + * (at your option) any later version.
4855 + *
4856 + * This program is distributed in the hope that it will be useful,
4857 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4858 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4859 + * GNU General Public License for more details.
4860 + *
4861 + * You should have received a copy of the GNU General Public License
4862 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4863 + */
4864 +
4865 +/*
4866 + * @file
4867 + * Contains all the functions to handle parsing and loading of PE firmware
4868 + * files.
4869 + */
4870 +#include <linux/firmware.h>
4871 +
4872 +#include "pfe_mod.h"
4873 +#include "pfe_firmware.h"
4874 +#include "pfe/pfe.h"
4875 +
4876 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
4877 + const char *section)
4878 +{
4879 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4880 + struct elf32_shdr *shdr;
4881 + struct elf32_shdr *shdr_shstr;
4882 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
4883 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
4884 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
4885 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
4886 + Elf32_Off shstr_offset;
4887 + Elf32_Word sh_name;
4888 + const char *name;
4889 + int i;
4890 +
4891 + /* Section header strings */
4892 + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
4893 + e_shentsize);
4894 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
4895 +
4896 + for (i = 0; i < e_shnum; i++) {
4897 + shdr = (struct elf32_shdr *)(fw->data + e_shoff
4898 + + i * e_shentsize);
4899 +
4900 + sh_name = be32_to_cpu(shdr->sh_name);
4901 +
4902 + name = (const char *)(fw->data + shstr_offset + sh_name);
4903 +
4904 + if (!strcmp(name, section))
4905 + return shdr;
4906 + }
4907 +
4908 + pr_err("%s: didn't find section %s\n", __func__, section);
4909 +
4910 + return NULL;
4911 +}
4912 +
4913 +#if defined(CFG_DIAGS)
4914 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
4915 + *diags_info)
4916 +{
4917 + struct elf32_shdr *shdr;
4918 + unsigned long offset, size;
4919 +
4920 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
4921 + if (shdr) {
4922 + offset = be32_to_cpu(shdr->sh_offset);
4923 + size = be32_to_cpu(shdr->sh_size);
4924 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
4925 + diags_info->diags_str_size = size;
4926 + diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
4927 + memcpy(diags_info->diags_str_array, fw->data + offset, size);
4928 +
4929 + return 0;
4930 + } else {
4931 + return -1;
4932 + }
4933 +}
4934 +#endif
4935 +
4936 +static void pfe_check_version_info(const struct firmware *fw)
4937 +{
4939 + static char *version;
4940 +
4941 + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
4942 +
4943 + if (shdr) {
4944 + if (!version) {
4945 + /*
4946 + * this is the first fw we load, use its version
4947 + * string as reference (whatever it is)
4948 + */
4949 + version = (char *)(fw->data +
4950 + be32_to_cpu(shdr->sh_offset));
4951 +
4952 + pr_info("PFE binary version: %s\n", version);
4953 + } else {
4954 + /*
4955 + * already have loaded at least one firmware, check
4956 + * sequence can start now
4957 + */
4958 + if (strcmp(version, (char *)(fw->data +
4959 + be32_to_cpu(shdr->sh_offset)))) {
4960 + pr_info(
4961 + "WARNING: PFE firmware binaries from incompatible version\n");
4962 + }
4963 + }
4964 + } else {
4965 + /*
4966 + * version cannot be verified, a potential issue that should
4967 +		 * the version cannot be verified, a potential issue that
4968 +		 * should be reported
4969 +		 */
4970 +		pr_info(
4971 +		"WARNING: PFE firmware binary version cannot be verified\n");
4972 +}
4973 +
4974 +/* PFE elf firmware loader.
4975 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
4976 + *
4977 + * @param pe_mask	Mask of PE IDs to load the firmware to
4978 + * @param fw Pointer to the firmware image
4979 + *
4980 + * @return 0 on success, a negative value on error
4981 + *
4982 + */
4983 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
4984 +{
4985 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4986 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
4987 + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
4988 + be32_to_cpu(elf_hdr->e_shoff));
4989 + int id, section;
4990 + int rc;
4991 +
4992 + pr_info("%s\n", __func__);
4993 +
4994 + /* Some sanity checks */
4995 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
4996 + pr_err("%s: incorrect elf magic number\n", __func__);
4997 + return -EINVAL;
4998 + }
4999 +
5000 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
5001 + pr_err("%s: incorrect elf class(%x)\n", __func__,
5002 + elf_hdr->e_ident[EI_CLASS]);
5003 + return -EINVAL;
5004 + }
5005 +
5006 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
5007 + pr_err("%s: incorrect elf data(%x)\n", __func__,
5008 + elf_hdr->e_ident[EI_DATA]);
5009 + return -EINVAL;
5010 + }
5011 +
5012 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
5013 + pr_err("%s: incorrect elf file type(%x)\n", __func__,
5014 + be16_to_cpu(elf_hdr->e_type));
5015 + return -EINVAL;
5016 + }
5017 +
5018 + for (section = 0; section < sections; section++, shdr++) {
5019 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
5020 + SHF_EXECINSTR)))
5021 + continue;
5022 +
5023 + for (id = 0; id < MAX_PE; id++)
5024 + if (pe_mask & (1 << id)) {
5025 + rc = pe_load_elf_section(id, fw->data, shdr,
5026 + pfe->dev);
5027 + if (rc < 0)
5028 + goto err;
5029 + }
5030 + }
5031 +
5032 + pfe_check_version_info(fw);
5033 +
5034 + return 0;
5035 +
5036 +err:
5037 + return rc;
5038 +}
5039 +
5040 +/* PFE firmware initialization.
5041 + * Loads different firmware files from filesystem.
5042 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
5043 + * Initializes control path symbol addresses (by looking them up in the elf
5044 + * firmware files)
5045 + * Takes PE's out of reset
5046 + *
5047 + * @return 0 on success, a negative value on error
5048 + *
5049 + */
5050 +int pfe_firmware_init(struct pfe *pfe)
5051 +{
5052 + const struct firmware *class_fw, *tmu_fw;
5053 + int rc = 0;
5054 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5055 + const char *util_fw_name;
5056 + const struct firmware *util_fw;
5057 +#endif
5058 +
5059 + pr_info("%s\n", __func__);
5060 +
5061 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5062 + pr_err("%s: request firmware %s failed\n", __func__,
5063 + CLASS_FIRMWARE_FILENAME);
5064 + rc = -ETIMEDOUT;
5065 + goto err0;
5066 + }
5067 +
5068 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5069 + pr_err("%s: request firmware %s failed\n", __func__,
5070 + TMU_FIRMWARE_FILENAME);
5071 + rc = -ETIMEDOUT;
5072 + goto err1;
5073 +	}
5074 +
5075 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5076 + util_fw_name = UTIL_FIRMWARE_FILENAME;
5077 +
5078 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5079 + pr_err("%s: request firmware %s failed\n", __func__,
5080 + util_fw_name);
5081 + rc = -ETIMEDOUT;
5082 + goto err2;
5083 + }
5084 +#endif
5085 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5086 + if (rc < 0) {
5087 + pr_err("%s: class firmware load failed\n", __func__);
5088 + goto err3;
5089 + }
5090 +
5091 +#if defined(CFG_DIAGS)
5092 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5093 + if (rc < 0) {
5094 +		pr_warn("PFE diags won't be available for class PEs\n");
5096 + rc = 0;
5097 + }
5098 +#endif
5099 +
5100 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5101 + if (rc < 0) {
5102 + pr_err("%s: tmu firmware load failed\n", __func__);
5103 + goto err3;
5104 + }
5105 +
5106 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5107 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5108 + if (rc < 0) {
5109 + pr_err("%s: util firmware load failed\n", __func__);
5110 + goto err3;
5111 + }
5112 +
5113 +#if defined(CFG_DIAGS)
5114 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5115 + if (rc < 0) {
5116 +		pr_warn("PFE diags won't be available for util PE\n");
5118 + rc = 0;
5119 + }
5120 +#endif
5121 +
5122 + util_enable();
5123 +#endif
5124 +
5125 + tmu_enable(0xf);
5126 + class_enable();
5127 +
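+	/*
+	 * Note: the success path intentionally falls through to err3 below,
+	 * as the firmware images are no longer needed once they have been
+	 * loaded into the PEs.
+	 */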
5128 +err3:
5129 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5130 + release_firmware(util_fw);
5131 +
5132 +err2:
5133 +#endif
5134 + release_firmware(tmu_fw);
5135 +
5136 +err1:
5137 + release_firmware(class_fw);
5138 +
5139 +err0:
5140 + return rc;
5141 +}
5142 +
5143 +/* PFE firmware cleanup
5144 + * Puts PEs in reset
5145 + */
5148 +void pfe_firmware_exit(struct pfe *pfe)
5149 +{
5150 + pr_info("%s\n", __func__);
5151 +
5152 + if (pe_reset_all(&pfe->ctrl) != 0)
5153 + pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5154 +
5155 + class_disable();
5156 + tmu_disable(0xf);
5157 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5158 + util_disable();
5159 +#endif
5160 +}
5161 diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.h b/drivers/staging/fsl_ppfe/pfe_firmware.h
5162 new file mode 100644
5163 index 00000000..5ade848b
5164 --- /dev/null
5165 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5166 @@ -0,0 +1,32 @@
5167 +/*
5168 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5169 + * Copyright 2017 NXP
5170 + *
5171 + * This program is free software; you can redistribute it and/or modify
5172 + * it under the terms of the GNU General Public License as published by
5173 + * the Free Software Foundation; either version 2 of the License, or
5174 + * (at your option) any later version.
5175 + *
5176 + * This program is distributed in the hope that it will be useful,
5177 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5178 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5179 + * GNU General Public License for more details.
5180 + *
5181 + * You should have received a copy of the GNU General Public License
5182 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5183 + */
5184 +
5185 +#ifndef _PFE_FIRMWARE_H_
5186 +#define _PFE_FIRMWARE_H_
5187 +
5188 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
5189 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
5190 +
5191 +#define PFE_FW_CHECK_PASS 0
5192 +#define PFE_FW_CHECK_FAIL 1
5193 +#define NUM_PFE_FW 3
5194 +
5195 +int pfe_firmware_init(struct pfe *pfe);
5196 +void pfe_firmware_exit(struct pfe *pfe);
5197 +
5198 +#endif /* _PFE_FIRMWARE_H_ */
5199 diff --git a/drivers/staging/fsl_ppfe/pfe_hal.c b/drivers/staging/fsl_ppfe/pfe_hal.c
5200 new file mode 100644
5201 index 00000000..0915034b
5202 --- /dev/null
5203 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5204 @@ -0,0 +1,1516 @@
5205 +/*
5206 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5207 + * Copyright 2017 NXP
5208 + *
5209 + * This program is free software; you can redistribute it and/or modify
5210 + * it under the terms of the GNU General Public License as published by
5211 + * the Free Software Foundation; either version 2 of the License, or
5212 + * (at your option) any later version.
5213 + *
5214 + * This program is distributed in the hope that it will be useful,
5215 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5216 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5217 + * GNU General Public License for more details.
5218 + *
5219 + * You should have received a copy of the GNU General Public License
5220 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5221 + */
5222 +
5223 +#include "pfe_mod.h"
5224 +#include "pfe/pfe.h"
5225 +
5226 +void *cbus_base_addr;
5227 +void *ddr_base_addr;
5228 +unsigned long ddr_phys_base_addr;
5229 +unsigned int ddr_size;
5230 +
5231 +static struct pe_info pe[MAX_PE];
5232 +
5233 +/* Initializes the PFE library.
5234 + * Must be called before using any of the library functions.
5235 + *
5236 + * @param[in] cbus_base CBUS virtual base address (as mapped in
5237 + * the host CPU address space)
5238 + * @param[in] ddr_base PFE DDR range virtual base address (as
5239 + * mapped in the host CPU address space)
5240 + * @param[in] ddr_phys_base PFE DDR range physical base address (as
5241 + * mapped in platform)
5242 + * @param[in] size PFE DDR range size (as defined by the host
5243 + * software)
5244 + */
5245 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5246 + unsigned int size)
5247 +{
5248 + cbus_base_addr = cbus_base;
5249 + ddr_base_addr = ddr_base;
5250 + ddr_phys_base_addr = ddr_phys_base;
5251 + ddr_size = size;
5252 +
5253 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5254 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5255 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5256 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5257 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5258 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5259 +
5260 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5261 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5262 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5263 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5264 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5265 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5266 +
5267 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5268 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5269 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5270 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5271 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5272 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5273 +
5274 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5275 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5276 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5277 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5278 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5279 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5280 +
5281 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5282 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5283 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5284 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5285 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5286 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5287 +
5288 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5289 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5290 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5291 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5292 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5293 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5294 +
5295 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5296 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5297 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5298 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5299 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5300 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5301 +
5302 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5303 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5304 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5305 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5306 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5307 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5308 +
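+	/*
+	 * TMU2 is intentionally skipped here: it is not used on this SoC, so
+	 * only TMU0, TMU1 and TMU3 are set up (tmu_init() likewise skips
+	 * phyno 2).
+	 */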
5309 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5310 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5311 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5312 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5313 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5314 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5315 +
5316 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5317 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5318 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5319 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5320 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5321 +#endif
5322 +}
5323 +
5324 +/* Writes a buffer to PE internal memory from the host
5325 + * through indirect access registers.
5326 + *
5327 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5328 + * ..., UTIL_ID)
5329 + * @param[in] src Buffer source address
5330 + * @param[in] mem_access_addr	PE memory destination address (must be 32bit
5331 + *				aligned)
5332 + * @param[in] len Number of bytes to copy
5333 + */
5334 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
5335 +int len)
5336 +{
5337 + u32 offset = 0, val, addr;
5338 + unsigned int len32 = len >> 2;
5339 + int i;
5340 +
5341 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5342 + PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5343 +
5344 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
5345 + val = *(u32 *)src;
5346 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5347 + writel(addr + offset, pe[id].mem_access_addr);
5348 + }
5349 +
5350 + len = (len & 0x3);
5351 + if (len) {
5352 + val = 0;
5353 +
5354 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5355 + PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5356 +
5357 + for (i = 0; i < len; i++, src++)
5358 + val |= (*(u8 *)src) << (8 * i);
5359 +
5360 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5361 + writel(addr, pe[id].mem_access_addr);
5362 + }
5363 +}
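+/*
+ * Illustrative example (not part of the driver): copying a 6-byte buffer to
+ * a 32bit aligned PE address results in two indirect writes: one full 32bit
+ * word with all four byte enables set, then the 16bit remainder written at
+ * offset 4 with PE_MEM_ACCESS_BYTE_ENABLE(0, 2).
+ */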
5364 +
5365 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5366 + * through indirect access registers.
5367 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5368 + * ..., UTIL_ID)
5369 + * @param[in] src Buffer source address
5370 + * @param[in] dst DMEM destination address (must be 32bit
5371 + * aligned)
5372 + * @param[in] len Number of bytes to copy
5373 + */
5374 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5375 +{
5376 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5377 + PE_MEM_ACCESS_DMEM, src, len);
5378 +}
5379 +
5380 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5381 + * through indirect access registers.
5382 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5383 + * ..., TMU3_ID)
5384 + * @param[in] src Buffer source address
5385 + * @param[in] dst PMEM destination address (must be 32bit
5386 + * aligned)
5387 + * @param[in] len Number of bytes to copy
5388 + */
5389 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5390 +{
5391 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5392 + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5393 +}
5394 +
5395 +/* Reads PE internal program memory (IMEM) from the host
5396 + * through indirect access registers.
5397 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5398 + * ..., TMU3_ID)
5399 + * @param[in] addr PMEM read address (must be aligned on size)
5400 + * @param[in] size Number of bytes to read (maximum 4, must not
5401 + * cross 32bit boundaries)
5402 + * @return		the data read (in PE endianness, i.e. BE).
5403 + */
5404 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5405 +{
5406 + u32 offset = addr & 0x3;
5407 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5408 + u32 val;
5409 +
5410 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5411 + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5412 +
5413 + writel(addr, pe[id].mem_access_addr);
5414 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5415 +
5416 + return (val >> (offset << 3)) & mask;
5417 +}
5418 +
5419 +/* Writes PE internal data memory (DMEM) from the host
5420 + * through indirect access registers.
5421 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5422 + * ..., UTIL_ID)
5423 + * @param[in] addr DMEM write address (must be aligned on size)
5424 + * @param[in] val	Value to write (in PE endianness, i.e. BE)
5425 + * @param[in] size Number of bytes to write (maximum 4, must not
5426 + * cross 32bit boundaries)
5427 + */
5428 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5429 +{
5430 + u32 offset = addr & 0x3;
5431 +
5432 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5433 + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5434 +
5435 +	/* The indirect access interface byte-swaps the data being written */
5436 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5437 + writel(addr, pe[id].mem_access_addr);
5438 +}
5439 +
5440 +/* Reads PE internal data memory (DMEM) from the host
5441 + * through indirect access registers.
5442 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5443 + * ..., UTIL_ID)
5444 + * @param[in] addr DMEM read address (must be aligned on size)
5445 + * @param[in] size Number of bytes to read (maximum 4, must not
5446 + * cross 32bit boundaries)
5447 + * @return		the data read (in PE endianness, i.e. BE).
5448 + */
5449 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5450 +{
5451 + u32 offset = addr & 0x3;
5452 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5453 + u32 val;
5454 +
5455 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5456 + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5457 +
5458 + writel(addr, pe[id].mem_access_addr);
5459 +
5460 +	/* The indirect access interface byte-swaps the data being read */
5461 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5462 +
5463 + return (val >> (offset << 3)) & mask;
5464 +}
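+/*
+ * Illustrative example (not part of the driver): a 32bit DMEM
+ * read-modify-write sequence built from the accessors above. The DMEM
+ * address and flag value are made up for the example.
+ *
+ *	u32 flags = pe_dmem_read(CLASS0_ID, 0x100, 4);
+ *	pe_dmem_write(CLASS0_ID, flags | 0x1, 0x100, 4);
+ */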
5465 +
5466 +/* This function is used to write to CLASS internal bus peripherals (ccu,
5467 + * pe-lem) from the host
5468 + * through indirect access registers.
5469 + * @param[in] val value to write
5470 + * @param[in] addr Address to write to (must be aligned on size)
5471 + * @param[in] size Number of bytes to write (1, 2 or 4)
5472 + *
5473 + */
5474 +void class_bus_write(u32 val, u32 addr, u8 size)
5475 +{
5476 + u32 offset = addr & 0x3;
5477 +
5478 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5479 +
5480 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5481 + (size << 24);
5482 +
5483 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5484 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5485 +}
5486 +
5487 +/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
5488 + * through indirect access registers.
5489 + * @param[in] addr Address to read from (must be aligned on size)
5490 + * @param[in] size Number of bytes to read (1, 2 or 4)
5491 + * @return the read data
5492 + *
5493 + */
5494 +u32 class_bus_read(u32 addr, u8 size)
5495 +{
5496 + u32 offset = addr & 0x3;
5497 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5498 + u32 val;
5499 +
5500 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5501 +
5502 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5503 +
5504 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5505 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5506 +
5507 + return (val >> (offset << 3)) & mask;
5508 +}
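+/*
+ * Illustrative example (not part of the driver): writing a 32bit word on
+ * the CLASS internal bus and reading it back. The peripheral address is
+ * made up for the example.
+ *
+ *	class_bus_write(0x12345678, addr, 4);
+ *	val = class_bus_read(addr, 4);
+ */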
5509 +
5510 +/* Writes data to the cluster memory (PE_LMEM)
5511 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5512 + * @param[in] src Buffer source address
5513 + * @param[in] len Number of bytes to copy
5514 + */
5515 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
5516 +{
5517 + u32 len32 = len >> 2;
5518 + int i;
5519 +
5520 + for (i = 0; i < len32; i++, src += 4, dst += 4)
5521 + class_bus_write(*(u32 *)src, dst, 4);
5522 +
5523 + if (len & 0x2) {
5524 + class_bus_write(*(u16 *)src, dst, 2);
5525 + src += 2;
5526 + dst += 2;
5527 + }
5528 +
5529 + if (len & 0x1) {
5530 + class_bus_write(*(u8 *)src, dst, 1);
5531 + src++;
5532 + dst++;
5533 + }
5534 +}
5535 +
5536 +/* Writes value to the cluster memory (PE_LMEM)
5537 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5538 + * @param[in] val Value to write
5539 + * @param[in] len Number of bytes to write
5540 + */
5541 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
5542 +{
5543 + u32 len32 = len >> 2;
5544 + int i;
5545 +
5546 + val = val | (val << 8) | (val << 16) | (val << 24);
5547 +
5548 + for (i = 0; i < len32; i++, dst += 4)
5549 + class_bus_write(val, dst, 4);
5550 +
5551 + if (len & 0x2) {
5552 + class_bus_write(val, dst, 2);
5553 + dst += 2;
5554 + }
5555 +
5556 + if (len & 0x1) {
5557 + class_bus_write(val, dst, 1);
5558 + dst++;
5559 + }
5560 +}
5561 +
5562 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5563 +
5564 +/* Writes UTIL program memory (DDR) from the host.
5565 + *
5566 + * @param[in] addr Address to write (virtual, must be aligned on size)
5567 + * @param[in] val	Value to write (in PE endianness, i.e. BE)
5568 + * @param[in] size Number of bytes to write (2 or 4)
5569 + */
5570 +static void util_pmem_write(u32 val, void *addr, u8 size)
5571 +{
5572 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
5573 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
5574 +
5575 + /*
5576 + * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
5577 + * location
5578 + */
5579 + if (size == 4)
5580 + writel(be32_to_cpu(val), addr64 + off);
5581 + else
5582 + writew(be16_to_cpu((u16)val), addr64 + off);
5583 +}
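+/*
+ * Illustrative example (not part of the driver): with the 64bit swap above,
+ * a 32bit write to an address where (addr & 0x7) == 0 lands at offset 4 of
+ * the aligned 64bit word (off = 8 - 0 - 4), while a 32bit write to
+ * (addr & 0x7) == 4 lands at offset 0 (off = 8 - 4 - 4).
+ */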
5584 +
5585 +/* Writes a buffer to UTIL program memory (DDR) from the host.
5586 + *
5587 + * @param[in] dst Address to write (virtual, must be at least 16bit
5588 + * aligned)
5589 + * @param[in] src	Buffer to write (in PE endianness, i.e. BE, must have
5590 + * same alignment as dst)
5591 + * @param[in] len Number of bytes to write (must be at least 16bit
5592 + * aligned)
5593 + */
5594 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
5595 +{
5596 + unsigned int len32;
5597 + int i;
5598 +
5599 + if ((unsigned long)src & 0x2) {
5600 + util_pmem_write(*(u16 *)src, dst, 2);
5601 + src += 2;
5602 + dst += 2;
5603 + len -= 2;
5604 + }
5605 +
5606 + len32 = len >> 2;
5607 +
5608 + for (i = 0; i < len32; i++, dst += 4, src += 4)
5609 + util_pmem_write(*(u32 *)src, dst, 4);
5610 +
5611 + if (len & 0x2)
5612 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
5613 +}
5614 +#endif
5615 +
5616 +/* Loads an elf section into pmem
5617 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
5618 + * supported
5619 + *
5620 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
5621 + * TMU3_ID)
5622 + * @param[in] data pointer to the elf firmware
5623 + * @param[in] shdr pointer to the elf section header
5624 + *
5625 + */
5626 +static int pe_load_pmem_section(int id, const void *data,
5627 + struct elf32_shdr *shdr)
5628 +{
5629 + u32 offset = be32_to_cpu(shdr->sh_offset);
5630 + u32 addr = be32_to_cpu(shdr->sh_addr);
5631 + u32 size = be32_to_cpu(shdr->sh_size);
5632 + u32 type = be32_to_cpu(shdr->sh_type);
5633 +
5634 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5635 + if (id == UTIL_ID) {
5636 + pr_err("%s: unsupported pmem section for UTIL\n",
5637 + __func__);
5638 + return -EINVAL;
5639 + }
5640 +#endif
5641 +
5642 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5643 + pr_err(
5644 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5645 + , __func__, addr, (unsigned long)data + offset);
5646 +
5647 + return -EINVAL;
5648 + }
5649 +
5650 + if (addr & 0x1) {
5651 + pr_err("%s: load address(%x) is not 16bit aligned\n",
5652 + __func__, addr);
5653 + return -EINVAL;
5654 + }
5655 +
5656 + if (size & 0x1) {
5657 + pr_err("%s: load size(%x) is not 16bit aligned\n",
5658 + __func__, size);
5659 + return -EINVAL;
5660 + }
5661 +
5662 + switch (type) {
5663 + case SHT_PROGBITS:
5664 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
5665 +
5666 + break;
5667 +
5668 + default:
5669 + pr_err("%s: unsupported section type(%x)\n", __func__,
5670 + type);
5671 + return -EINVAL;
5672 + }
5673 +
5674 + return 0;
5675 +}
5676 +
5677 +/* Loads an elf section into dmem
5678 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5679 + * initialized to 0
5680 + *
5681 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5682 + * ..., UTIL_ID)
5683 + * @param[in] data pointer to the elf firmware
5684 + * @param[in] shdr pointer to the elf section header
5685 + *
5686 + */
5687 +static int pe_load_dmem_section(int id, const void *data,
5688 + struct elf32_shdr *shdr)
5689 +{
5690 + u32 offset = be32_to_cpu(shdr->sh_offset);
5691 + u32 addr = be32_to_cpu(shdr->sh_addr);
5692 + u32 size = be32_to_cpu(shdr->sh_size);
5693 + u32 type = be32_to_cpu(shdr->sh_type);
5694 + u32 size32 = size >> 2;
5695 + int i;
5696 +
5697 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5698 + pr_err(
5699 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5700 + __func__, addr, (unsigned long)data + offset);
5701 +
5702 + return -EINVAL;
5703 + }
5704 +
5705 + if (addr & 0x3) {
5706 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5707 + __func__, addr);
5708 + return -EINVAL;
5709 + }
5710 +
5711 + switch (type) {
5712 + case SHT_PROGBITS:
5713 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
5714 + break;
5715 +
5716 + case SHT_NOBITS:
5717 + for (i = 0; i < size32; i++, addr += 4)
5718 + pe_dmem_write(id, 0, addr, 4);
5719 +
5720 + if (size & 0x3)
5721 + pe_dmem_write(id, 0, addr, size & 0x3);
5722 +
5723 + break;
5724 +
5725 + default:
5726 + pr_err("%s: unsupported section type(%x)\n", __func__,
5727 + type);
5728 + return -EINVAL;
5729 + }
5730 +
5731 + return 0;
5732 +}
5733 +
5734 +/* Loads an elf section into DDR
5735 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5736 + * initialized to 0
5737 + *
5738 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5739 + * ..., UTIL_ID)
5740 + * @param[in] data pointer to the elf firmware
5741 + * @param[in] shdr pointer to the elf section header
5742 + *
5743 + */
5744 +static int pe_load_ddr_section(int id, const void *data,
5745 +			       struct elf32_shdr *shdr,
5746 +			       struct device *dev)
+{
5747 + u32 offset = be32_to_cpu(shdr->sh_offset);
5748 + u32 addr = be32_to_cpu(shdr->sh_addr);
5749 + u32 size = be32_to_cpu(shdr->sh_size);
5750 + u32 type = be32_to_cpu(shdr->sh_type);
5751 + u32 flags = be32_to_cpu(shdr->sh_flags);
5752 +
5753 + switch (type) {
5754 + case SHT_PROGBITS:
5755 + if (flags & SHF_EXECINSTR) {
5756 + if (id <= CLASS_MAX_ID) {
5757 +			/* Do the loading only once into DDR */
5758 + if (id == CLASS0_ID) {
5759 + pr_err(
5760 + "%s: load address(%x) and elf file address(%lx) rcvd\n",
5761 + __func__, addr,
5762 + (unsigned long)data + offset);
5763 + if (((unsigned long)(data + offset)
5764 + & 0x3) != (addr & 0x3)) {
5765 + pr_err(
5766 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5767 + , __func__, addr,
5768 + (unsigned long)data + offset);
5769 +
5770 + return -EINVAL;
5771 + }
5772 +
5773 + if (addr & 0x1) {
5774 + pr_err(
5775 + "%s: load address(%x) is not 16bit aligned\n"
5776 + , __func__, addr);
5777 + return -EINVAL;
5778 + }
5779 +
5780 + if (size & 0x1) {
5781 + pr_err(
5782 + "%s: load length(%x) is not 16bit aligned\n"
5783 + , __func__, size);
5784 + return -EINVAL;
5785 + }
5786 + memcpy(DDR_PHYS_TO_VIRT(
5787 + DDR_PFE_TO_PHYS(addr)),
5788 + data + offset, size);
5789 + }
5790 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5791 + } else if (id == UTIL_ID) {
5792 + if (((unsigned long)(data + offset) & 0x3)
5793 + != (addr & 0x3)) {
5794 + pr_err(
5795 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5796 + , __func__, addr,
5797 + (unsigned long)data + offset);
5798 +
5799 + return -EINVAL;
5800 + }
5801 +
5802 + if (addr & 0x1) {
5803 + pr_err(
5804 + "%s: load address(%x) is not 16bit aligned\n"
5805 + , __func__, addr);
5806 + return -EINVAL;
5807 + }
5808 +
5809 + if (size & 0x1) {
5810 + pr_err(
5811 + "%s: load length(%x) is not 16bit aligned\n"
5812 + , __func__, size);
5813 + return -EINVAL;
5814 + }
5815 +
5816 + util_pmem_memcpy(DDR_PHYS_TO_VIRT(
5817 + DDR_PFE_TO_PHYS(addr)),
5818 + data + offset, size);
5819 + }
5820 +#endif
5821 + } else {
5822 + pr_err(
5823 + "%s: unsupported ddr section type(%x) for PE(%d)\n"
5824 + , __func__, type, id);
5825 + return -EINVAL;
5826 + }
5827 +
5828 + } else {
5829 + memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
5830 + + offset, size);
5831 + }
5832 +
5833 + break;
5834 +
5835 + case SHT_NOBITS:
5836 + memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
5837 +
5838 + break;
5839 +
5840 + default:
5841 + pr_err("%s: unsupported section type(%x)\n", __func__,
5842 + type);
5843 + return -EINVAL;
5844 + }
5845 +
5846 + return 0;
5847 +}
5848 +
5849 +/* Loads an elf section into pe lmem
5850 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5851 + * initialized to 0
5852 + *
5853 + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
5854 + * @param[in] data pointer to the elf firmware
5855 + * @param[in] shdr pointer to the elf section header
5856 + *
5857 + */
5858 +static int pe_load_pe_lmem_section(int id, const void *data,
5859 + struct elf32_shdr *shdr)
5860 +{
5861 + u32 offset = be32_to_cpu(shdr->sh_offset);
5862 + u32 addr = be32_to_cpu(shdr->sh_addr);
5863 + u32 size = be32_to_cpu(shdr->sh_size);
5864 + u32 type = be32_to_cpu(shdr->sh_type);
5865 +
5866 + if (id > CLASS_MAX_ID) {
5867 + pr_err(
5868 + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
5869 + __func__, type, id);
5870 + return -EINVAL;
5871 + }
5872 +
5873 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5874 + pr_err(
5875 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5876 + __func__, addr, (unsigned long)data + offset);
5877 +
5878 + return -EINVAL;
5879 + }
5880 +
5881 + if (addr & 0x3) {
5882 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5883 + __func__, addr);
5884 + return -EINVAL;
5885 + }
5886 +
5887 + switch (type) {
5888 + case SHT_PROGBITS:
5889 + class_pe_lmem_memcpy_to32(addr, data + offset, size);
5890 + break;
5891 +
5892 + case SHT_NOBITS:
5893 + class_pe_lmem_memset(addr, 0, size);
5894 + break;
5895 +
5896 + default:
5897 + pr_err("%s: unsupported section type(%x)\n", __func__,
5898 + type);
5899 + return -EINVAL;
5900 + }
5901 +
5902 + return 0;
5903 +}
5904 +
5905 +/* Loads an elf section into a PE
5906 + * For now only supports loading a section to dmem (all PEs), pmem (class and
5907 + * tmu PEs) and DDR (util PE code)
5909 + *
5910 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5911 + * ..., UTIL_ID)
5912 + * @param[in] data pointer to the elf firmware
5913 + * @param[in] shdr pointer to the elf section header
5914 + *
5915 + */
5916 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
5917 +			struct device *dev)
+{
5918 + u32 addr = be32_to_cpu(shdr->sh_addr);
5919 + u32 size = be32_to_cpu(shdr->sh_size);
5920 +
5921 + if (IS_DMEM(addr, size))
5922 + return pe_load_dmem_section(id, data, shdr);
5923 + else if (IS_PMEM(addr, size))
5924 + return pe_load_pmem_section(id, data, shdr);
5925 + else if (IS_PFE_LMEM(addr, size))
5926 + return 0;
5927 + else if (IS_PHYS_DDR(addr, size))
5928 + return pe_load_ddr_section(id, data, shdr, dev);
5929 + else if (IS_PE_LMEM(addr, size))
5930 + return pe_load_pe_lmem_section(id, data, shdr);
5931 +
5932 + pr_err("%s: unsupported memory range(%x)\n", __func__,
5933 + addr);
5934 + return 0;
5935 +}
5936 +
5937 +/**************************** BMU ***************************/
5938 +
5939 +/* Initializes a BMU block.
5940 + * @param[in] base BMU block base address
5941 + * @param[in] cfg BMU configuration
5942 + */
5943 +void bmu_init(void *base, struct BMU_CFG *cfg)
5944 +{
5945 + bmu_disable(base);
5946 +
5947 + bmu_set_config(base, cfg);
5948 +
5949 + bmu_reset(base);
5950 +}
5951 +
5952 +/* Resets a BMU block.
5953 + * @param[in] base BMU block base address
5954 + */
5955 +void bmu_reset(void *base)
5956 +{
5957 + writel(CORE_SW_RESET, base + BMU_CTRL);
5958 +
5959 + /* Wait for self clear */
5960 + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
5961 + ;
5962 +}
5963 +
5964 +/* Enables a BMU block.
5965 + * @param[in] base BMU block base address
5966 + */
5967 +void bmu_enable(void *base)
5968 +{
5969 + writel(CORE_ENABLE, base + BMU_CTRL);
5970 +}
5971 +
5972 +/* Disables a BMU block.
5973 + * @param[in] base BMU block base address
5974 + */
5975 +void bmu_disable(void *base)
5976 +{
5977 + writel(CORE_DISABLE, base + BMU_CTRL);
5978 +}
5979 +
5980 +/* Sets the configuration of a BMU block.
5981 + * @param[in] base BMU block base address
5982 + * @param[in] cfg BMU configuration
5983 + */
5984 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
5985 +{
5986 + writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
5987 + writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
5988 + writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
5989 +
5990 + /* Interrupts are never used */
5991 + writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
5992 + writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
5993 + writel(0x0, base + BMU_INT_ENABLE);
5994 +}
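+/*
+ * Illustrative example (not part of the driver): a minimal BMU
+ * configuration sketch. The values are made up; real ones come from the
+ * platform init code.
+ *
+ *	struct BMU_CFG cfg = {
+ *		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR),
+ *		.count = 2048,
+ *		.size = 128,
+ *	};
+ *	bmu_init(BMU1_BASE_ADDR, &cfg);
+ */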
5995 +
5996 +/**************************** MTIP GEMAC ***************************/
5997 +
5998 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
5999 + * TCP or UDP checksums are discarded
6000 + *
6001 + * @param[in] base GEMAC base address.
6002 + */
6003 +void gemac_enable_rx_checksum_offload(void *base)
6004 +{
6005 +	/* No configuration found for this on the MTIP GEMAC */
6006 +}
6007 +
6008 +/* Disable Rx Checksum Engine.
6009 + *
6010 + * @param[in] base GEMAC base address.
6011 + */
6012 +void gemac_disable_rx_checksum_offload(void *base)
6013 +{
6014 +	/* No configuration found for this on the MTIP GEMAC */
6015 +}
6016 +
6017 +/* GEMAC set speed.
6018 + * @param[in] base GEMAC base address
6019 + * @param[in] gem_speed	GEMAC speed (10, 100 or 1000 Mbps)
6020 + */
6021 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
6022 +{
6023 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
6024 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
6025 +
6026 + switch (gem_speed) {
6027 + case SPEED_10M:
6028 + rcr |= EMAC_RCNTRL_RMII_10T;
6029 + break;
6030 +
6031 + case SPEED_1000M:
6032 + ecr |= EMAC_ECNTRL_SPEED;
6033 + break;
6034 +
6035 + case SPEED_100M:
6036 + default:
6037 +		/* 100M mode: no extra speed bits to set */
6038 + break;
6039 + }
6040 + writel(ecr, (base + EMAC_ECNTRL_REG));
6041 + writel(rcr, (base + EMAC_RCNTRL_REG));
6042 +}
6043 +
6044 +/* GEMAC set duplex.
6045 + * @param[in] base GEMAC base address
6046 + * @param[in] duplex GEMAC duplex mode (Full, Half)
6047 + */
6048 +void gemac_set_duplex(void *base, int duplex)
6049 +{
6050 + if (duplex == DUPLEX_HALF) {
6051 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
6052 + + EMAC_TCNTRL_REG);
6053 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
6054 + + EMAC_RCNTRL_REG));
6055 +	} else {
6056 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
6057 + + EMAC_TCNTRL_REG);
6058 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
6059 + + EMAC_RCNTRL_REG));
6060 + }
6061 +}
6062 +
6063 +/* GEMAC set mode.
6064 + * @param[in] base GEMAC base address
6065 + * @param[in] mode	GEMAC operation mode (currently unused; MII mode
+ *			is always selected)
6066 + */
6067 +void gemac_set_mode(void *base, int mode)
6068 +{
6069 + u32 val = readl(base + EMAC_RCNTRL_REG);
6070 +
6071 +	/* Remove loopback */
6072 + val &= ~EMAC_RCNTRL_LOOP;
6073 +
6074 + /*Enable flow control and MII mode*/
6075 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
6076 +
6077 + writel(val, base + EMAC_RCNTRL_REG);
6078 +}
6079 +
6080 +/* GEMAC enable function.
6081 + * @param[in] base GEMAC base address
6082 + */
6083 +void gemac_enable(void *base)
6084 +{
6085 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6086 + EMAC_ECNTRL_REG);
6087 +}
6088 +
6089 +/* GEMAC disable function.
6090 + * @param[in] base GEMAC base address
6091 + */
6092 +void gemac_disable(void *base)
6093 +{
6094 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6095 + EMAC_ECNTRL_REG);
6096 +}
6097 +
6098 +/* GEMAC TX disable function.
6099 + * @param[in] base GEMAC base address
6100 + */
6101 +void gemac_tx_disable(void *base)
6102 +{
6103 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6104 + EMAC_TCNTRL_REG);
6105 +}
6106 +
6107 +void gemac_tx_enable(void *base)
6108 +{
6109 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6110 + EMAC_TCNTRL_REG);
6111 +}
6112 +
6113 +/* Sets the hash register of the MAC.
6114 + * This register is used for matching unicast and multicast frames.
6115 + *
6116 + * @param[in] base GEMAC base address.
6117 + * @param[in] hash 64-bit hash to be configured.
6118 + */
6119 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6120 +{
6121 + writel(hash->bottom, base + EMAC_GALR);
6122 + writel(hash->top, base + EMAC_GAUR);
6123 +}
6124 +
6125 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6126 + unsigned int entry_index)
6127 +{
6128 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6129 + return;
6130 +
6131 + entry_index = entry_index - 1;
6132 + if (entry_index < 1) {
6133 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
6134 + writel((htonl(address->top) | 0x8808), base +
6135 + EMAC_PHY_ADDR_HIGH);
6136 + } else {
6137 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
6138 + + EMAC_SMAC_0_0);
6139 + writel((htonl(address->top) | 0x8808), base + ((entry_index -
6140 + 1) * 8) + EMAC_SMAC_0_1);
6141 + }
6142 +}
6143 +
6144 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6145 +{
6146 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6147 + return;
6148 +
6149 + entry_index = entry_index - 1;
6150 + if (entry_index < 1) {
6151 + writel(0, base + EMAC_PHY_ADDR_LOW);
6152 + writel(0, base + EMAC_PHY_ADDR_HIGH);
6153 + } else {
6154 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6155 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6156 + }
6157 +}
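+/*
+ * Illustrative example (not part of the driver): programming special
+ * address entry 1 with a made-up MAC address.
+ *
+ *	struct pfe_mac_addr addr = { .bottom = 0x00112233, .top = 0x44550000 };
+ *	gemac_set_laddrN(base, &addr, 1);
+ */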
6158 +
6159 +/* Set the loopback mode of the MAC. This can be either no loopback for
6160 + * normal operation, local loopback through MAC internal loopback module or PHY
6161 + * loopback for external loopback through a PHY. This asserts the external
6162 + * loop pin.
6163 + *
6164 + * @param[in] base GEMAC base address.
6165 + * @param[in] gem_loop	Loopback mode to be enabled. LB_LOCAL - MAC
6166 + *			Loopback, LB_EXT - PHY Loopback. (Currently local MAC
6167 + *			loopback is always enabled, regardless of gem_loop.)
6168 + */
6169 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6170 +{
6171 + pr_info("%s()\n", __func__);
6172 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6173 + EMAC_RCNTRL_REG));
6174 +}
6175 +
6176 +/* GEMAC allow frames
6177 + * @param[in] base GEMAC base address
6178 + */
6179 +void gemac_enable_copy_all(void *base)
6180 +{
6181 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6182 + EMAC_RCNTRL_REG));
6183 +}
6184 +
6185 +/* GEMAC do not allow frames
6186 + * @param[in] base GEMAC base address
6187 + */
6188 +void gemac_disable_copy_all(void *base)
6189 +{
6190 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6191 + EMAC_RCNTRL_REG));
6192 +}
6193 +
6194 +/* GEMAC allow broadcast function.
6195 + * @param[in] base GEMAC base address
6196 + */
6197 +void gemac_allow_broadcast(void *base)
6198 +{
6199 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6200 + EMAC_RCNTRL_REG);
6201 +}
6202 +
6203 +/* GEMAC no broadcast function.
6204 + * @param[in] base GEMAC base address
6205 + */
6206 +void gemac_no_broadcast(void *base)
6207 +{
6208 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6209 + EMAC_RCNTRL_REG);
6210 +}
6211 +
6212 +/* GEMAC enable 1536 rx function.
6213 + * @param[in] base GEMAC base address
6214 + */
6215 +void gemac_enable_1536_rx(void *base)
6216 +{
6217 + /* Set 1536 as Maximum frame length */
6218 + writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
6219 + EMAC_RCNTRL_REG);
6220 +}
6221 +
6222 +/* GEMAC enable jumbo function.
6223 + * @param[in] base GEMAC base address
6224 + */
6225 +void gemac_enable_rx_jmb(void *base)
6226 +{
6227 + writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
6228 + + EMAC_RCNTRL_REG);
6229 +}
6230 +
6231 +/* GEMAC enable stacked vlan function.
6232 + * @param[in] base GEMAC base address
6233 + */
6234 +void gemac_enable_stacked_vlan(void *base)
6235 +{
6236 + /* MTIP doesn't support stacked vlan */
6237 +}
6238 +
6239 +/* GEMAC enable pause rx function.
6240 + * @param[in] base GEMAC base address
6241 + */
6242 +void gemac_enable_pause_rx(void *base)
6243 +{
6244 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6245 + base + EMAC_RCNTRL_REG);
6246 +}
6247 +
6248 +/* GEMAC disable pause rx function.
6249 + * @param[in] base GEMAC base address
6250 + */
6251 +void gemac_disable_pause_rx(void *base)
6252 +{
6253 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6254 + base + EMAC_RCNTRL_REG);
6255 +}
6256 +
6257 +/* GEMAC enable pause tx function.
6258 + * @param[in] base GEMAC base address
6259 + */
6260 +void gemac_enable_pause_tx(void *base)
6261 +{
6262 + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6263 +}
6264 +
6265 +/* GEMAC disable pause tx function.
6266 + * @param[in] base GEMAC base address
6267 + */
6268 +void gemac_disable_pause_tx(void *base)
6269 +{
6270 + writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6271 +}
6272 +
6273 +/* GEMAC wol configuration
6274 + * @param[in] base GEMAC base address
6275 + * @param[in] wol_conf WoL register configuration
6276 + */
6277 +void gemac_set_wol(void *base, u32 wol_conf)
6278 +{
6279 + u32 val = readl(base + EMAC_ECNTRL_REG);
6280 +
6281 + if (wol_conf)
6282 + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6283 + else
6284 + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6285 + writel(val, base + EMAC_ECNTRL_REG);
6286 +}
6287 +
6288 +/* Sets the GEMAC bus width.
6289 + * @param[in] base       GEMAC base address
6290 + * @param[in] width	GEMAC bus width to be set; possible values are 32/64/128
6291 + */
6292 +void gemac_set_bus_width(void *base, int width)
6293 +{
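+	/* Bus width is not configurable on this GEMAC; intentionally empty */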
6294 +}
6295 +
6296 +/* Sets Gemac configuration.
6297 + * @param[in] base GEMAC base address
6298 + * @param[in] cfg GEMAC configuration
6299 + */
6300 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6301 +{
6302 + /*GEMAC config taken from VLSI */
6303 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6304 + writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6305 + writel(0x00003fff, base + EMAC_TRUNC_FL);
6306 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6307 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6308 +
6309 + gemac_set_mode(base, cfg->mode);
6310 +
6311 + gemac_set_speed(base, cfg->speed);
6312 +
6313 + gemac_set_duplex(base, cfg->duplex);
6314 +}
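+/*
+ * Illustrative example (not part of the driver): configuring for a gigabit
+ * full-duplex link. The values are made up; .mode is currently unused (see
+ * gemac_set_mode() above).
+ *
+ *	struct gemac_cfg cfg = {
+ *		.mode = 0,
+ *		.speed = SPEED_1000M,
+ *		.duplex = DUPLEX_FULL,
+ *	};
+ *	gemac_set_config(base, &cfg);
+ */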
6315 +
6316 +/**************************** GPI ***************************/
6317 +
6318 +/* Initializes a GPI block.
6319 + * @param[in] base GPI base address
6320 + * @param[in] cfg GPI configuration
6321 + */
6322 +void gpi_init(void *base, struct gpi_cfg *cfg)
6323 +{
6324 + gpi_reset(base);
6325 +
6326 + gpi_disable(base);
6327 +
6328 + gpi_set_config(base, cfg);
6329 +}
6330 +
6331 +/* Resets a GPI block.
6332 + * @param[in] base GPI base address
6333 + */
6334 +void gpi_reset(void *base)
6335 +{
6336 + writel(CORE_SW_RESET, base + GPI_CTRL);
6337 +}
6338 +
6339 +/* Enables a GPI block.
6340 + * @param[in] base GPI base address
6341 + */
6342 +void gpi_enable(void *base)
6343 +{
6344 + writel(CORE_ENABLE, base + GPI_CTRL);
6345 +}
6346 +
6347 +/* Disables a GPI block.
6348 + * @param[in] base GPI base address
6349 + */
6350 +void gpi_disable(void *base)
6351 +{
6352 + writel(CORE_DISABLE, base + GPI_CTRL);
6353 +}
6354 +
6355 +/* Sets the configuration of a GPI block.
6356 + * @param[in] base GPI base address
6357 + * @param[in] cfg GPI configuration
6358 + */
6359 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6360 +{
6361 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
6362 + + GPI_LMEM_ALLOC_ADDR);
6363 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
6364 + + GPI_LMEM_FREE_ADDR);
6365 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
6366 + + GPI_DDR_ALLOC_ADDR);
6367 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
6368 + + GPI_DDR_FREE_ADDR);
6369 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6370 + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6371 + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6372 + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6373 + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6374 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
6375 + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
6376 +
6377 + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6378 + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6379 + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6380 + writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
6381 + writel(1, base + GPI_TOE_CHKSUM_EN);
6382 +
6383 + if (cfg->mtip_pause_reg) {
6384 + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6385 + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6386 + }
6387 +}
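+/*
+ * Illustrative example (not part of the driver): a GPI configuration
+ * sketch with made-up values; real ones come from the platform init code.
+ *
+ *	struct gpi_cfg cfg = {
+ *		.lmem_rtry_cnt = 5,
+ *		.tmlf_txthres = 0x100,
+ *		.aseq_len = 0x40,
+ *	};
+ *	gpi_init(EGPI1_BASE_ADDR, &cfg);
+ */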
6388 +
6389 +/**************************** CLASSIFIER ***************************/
6390 +
6391 +/* Initializes CLASSIFIER block.
6392 + * @param[in] cfg CLASSIFIER configuration
6393 + */
6394 +void class_init(struct class_cfg *cfg)
6395 +{
6396 + class_reset();
6397 +
6398 + class_disable();
6399 +
6400 + class_set_config(cfg);
6401 +}
6402 +
6403 +/* Resets CLASSIFIER block.
6404 + *
6405 + */
6406 +void class_reset(void)
6407 +{
6408 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
6409 +}
6410 +
6411 +/* Enables all CLASS-PE's cores.
6412 + *
6413 + */
6414 +void class_enable(void)
6415 +{
6416 + writel(CORE_ENABLE, CLASS_TX_CTRL);
6417 +}
6418 +
6419 +/* Disables all CLASS-PE's cores.
6420 + *
6421 + */
6422 +void class_disable(void)
6423 +{
6424 + writel(CORE_DISABLE, CLASS_TX_CTRL);
6425 +}
6426 +
6427 +/*
6428 + * Sets the configuration of the CLASSIFIER block.
6429 + * @param[in] cfg CLASSIFIER configuration
6430 + */
6431 +void class_set_config(struct class_cfg *cfg)
6432 +{
6433 + u32 val;
6434 +
6435 + /* Initialize route table */
6436 + if (!cfg->resume)
6437 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6438 + cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6439 +
6440 +#if !defined(LS1012A_PFE_RESET_WA)
6441 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
6442 +#endif
6443 +
6444 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
6445 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
6446 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6447 + CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6448 + CLASS_ROUTE_HASH_ENTRY_SIZE);
6449 + writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6450 + CLASS_HIF_PARSE);
6451 +
6452 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
6453 +
6454 +#if defined(CONFIG_IP_ALIGNED)
6455 + val |= IP_ALIGNED;
6456 +#endif
6457 +
6458 + /*
6459 + * Class PE packet steering will only work if TOE mode, bridge fetch or
6460 + * route fetch are enabled (see class/qb_fet.v). Route fetch would
6461 + * trigger additional memory copies (likely from DDR because of hash
6462 + * table size, which cannot be reduced because PE software still
6463 + * relies on hash value computed in HW), so when not in TOE mode we
6464 + * simply enable HW bridge fetch even though we don't use it.
6465 + */
6466 + if (cfg->toe_mode)
6467 + val |= CLASS_TOE;
6468 + else
6469 + val |= HW_BRIDGE_FETCH;
6470 +
6471 + writel(val, CLASS_ROUTE_MULTI);
6472 +
6473 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6474 + CLASS_ROUTE_TABLE_BASE);
6475 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
6476 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
6477 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
6478 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
6479 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
6480 +
6481 + writel(23, CLASS_AFULL_THRES);
6482 + writel(23, CLASS_TSQ_FIFO_THRES);
6483 +
6484 + writel(24, CLASS_MAX_BUF_CNT);
6485 + writel(24, CLASS_TSQ_MAX_CNT);
6486 +}
6487 +
6488 +/**************************** TMU ***************************/
6489 +
6490 +void tmu_reset(void)
6491 +{
6492 + writel(SW_RESET, TMU_CTRL);
6493 +}
6494 +
6495 +/* Initializes TMU block.
6496 + * @param[in] cfg TMU configuration
6497 + */
6498 +void tmu_init(struct tmu_cfg *cfg)
6499 +{
6500 + int q, phyno;
6501 +
6502 + tmu_disable(0xF);
6503 + mdelay(10);
6504 +
6505 +#if !defined(LS1012A_PFE_RESET_WA)
6506 + /* keep in soft reset */
6507 + writel(SW_RESET, TMU_CTRL);
6508 +#endif
6509 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
6510 + writel(750, TMU_INQ_WATERMARK);
6511 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
6512 + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
6513 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
6514 + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
6515 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
6516 + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
6517 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
6518 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
6519 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
6520 + TMU_BMU_INQ_ADDR);
6521 +
6522 +	/* enable all 10 schedulers [9:0] of each TDQ */
+	writel(0x3FF, TMU_TDQ0_SCH_CTRL);
6526 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
6527 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
6528 +
6529 +#if !defined(LS1012A_PFE_RESET_WA)
6530 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
6531 +#endif
6532 +
6533 +#if !defined(LS1012A_PFE_RESET_WA)
6534 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
6535 + /* Extra packet pointers will be stored from this address onwards */
6536 +
6537 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
6538 + writel(5, TMU_TDQ_IIFG_CFG);
6539 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
6540 +
6541 + writel(0x0, TMU_CTRL);
6542 +
6543 + /* MEM init */
6544 + pr_info("%s: mem init\n", __func__);
6545 + writel(MEM_INIT, TMU_CTRL);
6546 +
6547 + while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
6548 + ;
6549 +
6550 + /* LLM init */
6551 + pr_info("%s: lmem init\n", __func__);
6552 + writel(LLM_INIT, TMU_CTRL);
6553 +
6554 + while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
6555 + ;
6556 +#endif
6557 + /* set up each queue for tail drop */
6558 + for (phyno = 0; phyno < 4; phyno++) {
6559 + if (phyno == 2)
6560 + continue;
6561 + for (q = 0; q < 16; q++) {
6562 + u32 qdepth;
6563 +
6564 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
6565 + writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
6566 +
6567 + if (phyno == 3)
6568 + qdepth = DEFAULT_TMU3_QDEPTH;
6569 + else
6570 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
6571 + DEFAULT_MAX_QDEPTH;
6572 +
6573 + /* LOG: 68855 */
6574 + /*
6575 + * The following is a workaround for the reordered
6576 + * packet and BMU2 buffer leakage issue.
6577 + */
6578 + if (CHIP_REVISION() == 0)
6579 + qdepth = 31;
6580 +
6581 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
6582 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
6583 + }
6584 + }
6585 +
6586 +#ifdef CFG_LRO
6587 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
6588 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
6589 + writel(0, TMU_TEQ_QCFG);
6590 +#endif
6591 +
6592 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
6593 +
6594 + writel(0x0, TMU_CTRL);
6595 +}
6596 +
6597 +/* Enables TMU-PE cores.
6598 + * @param[in] pe_mask TMU PE mask
6599 + */
6600 +void tmu_enable(u32 pe_mask)
6601 +{
6602 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
6603 +}
6604 +
6605 +/* Disables TMU cores.
6606 + * @param[in] pe_mask TMU PE mask
6607 + */
6608 +void tmu_disable(u32 pe_mask)
6609 +{
6610 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
6611 +}
6612 +
6613 +/* Returns the TMU queue status.
6614 + * @param[in] if_id gem interface id or TMU index
6615 + * @return returns the bit mask of busy queues, zero means all
6616 + * queues are empty
6617 + */
6618 +u32 tmu_qstatus(u32 if_id)
6619 +{
6620 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6621 + offsetof(struct pe_status, tmu_qstatus), 4));
6622 +}
6623 +
6624 +u32 tmu_pkts_processed(u32 if_id)
6625 +{
6626 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6627 + offsetof(struct pe_status, rx), 4));
6628 +}
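+/*
+ * Illustrative example (not part of the driver): waiting for all TMU queues
+ * of an interface to drain before taking it down.
+ *
+ *	while (tmu_qstatus(if_id) != 0)
+ *		cpu_relax();
+ */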
6629 +
6630 +/**************************** UTIL ***************************/
6631 +
6632 +/* Resets UTIL block.
6633 + */
6634 +void util_reset(void)
6635 +{
6636 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
6637 +}
6638 +
6639 +/* Initializes UTIL block.
6640 + * @param[in] cfg UTIL configuration
6641 + */
6642 +void util_init(struct util_cfg *cfg)
6643 +{
6644 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
6645 +}
6646 +
6647 +/* Enables UTIL-PE core.
6648 + *
6649 + */
6650 +void util_enable(void)
6651 +{
6652 + writel(CORE_ENABLE, UTIL_TX_CTRL);
6653 +}
6654 +
6655 +/* Disables UTIL-PE core.
6656 + *
6657 + */
6658 +void util_disable(void)
6659 +{
6660 + writel(CORE_DISABLE, UTIL_TX_CTRL);
6661 +}
6662 +
6663 +/**************************** HIF ***************************/
6664 +/* Initializes HIF copy block.
6665 + *
6666 + */
6667 +void hif_init(void)
6668 +{
6669 + /*Initialize HIF registers*/
6670 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
6671 + HIF_POLL_CTRL);
6672 +}
6673 +
6674 +/* Enable hif tx DMA and interrupt
6675 + *
6676 + */
6677 +void hif_tx_enable(void)
6678 +{
6679 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
6680 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
6681 + HIF_INT_ENABLE);
6682 +}
6683 +
6684 +/* Disable hif tx DMA and interrupt
6685 + *
6686 + */
6687 +void hif_tx_disable(void)
6688 +{
6689 + u32 hif_int;
6690 +
6691 + writel(0, HIF_TX_CTRL);
6692 +
6693 + hif_int = readl(HIF_INT_ENABLE);
6694 +	hif_int &= ~HIF_TXPKT_INT_EN;
6695 + writel(hif_int, HIF_INT_ENABLE);
6696 +}
6697 +
6698 +/* Enable hif rx DMA and interrupt
6699 + *
6700 + */
6701 +void hif_rx_enable(void)
6702 +{
6703 + hif_rx_dma_start();
6704 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
6705 + HIF_INT_ENABLE);
6706 +}
6707 +
6708 +/* Disable hif rx DMA and interrupt
6709 + *
6710 + */
6711 +void hif_rx_disable(void)
6712 +{
6713 + u32 hif_int;
6714 +
6715 + writel(0, HIF_RX_CTRL);
6716 +
6717 + hif_int = readl(HIF_INT_ENABLE);
6718 +	hif_int &= ~HIF_RXPKT_INT_EN;
6719 + writel(hif_int, HIF_INT_ENABLE);
6720 +}
6721 diff --git a/drivers/staging/fsl_ppfe/pfe_hif.c b/drivers/staging/fsl_ppfe/pfe_hif.c
6722 new file mode 100644
6723 index 00000000..6835e140
6724 --- /dev/null
6725 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
6726 @@ -0,0 +1,1072 @@
6727 +/*
6728 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6729 + * Copyright 2017 NXP
6730 + *
6731 + * This program is free software; you can redistribute it and/or modify
6732 + * it under the terms of the GNU General Public License as published by
6733 + * the Free Software Foundation; either version 2 of the License, or
6734 + * (at your option) any later version.
6735 + *
6736 + * This program is distributed in the hope that it will be useful,
6737 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6738 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6739 + * GNU General Public License for more details.
6740 + *
6741 + * You should have received a copy of the GNU General Public License
6742 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
6743 + */
6744 +
6745 +#include <linux/kernel.h>
6746 +#include <linux/interrupt.h>
6747 +#include <linux/dma-mapping.h>
6748 +#include <linux/dmapool.h>
6749 +#include <linux/sched.h>
6750 +#include <linux/module.h>
6751 +#include <linux/list.h>
6752 +#include <linux/kthread.h>
6753 +#include <linux/slab.h>
6754 +
6755 +#include <linux/io.h>
6756 +#include <asm/irq.h>
6757 +
6758 +#include "pfe_mod.h"
6759 +
6760 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
6761 +
6762 +unsigned char napi_first_batch;
6763 +
6764 +static void pfe_tx_do_cleanup(unsigned long data);
6765 +
6766 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
6767 +{
6768 + void *addr;
6769 + dma_addr_t dma_addr;
6770 + int err = 0;
6771 +
6772 + pr_info("%s\n", __func__);
6773 + addr = dma_alloc_coherent(pfe->dev,
6774 + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
6775 + HIF_TX_DESC_NT * sizeof(struct hif_desc),
6776 + &dma_addr, GFP_KERNEL);
6777 +
6778 + if (!addr) {
6779 + pr_err("%s: Could not allocate buffer descriptors!\n"
6780 + , __func__);
6781 + err = -ENOMEM;
6782 + goto err0;
6783 + }
6784 +
6785 + hif->descr_baseaddr_p = dma_addr;
6786 + hif->descr_baseaddr_v = addr;
6787 + hif->rx_ring_size = HIF_RX_DESC_NT;
6788 + hif->tx_ring_size = HIF_TX_DESC_NT;
6789 +
6790 + return 0;
6791 +
6792 +err0:
6793 + return err;
6794 +}
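
For scale, with the ring sizes from pfe_hif.h (HIF_RX_DESC_NT = 256, HIF_TX_DESC_NT = 2048) and the 16-byte struct hif_desc (four u32 fields), the single coherent allocation above is (256 + 2048) * 16 = 36864 bytes; the Rx ring occupies the base of the block and the Tx ring follows it, as pfe_hif_init_buffers() later derives with tx_base = rx_base + rx_ring_size. A standalone arithmetic check (userspace sketch, not driver code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hif_desc { uint32_t ctrl, status, data, next; };

int main(void)
{
	int rx_nt = 256, tx_nt = 2048;	/* HIF_RX_DESC_NT, HIF_TX_DESC_NT */
	size_t total = (rx_nt + tx_nt) * sizeof(struct hif_desc);

	assert(sizeof(struct hif_desc) == 16);
	assert(total == 36864);	/* one dma_alloc_coherent() covers both rings */
	return 0;
}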
6795 +
6796 +#if defined(LS1012A_PFE_RESET_WA)
6797 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
6798 +{
6799 + int ii;
6800 + struct hif_desc *desc = hif->rx_base;
6801 +
6802 + /*Mark all descriptors as LAST_BD */
6803 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6804 + desc->ctrl |= BD_CTRL_LAST_BD;
6805 + desc++;
6806 + }
6807 +}
6808 +
6809 +struct class_rx_hdr_t {
6810 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
6811 + u16 length; /* total packet length */
6812 + u16 phyno; /* input physical port number */
6813 + u32 status; /* gemac status bits */
6814 + u32 status2; /* reserved for software usage */
6815 +};
6816 +
6817 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
6818 + * except overflow
6819 + */
6820 +#define STATUS_BAD_FRAME_ERR BIT(16)
6821 +#define STATUS_LENGTH_ERR BIT(17)
6822 +#define STATUS_CRC_ERR BIT(18)
6823 +#define STATUS_TOO_SHORT_ERR BIT(19)
6824 +#define STATUS_TOO_LONG_ERR BIT(20)
6825 +#define STATUS_CODE_ERR BIT(21)
6826 +#define STATUS_MC_HASH_MATCH BIT(22)
6827 +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
6828 +#define STATUS_UNICAST_HASH_MATCH BIT(24)
6829 +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
6830 +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
6831 +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
6832 +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
6833 +#define MIN_PKT_SIZE 64
6834 +
6835 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
6836 +{
6837 + int i;
6838 +
6839 + for (i = 0; i < len; i += sizeof(u32)) {
6840 + *dst = htonl(*src);
6841 + dst++; src++;
6842 + }
6843 +}
6844 +
6845 +static void send_dummy_pkt_to_hif(void)
6846 +{
6847 + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
6848 + u32 physaddr;
6849 + struct class_rx_hdr_t local_hdr;
6850 + static u32 dummy_pkt[] = {
6851 + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
6852 + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
6853 + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
6854 + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
6855 +
6856 + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
6857 + if (!ddr_ptr)
6858 + return;
6859 +
6860 + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
6861 + if (!lmem_ptr)
6862 + return;
6863 +
6864 + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
6865 + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
6866 +
6867 + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
6868 +
6869 + local_hdr.phyno = htons(0); /* RX_PHY_0 */
6870 + local_hdr.length = htons(MIN_PKT_SIZE);
6871 +
6872 + local_hdr.next_ptr = htonl((u32)physaddr);
6873 + /*Mark checksums as correct */
6874 + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
6875 + STATUS_UDP_CHECKSUM_CORRECT |
6876 + STATUS_TCP_CHECKSUM_CORRECT |
6877 + STATUS_UNICAST_HASH_MATCH |
6878 + STATUS_CUMULATIVE_ARC_HIT));
6879 + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
6880 + sizeof(local_hdr));
6881 +
6882 + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
6883 + 0x40);
6884 +
6885 + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
6886 +}
6887 +
6888 +void pfe_hif_rx_idle(struct pfe_hif *hif)
6889 +{
6890 + int hif_stop_loop = 10;
6891 + u32 rx_status;
6892 +
6893 + pfe_hif_disable_rx_desc(hif);
6894 + pr_info("Bringing hif to idle state...");
6895 + writel(0, HIF_INT_ENABLE);
6896 + /*If HIF Rx BDP is busy send a dummy packet */
6897 + do {
6898 + rx_status = readl(HIF_RX_STATUS);
6899 + if (rx_status & BDP_CSR_RX_DMA_ACTV)
6900 + send_dummy_pkt_to_hif();
6901 +
6902 + usleep_range(100, 150);
6903 + } while (--hif_stop_loop);
6904 +
6905 + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
6906 + pr_info("Failed\n");
6907 + else
6908 + pr_info("Done\n");
6909 +}
6910 +#endif
6911 +
6912 +static void pfe_hif_free_descr(struct pfe_hif *hif)
6913 +{
6914 + pr_info("%s\n", __func__);
6915 +
6916 + dma_free_coherent(pfe->dev,
6917 + hif->rx_ring_size * sizeof(struct hif_desc) +
6918 + hif->tx_ring_size * sizeof(struct hif_desc),
6919 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
6920 +}
6921 +
6922 +void pfe_hif_desc_dump(struct pfe_hif *hif)
6923 +{
6924 + struct hif_desc *desc;
6925 + unsigned long desc_p;
6926 + int ii = 0;
6927 +
6928 + pr_info("%s\n", __func__);
6929 +
6930 + desc = hif->rx_base;
6931 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
6932 + hif->descr_baseaddr_p);
6933 +
6934 + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
6935 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6936 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6937 + readl(&desc->status), readl(&desc->ctrl),
6938 + readl(&desc->data), readl(&desc->next));
6939 + desc++;
6940 + }
6941 +
6942 + desc = hif->tx_base;
6943 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
6944 + hif->descr_baseaddr_p);
6945 +
6946 + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
6947 + for (ii = 0; ii < hif->tx_ring_size; ii++) {
6948 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6949 + readl(&desc->status), readl(&desc->ctrl),
6950 + readl(&desc->data), readl(&desc->next));
6951 + desc++;
6952 + }
6953 +}
6954 +
6955 +/* pfe_hif_release_buffers */
6956 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
6957 +{
6958 + struct hif_desc *desc;
6959 + int i = 0;
6960 +
6961 + hif->rx_base = hif->descr_baseaddr_v;
6962 +
6963 + pr_info("%s\n", __func__);
6964 +
6965 + /*Free Rx buffers */
6966 + desc = hif->rx_base;
6967 + for (i = 0; i < hif->rx_ring_size; i++) {
6968 + if (readl(&desc->data)) {
6969 + if ((i < hif->shm->rx_buf_pool_cnt) &&
6970 + (!hif->shm->rx_buf_pool[i])) {
6971 + /*
6972 + * dma_unmap_single(hif->dev, desc->data,
6973 + * hif->rx_buf_len[i], DMA_FROM_DEVICE);
6974 + */
6975 + dma_unmap_single(hif->dev,
6976 + DDR_PFE_TO_PHYS(
6977 + readl(&desc->data)),
6978 + hif->rx_buf_len[i],
6979 + DMA_FROM_DEVICE);
6980 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
6981 + } else {
6982 + pr_err("%s: buffer pool already full\n"
6983 + , __func__);
6984 + }
6985 + }
6986 +
6987 + writel(0, &desc->data);
6988 + writel(0, &desc->status);
6989 + writel(0, &desc->ctrl);
6990 + desc++;
6991 + }
6992 +}
6993 +
6994 +/*
6995 + * pfe_hif_init_buffers
6996 + * This function initializes the HIF Rx/Tx ring descriptors and
6997 + * fills the Rx queue with buffers.
6998 + */
6999 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
7000 +{
7001 + struct hif_desc *desc, *first_desc_p;
7002 + u32 data;
7003 + int i = 0;
7004 +
7005 + pr_info("%s\n", __func__);
7006 +
7007 + /* Check that enough Rx buffers are available in the shared memory */
7008 + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
7009 + return -ENOMEM;
7010 +
7011 + hif->rx_base = hif->descr_baseaddr_v;
7012 + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
7013 +
7014 + /*Initialize Rx descriptors */
7015 + desc = hif->rx_base;
7016 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
7017 +
7018 + for (i = 0; i < hif->rx_ring_size; i++) {
7019 + /* Initialize Rx buffers from the shared memory */
7020 +
7021 + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
7022 + pfe_pkt_size, DMA_FROM_DEVICE);
7023 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
7024 + hif->rx_buf_len[i] = pfe_pkt_size;
7025 + hif->shm->rx_buf_pool[i] = NULL;
7026 +
7027 + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
7028 + writel(DDR_PHYS_TO_PFE(data), &desc->data);
7029 + } else {
7030 + pr_err("%s : low on mem\n", __func__);
7031 +
7032 + goto err;
7033 + }
7034 +
7035 + writel(0, &desc->status);
7036 +
7037 + /*
7038 + * Ensure everything else is written to DDR before
7039 + * writing bd->ctrl
7040 + */
7041 + wmb();
7042 +
7043 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
7044 + | BD_CTRL_DIR | BD_CTRL_DESC_EN
7045 + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
7046 +
7047 + /* Chain descriptors */
7048 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7049 + desc++;
7050 + }
7051 +
7052 + /* Overwrite last descriptor to chain it to first one*/
7053 + desc--;
7054 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7055 +
7056 + hif->rxtoclean_index = 0;
7057 +
7058 + /*Initialize Rx buffer descriptor ring base address */
7059 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
7060 +
7061 + hif->tx_base = hif->rx_base + hif->rx_ring_size;
7062 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
7063 + hif->rx_ring_size;
7064 + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
7065 +
7066 + /*Initialize tx descriptors */
7067 + desc = hif->tx_base;
7068 +
7069 + for (i = 0; i < hif->tx_ring_size; i++) {
7070 + /* Chain descriptors */
7071 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7072 + writel(0, &desc->ctrl);
7073 + desc++;
7074 + }
7075 +
7076 + /* Overwrite last descriptor to chain it to first one */
7077 + desc--;
7078 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7079 + hif->txavail = hif->tx_ring_size;
7080 + hif->txtosend = 0;
7081 + hif->txtoclean = 0;
7082 + hif->txtoflush = 0;
7083 +
7084 + /*Initialize Tx buffer descriptor ring base address */
7085 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7086 +
7087 + return 0;
7088 +
7089 +err:
7090 + pfe_hif_release_buffers(hif);
7091 + return -ENOMEM;
7092 +}
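
Both rings are made circular the same way: every descriptor's next field is written with the PFE bus address of the following descriptor, and the last descriptor is then overwritten to point back at the first. A standalone sketch of the same idiom, using plain pointers instead of PFE bus addresses:

#include <assert.h>

#define RING_SIZE 8

struct desc { struct desc *next; };

int main(void)
{
	static struct desc ring[RING_SIZE];
	int i;

	for (i = 0; i < RING_SIZE; i++)	/* chain each entry forward */
		ring[i].next = &ring[i + 1];
	/* then close the ring, mirroring the desc--/writel pair above */
	ring[RING_SIZE - 1].next = &ring[0];

	for (i = 0; i < RING_SIZE; i++)
		assert(ring[i].next == &ring[(i + 1) % RING_SIZE]);
	return 0;
}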
7093 +
7094 +/*
7095 + * pfe_hif_client_register
7096 + *
7097 + * This function is used to register a client driver with the HIF driver.
7098 + *
7099 + * Return value:
7100 + * 0 - on Successful registration
7101 + */
7102 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7103 + struct hif_client_shm *client_shm)
7104 +{
7105 + struct hif_client *client = &hif->client[client_id];
7106 + u32 i, cnt;
7107 + struct rx_queue_desc *rx_qbase;
7108 + struct tx_queue_desc *tx_qbase;
7109 + struct hif_rx_queue *rx_queue;
7110 + struct hif_tx_queue *tx_queue;
7111 + int err = 0;
7112 +
7113 + pr_info("%s\n", __func__);
7114 +
7115 + spin_lock_bh(&hif->tx_lock);
7116 +
7117 + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7118 + pr_err("%s: client %d already registered\n",
7119 + __func__, client_id);
7120 + err = -1;
7121 + goto unlock;
7122 + }
7123 +
7124 + memset(client, 0, sizeof(struct hif_client));
7125 +
7126 + /* Initialize client Rx queues baseaddr, size */
7127 +
7128 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7129 + /* Check if the client is requesting more queues than supported */
7130 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7131 + cnt = HIF_CLIENT_QUEUES_MAX;
7132 +
7133 + client->rx_qn = cnt;
7134 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7135 + for (i = 0; i < cnt; i++) {
7136 + rx_queue = &client->rx_q[i];
7137 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7138 + rx_queue->size = client_shm->rx_qsize;
7139 + rx_queue->write_idx = 0;
7140 + }
7141 +
7142 + /* Initialize client Tx queues baseaddr, size */
7143 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7144 +
7145 + /* Check if the client is requesting more queues than supported */
7146 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7147 + cnt = HIF_CLIENT_QUEUES_MAX;
7148 +
7149 + client->tx_qn = cnt;
7150 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7151 + for (i = 0; i < cnt; i++) {
7152 + tx_queue = &client->tx_q[i];
7153 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7154 + tx_queue->size = client_shm->tx_qsize;
7155 + tx_queue->ack_idx = 0;
7156 + }
7157 +
7158 + set_bit(client_id, &hif->shm->g_client_status[0]);
7159 +
7160 +unlock:
7161 + spin_unlock_bh(&hif->tx_lock);
7162 +
7163 + return err;
7164 +}
7165 +
7166 +/*
7167 + * pfe_hif_client_unregister
7168 + *
7169 + * This function is used to unregister a client from the HIF driver.
7170 + *
7171 + */
7172 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7173 +{
7174 + pr_info("%s\n", __func__);
7175 +
7176 + /*
7177 + * Mark client as no longer available (which prevents further packet
7178 + * receive for this client)
7179 + */
7180 + spin_lock_bh(&hif->tx_lock);
7181 +
7182 + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7183 + pr_err("%s: client %d not registered\n", __func__,
7184 + client_id);
7185 +
7186 + spin_unlock_bh(&hif->tx_lock);
7187 + return;
7188 + }
7189 +
7190 + clear_bit(client_id, &hif->shm->g_client_status[0]);
7191 +
7192 + spin_unlock_bh(&hif->tx_lock);
7193 +}
7194 +
7195 +/*
7196 + * client_put_rxpacket-
7197 + * This function puts the Rx pkt in the given client Rx queue.
7198 + * It actually swaps the Rx pkt into the client Rx descriptor buffer
7199 + * and returns the free buffer from it.
7200 + *
7201 + * If the function returns NULL, the client Rx queue is full and the
7202 + * packet could not be sent to the client queue.
7203 + */
7204 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7205 + u32 flags, u32 client_ctrl, u32 *rem_len)
7206 +{
7207 + void *free_pkt = NULL;
7208 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
7209 +
7210 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7211 + if (page_mode) {
7212 + int rem_page_size = PAGE_SIZE -
7213 + PRESENT_OFST_IN_PAGE(pkt);
7214 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7215 + pfe_pkt_headroom);
7216 + *rem_len = (rem_page_size - cur_pkt_size);
7217 + if (*rem_len) {
7218 + free_pkt = pkt + cur_pkt_size;
7219 + get_page(virt_to_page(free_pkt));
7220 + } else {
7221 + free_pkt = (void
7222 + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7223 + *rem_len = pfe_pkt_size;
7224 + }
7225 + } else {
7226 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7227 + GFP_DMA_PFE);
7228 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7229 + }
7230 +
7231 + if (free_pkt) {
7232 + desc->data = pkt;
7233 + desc->client_ctrl = client_ctrl;
7234 + /*
7235 + * Ensure everything else is written to DDR before
7236 + * writing bd->ctrl
7237 + */
7238 + smp_wmb();
7239 + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7240 + queue->write_idx = (queue->write_idx + 1)
7241 + & (queue->size - 1);
7242 +
7243 + free_pkt += pfe_pkt_headroom;
7244 + }
7245 + }
7246 +
7247 + return free_pkt;
7248 +}
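
queue->write_idx advances with (write_idx + 1) & (size - 1), the usual power-of-two ring trick: the bitmask replaces a modulo, which assumes queue->size is a power of two. A standalone check of the equivalence:

#include <assert.h>

int main(void)
{
	unsigned int size = 64;	/* must be a power of two */
	unsigned int idx = 0, i;

	for (i = 0; i < 3 * size; i++) {
		assert(idx == i % size);	/* mask == modulo here */
		idx = (idx + 1) & (size - 1);
	}
	return 0;
}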
7249 +
7250 +/*
7251 + * pfe_hif_rx_process-
7252 + * This function does pfe hif rx queue processing.
7253 + * Dequeue packet from Rx queue and send it to corresponding client queue
7254 + */
7255 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7256 +{
7257 + struct hif_desc *desc;
7258 + struct hif_hdr *pkt_hdr;
7259 + struct __hif_hdr hif_hdr;
7260 + void *free_buf;
7261 + int rtc, len, rx_processed = 0;
7262 + struct __hif_desc local_desc;
7263 + int flags;
7264 + unsigned int desc_p;
7265 + unsigned int buf_size = 0;
7266 +
7267 + spin_lock_bh(&hif->lock);
7268 +
7269 + rtc = hif->rxtoclean_index;
7270 +
7271 + while (rx_processed < budget) {
7272 + desc = hif->rx_base + rtc;
7273 +
7274 + __memcpy12(&local_desc, desc);
7275 +
7276 + /* ACK pending Rx interrupt */
7277 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7278 + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7279 +
7280 + if (rx_processed == 0) {
7281 + if (napi_first_batch == 1) {
7282 + desc_p = hif->descr_baseaddr_p +
7283 + ((unsigned long int)(desc) -
7284 + (unsigned long
7285 + int)hif->descr_baseaddr_v);
7286 + napi_first_batch = 0;
7287 + }
7288 + }
7289 +
7290 + __memcpy12(&local_desc, desc);
7291 +
7292 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
7293 + break;
7294 + }
7295 +
7296 + napi_first_batch = 0;
7297 +
7298 +#ifdef HIF_NAPI_STATS
7299 + hif->napi_counters[NAPI_DESC_COUNT]++;
7300 +#endif
7301 + len = BD_BUF_LEN(local_desc.ctrl);
7302 + /*
7303 + * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7304 + * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7305 + */
7306 + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7307 + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7308 +
7309 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7310 +
7311 + /* Track last HIF header received */
7312 + if (!hif->started) {
7313 + hif->started = 1;
7314 +
7315 + __memcpy8(&hif_hdr, pkt_hdr);
7316 +
7317 + hif->qno = hif_hdr.hdr.q_num;
7318 + hif->client_id = hif_hdr.hdr.client_id;
7319 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7320 + hif_hdr.hdr.client_ctrl;
7321 + flags = CL_DESC_FIRST;
7322 +
7323 + } else {
7324 + flags = 0;
7325 + }
7326 +
7327 + if (local_desc.ctrl & BD_CTRL_LIFM)
7328 + flags |= CL_DESC_LAST;
7329 +
7330 + /* Check that the client id is valid and still registered */
7331 + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7332 + !(test_bit(hif->client_id,
7333 + &hif->shm->g_client_status[0]))) {
7334 + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7335 + __func__,
7336 + hif->client_id,
7337 + hif->qno);
7338 +
7339 + free_buf = pkt_hdr;
7340 +
7341 + goto pkt_drop;
7342 + }
7343 +
7344 + /* Check for a valid queue number */
7345 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7346 + pr_info("%s: packet with invalid queue: %d\n"
7347 + , __func__, hif->qno);
7348 + hif->qno = 0;
7349 + }
7350 +
7351 + free_buf =
7352 + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7353 + (void *)pkt_hdr, len, flags,
7354 + hif->client_ctrl, &buf_size);
7355 +
7356 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7357 + hif->qno);
7358 +
7359 + if (unlikely(!free_buf)) {
7360 +#ifdef HIF_NAPI_STATS
7361 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7362 +#endif
7363 + /*
7364 + * If we want to stay in polling mode to retry later,
7365 + * we need to tell napi that we consumed
7366 + * the full budget or we will hit a livelock scenario.
7367 + * The core code keeps this napi instance
7368 + * at the head of the list and none of the other
7369 + * instances get to run.
7370 + */
7371 + rx_processed = budget;
7372 +
7373 + if (flags & CL_DESC_FIRST)
7374 + hif->started = 0;
7375 +
7376 + break;
7377 + }
7378 +
7379 +pkt_drop:
7380 + /*Fill free buffer in the descriptor */
7381 + hif->rx_buf_addr[rtc] = free_buf;
7382 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7383 + writel((DDR_PHYS_TO_PFE
7384 + ((u32)dma_map_single(hif->dev,
7385 + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7386 + &desc->data);
7387 + /*
7388 + * Ensure everything else is written to DDR before
7389 + * writing bd->ctrl
7390 + */
7391 + wmb();
7392 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7393 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7394 + &desc->ctrl);
7395 +
7396 + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7397 +
7398 + if (local_desc.ctrl & BD_CTRL_LIFM) {
7399 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7400 + rx_processed++;
7401 +
7402 +#ifdef HIF_NAPI_STATS
7403 + hif->napi_counters[NAPI_PACKET_COUNT]++;
7404 +#endif
7405 + }
7406 + hif->started = 0;
7407 + }
7408 + }
7409 +
7410 + hif->rxtoclean_index = rtc;
7411 + spin_unlock_bh(&hif->lock);
7412 +
7413 + /* we made some progress, re-start rx dma in case it stopped */
7414 + hif_rx_dma_start();
7415 +
7416 + return rx_processed;
7417 +}
7418 +
7419 +/*
7420 + * client_ack_txpacket-
7421 + * This function acks the Tx packet in the given client Tx queue by resetting
7422 + * the ownership bit in the descriptor.
7423 + */
7424 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7425 + unsigned int q_no)
7426 +{
7427 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7428 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7429 +
7430 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7431 + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7432 + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7433 +
7434 + return 0;
7435 +
7436 + } else {
7437 + /*This should not happen */
7438 + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7439 + hif->txtosend, hif->txtoclean, hif->txavail,
7440 + client_id, q_no, queue, queue->ack_idx);
7441 + WARN(1, "%s: doesn't own this descriptor", __func__);
7442 + return 1;
7443 + }
7444 +}
7445 +
7446 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7447 +{
7448 + struct hif_desc *desc;
7449 + struct hif_desc_sw *desc_sw;
7450 + int ttc, tx_avl;
7451 + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7452 +
7453 + ttc = hif->txtoclean;
7454 + tx_avl = hif->txavail;
7455 +
7456 + while ((tx_avl < hif->tx_ring_size) && count--) {
7457 + desc = hif->tx_base + ttc;
7458 +
7459 + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7460 + break;
7461 +
7462 + desc_sw = &hif->tx_sw_queue[ttc];
7463 +
7464 + if (desc_sw->data) {
7465 + /*
7466 + * dmap_unmap_single(hif->dev, desc_sw->data,
7467 + * desc_sw->len, DMA_TO_DEVICE);
7468 + */
7469 + dma_unmap_single(hif->dev, desc_sw->data,
7470 + desc_sw->len, DMA_TO_DEVICE);
7471 + }
7472 +
7473 + if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7474 + pr_err("Invalid cl id %d\n", desc_sw->client_id);
7475 +
7476 + pkts_done[desc_sw->client_id]++;
7477 +
7478 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7479 +
7480 + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7481 + tx_avl++;
7482 + }
7483 +
7484 + if (pkts_done[0])
7485 + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7486 + if (pkts_done[1])
7487 + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7488 +
7489 + hif->txtoclean = ttc;
7490 + hif->txavail = tx_avl;
7491 +
7492 + if (!count) {
7493 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7494 + } else {
7495 + /*Enable Tx done interrupt */
7496 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7497 + HIF_INT_ENABLE);
7498 + }
7499 +}
7500 +
7501 +static void pfe_tx_do_cleanup(unsigned long data)
7502 +{
7503 + struct pfe_hif *hif = (struct pfe_hif *)data;
7504 +
7505 + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7506 +
7507 + hif_tx_done_process(hif, 64);
7508 +}
7509 +
7510 +/*
7511 + * __hif_xmit_pkt -
7512 + * This function puts one packet in the HIF Tx queue
7513 + */
7514 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7515 + q_no, void *data, u32 len, unsigned int flags)
7516 +{
7517 + struct hif_desc *desc;
7518 + struct hif_desc_sw *desc_sw;
7519 +
7520 + desc = hif->tx_base + hif->txtosend;
7521 + desc_sw = &hif->tx_sw_queue[hif->txtosend];
7522 +
7523 + desc_sw->len = len;
7524 + desc_sw->client_id = client_id;
7525 + desc_sw->q_no = q_no;
7526 + desc_sw->flags = flags;
7527 +
7528 + if (flags & HIF_DONT_DMA_MAP) {
7529 + desc_sw->data = 0;
7530 + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
7531 + } else {
7532 + desc_sw->data = dma_map_single(hif->dev, data, len,
7533 + DMA_TO_DEVICE);
7534 + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
7535 + }
7536 +
7537 + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
7538 + hif->txavail--;
7539 +
7540 + if (!((flags & HIF_DATA_VALID) && (flags &
7541 + HIF_LAST_BUFFER)))
7542 + goto skip_tx;
7543 +
7544 + /*
7545 + * Ensure everything else is written to DDR before
7546 + * writing bd->ctrl
7547 + */
7548 + wmb();
7549 +
7550 + do {
7551 + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
7552 + desc = hif->tx_base + hif->txtoflush;
7553 +
7554 + if (desc_sw->flags & HIF_LAST_BUFFER) {
7555 + writel((BD_CTRL_LIFM |
7556 + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
7557 + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
7558 + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
7559 + &desc->ctrl);
7560 + } else {
7561 + writel((BD_CTRL_DESC_EN |
7562 + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
7563 + }
7564 + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
7565 + } while (hif->txtoflush != hif->txtosend);
7568 +
7569 +skip_tx:
7570 + return;
7571 +}
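
A hypothetical caller sketch for the flag protocol above: descriptors are only armed (BD_CTRL_DESC_EN written) once the fragment carrying HIF_DATA_VALID | HIF_LAST_BUFFER is queued, so a multi-fragment packet is submitted like this (buffer pointers and lengths are assumptions for illustration):

/* Sketch only: queue a two-fragment packet under the tx lock. */
static void example_xmit_two_frags(struct pfe_hif *hif, void *hdr, u32 hdr_len,
				   void *payload, u32 payload_len)
{
	hif_tx_lock(hif);
	if (__hif_tx_avail(hif) >= 2) {
		__hif_xmit_pkt(hif, PFE_CL_GEM0, 0, hdr, hdr_len,
			       HIF_FIRST_BUFFER);
		__hif_xmit_pkt(hif, PFE_CL_GEM0, 0, payload, payload_len,
			       HIF_LAST_BUFFER | HIF_DATA_VALID);
	}
	hif_tx_unlock(hif);
}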
7572 +
7573 +static irqreturn_t wol_isr(int irq, void *dev_id)
7574 +{
7575 + pr_info("WoL\n");
7576 + gemac_set_wol(EMAC1_BASE_ADDR, 0);
7577 + gemac_set_wol(EMAC2_BASE_ADDR, 0);
7578 + return IRQ_HANDLED;
7579 +}
7580 +
7581 +/*
7582 + * hif_isr-
7583 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
7584 + */
7585 +static irqreturn_t hif_isr(int irq, void *dev_id)
7586 +{
7587 + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
7588 + int int_status;
7589 + int int_enable_mask;
7590 +
7591 + /*Read hif interrupt source register */
7592 + int_status = readl_relaxed(HIF_INT_SRC);
7593 + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
7594 +
7595 + if ((int_status & HIF_INT) == 0)
7596 + return IRQ_NONE;
7597 +
7598 + int_status &= ~(HIF_INT);
7599 +
7600 + if (int_status & HIF_RXPKT_INT) {
7601 + int_status &= ~(HIF_RXPKT_INT);
7602 + int_enable_mask &= ~(HIF_RXPKT_INT);
7603 +
7604 + napi_first_batch = 1;
7605 +
7606 + if (napi_schedule_prep(&hif->napi)) {
7607 +#ifdef HIF_NAPI_STATS
7608 + hif->napi_counters[NAPI_SCHED_COUNT]++;
7609 +#endif
7610 + __napi_schedule(&hif->napi);
7611 + }
7612 + }
7613 +
7614 + if (int_status & HIF_TXPKT_INT) {
7615 + int_status &= ~(HIF_TXPKT_INT);
7616 + int_enable_mask &= ~(HIF_TXPKT_INT);
7617 + /*Schedule tx cleanup tasklet */
7618 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7619 + }
7620 +
7621 + /*Disable interrupts, they will be enabled after they are serviced */
7622 + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
7623 +
7624 + if (int_status) {
7625 + pr_info("%s : Invalid interrupt : %d\n", __func__,
7626 + int_status);
7627 + writel(int_status, HIF_INT_SRC);
7628 + }
7629 +
7630 + return IRQ_HANDLED;
7631 +}
7632 +
7633 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
7634 +{
7635 + unsigned int client_id = data1;
7636 +
7637 + if (client_id >= HIF_CLIENTS_MAX) {
7638 + pr_err("%s: client id %d out of bounds\n", __func__,
7639 + client_id);
7640 + return;
7641 + }
7642 +
7643 + switch (req) {
7644 + case REQUEST_CL_REGISTER:
7645 + /* Request for register a client */
7646 + pr_info("%s: register client_id %d\n",
7647 + __func__, client_id);
7648 + pfe_hif_client_register(hif, client_id, (struct
7649 + hif_client_shm *)&hif->shm->client[client_id]);
7650 + break;
7651 +
7652 + case REQUEST_CL_UNREGISTER:
7653 + pr_info("%s: unregister client_id %d\n",
7654 + __func__, client_id);
7655 +
7656 + /* Request for unregister a client */
7657 + pfe_hif_client_unregister(hif, client_id);
7658 +
7659 + break;
7660 +
7661 + default:
7662 + pr_err("%s: unsupported request %d\n",
7663 + __func__, req);
7664 + break;
7665 + }
7666 +
7667 + /*
7668 + * Process client Tx queues
7669 + * Currently we don't check for pending tx packets
7670 + */
7671 +}
7672 +
7673 +/*
7674 + * pfe_hif_rx_poll
7675 + * This is the NAPI poll function to process the HIF Rx queue.
7676 + */
7677 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
7678 +{
7679 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
7680 + int work_done;
7681 +
7682 +#ifdef HIF_NAPI_STATS
7683 + hif->napi_counters[NAPI_POLL_COUNT]++;
7684 +#endif
7685 +
7686 + work_done = pfe_hif_rx_process(hif, budget);
7687 +
7688 + if (work_done < budget) {
7689 + napi_complete(napi);
7690 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
7691 + HIF_INT_ENABLE);
7692 + }
7693 +#ifdef HIF_NAPI_STATS
7694 + else
7695 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
7696 +#endif
7697 +
7698 + return work_done;
7699 +}
7700 +
7701 +/*
7702 + * pfe_hif_init
7703 + * This function initializes the base addresses, IRQs, etc.
7704 + */
7705 +int pfe_hif_init(struct pfe *pfe)
7706 +{
7707 + struct pfe_hif *hif = &pfe->hif;
7708 + int err;
7709 +
7710 + pr_info("%s\n", __func__);
7711 +
7712 + hif->dev = pfe->dev;
7713 + hif->irq = pfe->hif_irq;
7714 +
7715 + err = pfe_hif_alloc_descr(hif);
7716 + if (err)
7717 + goto err0;
7718 +
7719 + if (pfe_hif_init_buffers(hif)) {
7720 + pr_err("%s: Could not initialize buffer descriptors\n"
7721 + , __func__);
7722 + err = -ENOMEM;
7723 + goto err1;
7724 + }
7725 +
7726 + /* Initialize NAPI for Rx processing */
7727 + init_dummy_netdev(&hif->dummy_dev);
7728 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
7729 + HIF_RX_POLL_WEIGHT);
7730 + napi_enable(&hif->napi);
7731 +
7732 + spin_lock_init(&hif->tx_lock);
7733 + spin_lock_init(&hif->lock);
7734 +
7735 + hif_init();
7736 + hif_rx_enable();
7737 + hif_tx_enable();
7738 +
7739 + /* Disable tx done interrupt */
7740 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
7741 +
7742 + gpi_enable(HGPI_BASE_ADDR);
7743 +
7744 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
7745 + if (err) {
7746 + pr_err("%s: failed to get the hif IRQ = %d\n",
7747 + __func__, hif->irq);
7748 + goto err1;
7749 + }
7750 +
7751 + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
7752 + if (err) {
7753 + pr_err("%s: failed to get the wol IRQ = %d\n",
7754 + __func__, pfe->wol_irq);
7755 + goto err1;
7756 + }
7757 +
7758 + tasklet_init(&hif->tx_cleanup_tasklet,
7759 + (void(*)(unsigned long))pfe_tx_do_cleanup,
7760 + (unsigned long)hif);
7761 +
7762 + return 0;
7763 +err1:
7764 + pfe_hif_free_descr(hif);
7765 +err0:
7766 + return err;
7767 +}
7768 +
7769 +/* pfe_hif_exit- */
7770 +void pfe_hif_exit(struct pfe *pfe)
7771 +{
7772 + struct pfe_hif *hif = &pfe->hif;
7773 +
7774 + pr_info("%s\n", __func__);
7775 +
7776 + tasklet_kill(&hif->tx_cleanup_tasklet);
7777 +
7778 + spin_lock_bh(&hif->lock);
7779 + hif->shm->g_client_status[0] = 0;
7780 + /* Make sure all clients are disabled*/
7781 + hif->shm->g_client_status[1] = 0;
7782 +
7783 + spin_unlock_bh(&hif->lock);
7784 +
7785 + /*Disable Rx/Tx */
7786 + gpi_disable(HGPI_BASE_ADDR);
7787 + hif_rx_disable();
7788 + hif_tx_disable();
7789 +
7790 + napi_disable(&hif->napi);
7791 + netif_napi_del(&hif->napi);
7792 +
7793 + free_irq(pfe->wol_irq, pfe);
7794 + free_irq(hif->irq, hif);
7795 +
7796 + pfe_hif_release_buffers(hif);
7797 + pfe_hif_free_descr(hif);
7798 +}
7799 diff --git a/drivers/staging/fsl_ppfe/pfe_hif.h b/drivers/staging/fsl_ppfe/pfe_hif.h
7800 new file mode 100644
7801 index 00000000..6e36f0c1
7802 --- /dev/null
7803 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
7804 @@ -0,0 +1,211 @@
7805 +/*
7806 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7807 + * Copyright 2017 NXP
7808 + *
7809 + * This program is free software; you can redistribute it and/or modify
7810 + * it under the terms of the GNU General Public License as published by
7811 + * the Free Software Foundation; either version 2 of the License, or
7812 + * (at your option) any later version.
7813 + *
7814 + * This program is distributed in the hope that it will be useful,
7815 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7816 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7817 + * GNU General Public License for more details.
7818 + *
7819 + * You should have received a copy of the GNU General Public License
7820 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
7821 + */
7822 +
7823 +#ifndef _PFE_HIF_H_
7824 +#define _PFE_HIF_H_
7825 +
7826 +#include <linux/netdevice.h>
7827 +
7828 +#define HIF_NAPI_STATS
7829 +
7830 +#define HIF_CLIENT_QUEUES_MAX 16
7831 +#define HIF_RX_POLL_WEIGHT 64
7832 +
7833 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
7834 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
7835 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
7836 + & HIF_RX_PKT_MIN_SIZE_MASK)
7837 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
7838 + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
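
Both macros carve buffers into HIF_RX_PKT_MIN_SIZE (2 KB) slots: ROUND_MIN_RX_SIZE() rounds a length up to the next slot boundary, and PRESENT_OFST_IN_PAGE() yields the slot-aligned offset of a buffer within its page. A standalone worked example (the 4 KB PAGE_SIZE is an assumption here):

#include <assert.h>

#define PAGE_SIZE 4096	/* assumed for the example */
#define HIF_RX_PKT_MIN_SIZE 0x800
#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
				& HIF_RX_PKT_MIN_SIZE_MASK)
#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long)(_buf) & (PAGE_SIZE \
				- 1)) & HIF_RX_PKT_MIN_SIZE_MASK)

int main(void)
{
	/* a 1500-byte frame plus 128 bytes of headroom fits one 2 KB slot */
	assert(ROUND_MIN_RX_SIZE(1500 + 128) == 0x800);
	/* anything over 2 KB consumes the rest of the 4 KB page */
	assert(ROUND_MIN_RX_SIZE(0x801) == 0x1000);
	/* a buffer at page offset 0x900 belongs to the second 2 KB slot */
	assert(PRESENT_OFST_IN_PAGE(0x10900) == 0x800);
	return 0;
}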
7839 +
7840 +enum {
7841 + NAPI_SCHED_COUNT = 0,
7842 + NAPI_POLL_COUNT,
7843 + NAPI_PACKET_COUNT,
7844 + NAPI_DESC_COUNT,
7845 + NAPI_FULL_BUDGET_COUNT,
7846 + NAPI_CLIENT_FULL_COUNT,
7847 + NAPI_MAX_COUNT
7848 +};
7849 +
7850 +/*
7851 + * The HIF_TX_DESC_NT value should always be greater than 4,
7852 + * otherwise HIF_TX_POLL_MARK will become zero.
7853 + */
7854 +#define HIF_RX_DESC_NT 256
7855 +#define HIF_TX_DESC_NT 2048
7856 +
7857 +#define HIF_FIRST_BUFFER BIT(0)
7858 +#define HIF_LAST_BUFFER BIT(1)
7859 +#define HIF_DONT_DMA_MAP BIT(2)
7860 +#define HIF_DATA_VALID BIT(3)
7861 +#define HIF_TSO BIT(4)
7862 +
7863 +enum {
7864 + PFE_CL_GEM0 = 0,
7865 + PFE_CL_GEM1,
7866 + HIF_CLIENTS_MAX
7867 +};
7868 +
7869 +/*structure to store client queue info */
7870 +struct hif_rx_queue {
7871 + struct rx_queue_desc *base;
7872 + u32 size;
7873 + u32 write_idx;
7874 +};
7875 +
7876 +struct hif_tx_queue {
7877 + struct tx_queue_desc *base;
7878 + u32 size;
7879 + u32 ack_idx;
7880 +};
7881 +
7882 +/*Structure to store the client info */
7883 +struct hif_client {
7884 + int rx_qn;
7885 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
7886 + int tx_qn;
7887 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
7888 +};
7889 +
7890 +/*HIF hardware buffer descriptor */
7891 +struct hif_desc {
7892 + u32 ctrl;
7893 + u32 status;
7894 + u32 data;
7895 + u32 next;
7896 +};
7897 +
7898 +struct __hif_desc {
7899 + u32 ctrl;
7900 + u32 status;
7901 + u32 data;
7902 +};
7903 +
7904 +struct hif_desc_sw {
7905 + dma_addr_t data;
7906 + u16 len;
7907 + u8 client_id;
7908 + u8 q_no;
7909 + u16 flags;
7910 +};
7911 +
7912 +struct hif_hdr {
7913 + u8 client_id;
7914 + u8 q_num;
7915 + u16 client_ctrl;
7916 + u16 client_ctrl1;
7917 +};
7918 +
7919 +struct __hif_hdr {
7920 + union {
7921 + struct hif_hdr hdr;
7922 + u32 word[2];
7923 + };
7924 +};
7925 +
7926 +struct hif_ipsec_hdr {
7927 + u16 sa_handle[2];
7928 +} __packed;
7929 +
7930 +/* HIF_CTRL_TX... defines */
7931 +#define HIF_CTRL_TX_CHECKSUM BIT(2)
7932 +
7933 +/* HIF_CTRL_RX... defines */
7934 +#define HIF_CTRL_RX_OFFSET_OFST (24)
7935 +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
7936 +#define HIF_CTRL_RX_CONTINUED BIT(1)
7937 +
7938 +struct pfe_hif {
7939 + /* To store registered clients in hif layer */
7940 + struct hif_client client[HIF_CLIENTS_MAX];
7941 + struct hif_shm *shm;
7942 + int irq;
7943 +
7944 + void *descr_baseaddr_v;
7945 + unsigned long descr_baseaddr_p;
7946 +
7947 + struct hif_desc *rx_base;
7948 + u32 rx_ring_size;
7949 + u32 rxtoclean_index;
7950 + void *rx_buf_addr[HIF_RX_DESC_NT];
7951 + int rx_buf_len[HIF_RX_DESC_NT];
7952 + unsigned int qno;
7953 + unsigned int client_id;
7954 + unsigned int client_ctrl;
7955 + unsigned int started;
7956 +
7957 + struct hif_desc *tx_base;
7958 + u32 tx_ring_size;
7959 + u32 txtosend;
7960 + u32 txtoclean;
7961 + u32 txavail;
7962 + u32 txtoflush;
7963 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
7964 +
7965 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
7966 + spinlock_t tx_lock;
7967 +/* lock synchronizes hif rx queue processing */
7968 + spinlock_t lock;
7969 + struct net_device dummy_dev;
7970 + struct napi_struct napi;
7971 + struct device *dev;
7972 +
7973 +#ifdef HIF_NAPI_STATS
7974 + unsigned int napi_counters[NAPI_MAX_COUNT];
7975 +#endif
7976 + struct tasklet_struct tx_cleanup_tasklet;
7977 +};
7978 +
7979 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7980 + q_no, void *data, u32 len, unsigned int flags);
7981 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
7982 + void *data, unsigned int len);
7983 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
7984 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
7985 + data2);
7986 +int pfe_hif_init(struct pfe *pfe);
7987 +void pfe_hif_exit(struct pfe *pfe);
7988 +void pfe_hif_rx_idle(struct pfe_hif *hif);
7989 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
7990 +{
7991 + spin_lock_bh(&hif->tx_lock);
7992 + __hif_tx_done_process(hif, count);
7993 + spin_unlock_bh(&hif->tx_lock);
7994 +}
7995 +
7996 +static inline void hif_tx_lock(struct pfe_hif *hif)
7997 +{
7998 + spin_lock_bh(&hif->tx_lock);
7999 +}
8000 +
8001 +static inline void hif_tx_unlock(struct pfe_hif *hif)
8002 +{
8003 + spin_unlock_bh(&hif->tx_lock);
8004 +}
8005 +
8006 +static inline int __hif_tx_avail(struct pfe_hif *hif)
8007 +{
8008 + return hif->txavail;
8009 +}
8010 +
8011 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
8012 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
8013 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
8014 +
8015 +#endif /* _PFE_HIF_H_ */
8016 diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.c b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
8017 new file mode 100644
8018 index 00000000..837eaa24
8019 --- /dev/null
8020 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
8021 @@ -0,0 +1,601 @@
8022 +/*
8023 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8024 + * Copyright 2017 NXP
8025 + *
8026 + * This program is free software; you can redistribute it and/or modify
8027 + * it under the terms of the GNU General Public License as published by
8028 + * the Free Software Foundation; either version 2 of the License, or
8029 + * (at your option) any later version.
8030 + *
8031 + * This program is distributed in the hope that it will be useful,
8032 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8033 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8034 + * GNU General Public License for more details.
8035 + *
8036 + * You should have received a copy of the GNU General Public License
8037 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8038 + */
8039 +
8040 +#include <linux/version.h>
8041 +#include <linux/kernel.h>
8042 +#include <linux/slab.h>
8043 +#include <linux/interrupt.h>
8044 +#include <linux/workqueue.h>
8045 +#include <linux/dma-mapping.h>
8046 +#include <linux/dmapool.h>
8047 +#include <linux/sched.h>
8048 +#include <linux/skbuff.h>
8049 +#include <linux/moduleparam.h>
8050 +#include <linux/cpu.h>
8051 +
8052 +#include "pfe_mod.h"
8053 +#include "pfe_hif.h"
8054 +#include "pfe_hif_lib.h"
8055 +
8056 +unsigned int lro_mode;
8057 +unsigned int page_mode;
8058 +unsigned int tx_qos;
8059 +unsigned int pfe_pkt_size;
8060 +unsigned int pfe_pkt_headroom;
8061 +unsigned int emac_txq_cnt;
8062 +
8063 +/*
8064 + * @file pfe_hif_lib.c
8065 + * Common functions used by HIF client drivers
8066 + */
8067 +
8068 +/*HIF shared memory Global variable */
8069 +struct hif_shm ghif_shm;
8070 +
8071 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
8072 + * This function should be called after pfe_hif_exit
8073 + *
8074 + * @param[in] hif_shm Shared memory address location in DDR
8075 + */
8076 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8077 +{
8078 + int i;
8079 + void *pkt;
8080 +
8081 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8082 + pkt = hif_shm->rx_buf_pool[i];
8083 + if (pkt) {
8084 + hif_shm->rx_buf_pool[i] = NULL;
8085 + pkt -= pfe_pkt_headroom;
8086 +
8087 + if (page_mode)
8088 + put_page(virt_to_page(pkt));
8089 + else
8090 + kfree(pkt);
8091 + }
8092 + }
8093 +}
8094 +
8095 +/* Initialize shared memory used between HIF driver and clients,
8096 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8097 + * This function should be called before initializing HIF driver.
8098 + *
8099 + * @param[in] hif_shm Shared memory address location in DDR
8100 + * @return 0 on success, <0 on failure to initialize
8101 + */
8102 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8103 +{
8104 + int i;
8105 + void *pkt;
8106 +
8107 + memset(hif_shm, 0, sizeof(struct hif_shm));
8108 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8109 +
8110 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8111 + if (page_mode) {
8112 + pkt = (void *)__get_free_page(GFP_KERNEL |
8113 + GFP_DMA_PFE);
8114 + } else {
8115 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8116 + }
8117 +
8118 + if (pkt)
8119 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8120 + else
8121 + goto err0;
8122 + }
8123 +
8124 + return 0;
8125 +
8126 +err0:
8127 + pr_err("%s Low memory\n", __func__);
8128 + pfe_hif_shm_clean(hif_shm);
8129 + return -ENOMEM;
8130 +}
8131 +
8132 +/*This function sends an indication to the HIF driver
8133 + *
8134 + * @param[in] hif hif context
8135 + */
8136 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8137 + data2)
8138 +{
8139 + hif_process_client_req(hif, req, data1, data2);
8140 +}
8141 +
8142 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8143 +{
8144 + struct hif_client_s *client = pfe->hif_client[client_id];
8145 +
8146 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8147 + HIF_CLIENT_QUEUES_MAX))
8148 + return;
8149 +
8150 + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8151 + client->event_handler(client->priv, event_type, qno);
8152 +}
8153 +
8154 +/*This function releases the Rx queue descriptor memory and pre-filled buffers
8155 + *
8156 + * @param[in] client hif_client context
8157 + */
8158 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8159 +{
8160 + struct rx_queue_desc *desc;
8161 + int qno, ii;
8162 + void *buf;
8163 +
8164 + for (qno = 0; qno < client->rx_qn; qno++) {
8165 + desc = client->rx_q[qno].base;
8166 +
8167 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8168 + buf = (void *)desc->data;
8169 + if (buf) {
8170 + buf -= pfe_pkt_headroom;
8171 +
8172 + if (page_mode)
8173 + free_page((unsigned long)buf);
8174 + else
8175 + kfree(buf);
8176 +
8177 + desc->ctrl = 0;
8178 + }
8179 +
8180 + desc++;
8181 + }
8182 + }
8183 +
8184 + kfree(client->rx_qbase);
8185 +}
8186 +
8187 +/*This function allocates memory for the rxq descriptors and pre-fills rx queues
8188 + * with buffers.
8189 + * @param[in] client client context
8190 + * @param[in] q_size size of the rxQ, all queues are of same size
8191 + */
8192 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8193 + q_size)
8194 +{
8195 + struct rx_queue_desc *desc;
8196 + struct hif_client_rx_queue *queue;
8197 + int ii, qno;
8198 +
8199 + /*Allocate memory for the client queues */
8200 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8201 + rx_queue_desc), GFP_KERNEL);
8202 + if (!client->rx_qbase)
8203 + goto err;
8204 +
8205 + for (qno = 0; qno < client->rx_qn; qno++) {
8206 + queue = &client->rx_q[qno];
8207 +
8208 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8209 + rx_queue_desc);
8210 + queue->size = q_size;
8211 + queue->read_idx = 0;
8212 + queue->write_idx = 0;
8213 +
8214 + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8215 + queue->base, queue->size);
8216 + }
8217 +
8218 + for (qno = 0; qno < client->rx_qn; qno++) {
8219 + queue = &client->rx_q[qno];
8220 + desc = queue->base;
8221 +
8222 + for (ii = 0; ii < queue->size; ii++) {
8223 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8224 + CL_DESC_OWN;
8225 + desc++;
8226 + }
8227 + }
8228 +
8229 + return 0;
8230 +
8231 +err:
8232 + return 1;
8233 +}
8234 +
8235 +
8236 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8237 +{
8238 + pr_debug("%s\n", __func__);
8239 +
8240 + /*
8241 + * Check if there are any pending packets. Client must flush the tx
8242 + * queues before unregistering, by calling
8243 + * hif_lib_tx_get_next_complete().
8244 + *
8245 + * HIF no longer calls us since we are no longer registered.
8246 + */
8247 + if (queue->tx_pending)
8248 + pr_err("%s: pending transmit packets\n", __func__);
8249 +}
8250 +
8251 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8252 +{
8253 + int qno;
8254 +
8255 + pr_debug("%s\n", __func__);
8256 +
8257 + for (qno = 0; qno < client->tx_qn; qno++)
8258 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8259 +
8260 + kfree(client->tx_qbase);
8261 +}
8262 +
8263 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8264 + q_size)
8265 +{
8266 + struct hif_client_tx_queue *queue;
8267 + int qno;
8268 +
8269 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8270 + tx_queue_desc), GFP_KERNEL);
8271 + if (!client->tx_qbase)
8272 + return 1;
8273 +
8274 + for (qno = 0; qno < client->tx_qn; qno++) {
8275 + queue = &client->tx_q[qno];
8276 +
8277 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8278 + tx_queue_desc);
8279 + queue->size = q_size;
8280 + queue->read_idx = 0;
8281 + queue->write_idx = 0;
8282 + queue->tx_pending = 0;
8283 + queue->nocpy_flag = 0;
8284 + queue->prev_tmu_tx_pkts = 0;
8285 + queue->done_tmu_tx_pkts = 0;
8286 +
8287 + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8288 + queue->base, queue->size);
8289 + }
8290 +
8291 + return 0;
8292 +}
8293 +
8294 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8295 +{
8296 + return 0;
8297 +}
8298 +
8299 +int hif_lib_client_register(struct hif_client_s *client)
8300 +{
8301 + struct hif_shm *hif_shm;
8302 + struct hif_client_shm *client_shm;
8303 + int err, i;
8304 + /* int loop_cnt = 0; */
8305 +
8306 + pr_debug("%s\n", __func__);
8307 +
8308 + /*Allocate memory before spin_lock*/
8309 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8310 + err = -ENOMEM;
8311 + goto err_rx;
8312 + }
8313 +
8314 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8315 + err = -ENOMEM;
8316 + goto err_tx;
8317 + }
8318 +
8319 + spin_lock_bh(&pfe->hif.lock);
8320 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8321 + (pfe->hif_client[client->id])) {
8322 + err = -EINVAL;
8323 + goto err;
8324 + }
8325 +
8326 + hif_shm = client->pfe->hif.shm;
8327 +
8328 + if (!client->event_handler)
8329 + client->event_handler = hif_lib_event_dummy;
8330 +
8331 + /*Initialize client specific shared memory */
8332 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8333 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8334 + client_shm->rx_qsize = client->rx_qsize;
8335 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8336 + client_shm->tx_qsize = client->tx_qsize;
8337 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8338 + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8339 + /* spin_lock_init(&client->rx_lock); */
8340 +
8341 + for (i = 0; i < HIF_EVENT_MAX; i++) {
8342 + client->queue_mask[i] = 0; /*
8343 + * By default all events are
8344 + * unmasked
8345 + */
8346 + }
8347 +
8348 + /*Indicate to HIF driver*/
8349 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8350 +
8351 + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8352 + __func__, client, client->id, client->tx_qsize,
8353 + client->rx_qsize);
8354 +
8355 + client->cpu_id = -1;
8356 +
8357 + pfe->hif_client[client->id] = client;
8358 + spin_unlock_bh(&pfe->hif.lock);
8359 +
8360 + return 0;
8361 +
8362 +err:
8363 + spin_unlock_bh(&pfe->hif.lock);
8364 + hif_lib_client_release_tx_buffers(client);
8365 +
8366 +err_tx:
8367 + hif_lib_client_release_rx_buffers(client);
8368 +
8369 +err_rx:
8370 + return err;
8371 +}
8372 +
8373 +int hif_lib_client_unregister(struct hif_client_s *client)
8374 +{
8375 + struct pfe *pfe = client->pfe;
8376 + u32 client_id = client->id;
8377 +
8378 + pr_info(
8379 + "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
8380 + , __func__, client, client->id, client->tx_qsize,
8381 + client->rx_qsize);
8382 +
8383 + spin_lock_bh(&pfe->hif.lock);
8384 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8385 +
8386 + hif_lib_client_release_tx_buffers(client);
8387 + hif_lib_client_release_rx_buffers(client);
8388 + pfe->hif_client[client_id] = NULL;
8389 + spin_unlock_bh(&pfe->hif.lock);
8390 +
8391 + return 0;
8392 +}
8393 +
8394 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8395 + int qno)
8396 +{
8397 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8398 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
8399 +
8400 + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8401 + pr_debug("%s: Unsupported event : %d queue number : %d\n",
8402 + __func__, event, qno);
8403 + return -1;
8404 + }
8405 +
8406 + test_and_clear_bit(qno, &client->queue_mask[event]);
8407 +
8408 + switch (event) {
8409 + case EVENT_RX_PKT_IND:
8410 + if (!(desc->ctrl & CL_DESC_OWN))
8411 + hif_lib_indicate_client(client->id,
8412 + EVENT_RX_PKT_IND, qno);
8413 + break;
8414 +
8415 + case EVENT_HIGH_RX_WM:
8416 + case EVENT_TXDONE_IND:
8417 + default:
8418 + break;
8419 + }
8420 +
8421 + return 0;
8422 +}
8423 +
8424 +/*
8425 + * This function gets one packet from the specified client queue.
8426 + * It also refills the rx buffer.
8427 + */
8428 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8429 + *ofst, unsigned int *rx_ctrl,
8430 + unsigned int *desc_ctrl, void **priv_data)
8431 +{
8432 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8433 + struct rx_queue_desc *desc;
8434 + void *pkt = NULL;
8435 +
8436 + /*
8437 + * The following lock protects rx queue access from
8438 + * hif_lib_event_handler_start.
8439 + * In general this lock is not required, because hif_lib_xmit_pkt and
8440 + * hif_lib_event_handler_start are called from napi poll, which is
8441 + * not re-entrant. But if some client uses them differently, this lock
8442 + * is required.
8443 + */
8444 + /*spin_lock_irqsave(&client->rx_lock, flags); */
8445 + desc = queue->base + queue->read_idx;
8446 + if (!(desc->ctrl & CL_DESC_OWN)) {
8447 + pkt = desc->data - pfe_pkt_headroom;
8448 +
8449 + *rx_ctrl = desc->client_ctrl;
8450 + *desc_ctrl = desc->ctrl;
8451 +
8452 + if (desc->ctrl & CL_DESC_FIRST) {
8453 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8454 +
8455 + if (size) {
8456 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8457 + PFE_PKT_HEADER_SZ - size;
8458 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8459 + + size;
8460 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8461 + } else {
8462 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8463 + PFE_PKT_HEADER_SZ;
8464 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
8465 + *priv_data = NULL;
8466 + }
8467 +
8468 + } else {
8469 + *len = CL_DESC_BUF_LEN(desc->ctrl);
8470 + *ofst = pfe_pkt_headroom;
8471 + }
8472 +
8473 + /*
8474 + * Needed so we don't free a buffer/page
8475 + * twice on module_exit
8476 + */
8477 + desc->data = NULL;
8478 +
8479 + /*
8480 + * Ensure everything else is written to DDR before
8481 + * writing bd->ctrl
8482 + */
8483 + smp_wmb();
8484 +
8485 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8486 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8487 + }
8488 +
8489 + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8490 + return pkt;
8491 +}
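
A hypothetical client-side sketch of the intended usage: call hif_lib_receive_pkt() until it returns NULL to drain the queue; the payload starts at pkt + ofst and runs len bytes:

/* Sketch only: drain client queue qno (e.g. from the client's napi poll). */
static void example_drain_rx_queue(struct hif_client_s *client, int qno)
{
	unsigned int rx_ctrl, desc_ctrl;
	void *pkt, *priv_data;
	int len, ofst;

	while ((pkt = hif_lib_receive_pkt(client, qno, &len, &ofst,
					  &rx_ctrl, &desc_ctrl, &priv_data))) {
		/* hand pkt + ofst (len bytes) to the stack here */
	}
}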
8492 +
8493 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8494 + client_id, unsigned int qno,
8495 + u32 client_ctrl)
8496 +{
8497 + /* Optimize the write since the destination may be non-cacheable */
8498 + if (!((unsigned long)pkt_hdr & 0x3)) {
8499 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8500 + client_id;
8501 + } else {
8502 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8503 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8504 + }
8505 +}
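
On a little-endian CPU (assumed here; the LS1012A Cortex-A53 normally runs little-endian) the single aligned 32-bit store lays the fields out exactly where struct hif_hdr declares them. A standalone layout check:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct hif_hdr {
	uint8_t client_id;
	uint8_t q_num;
	uint16_t client_ctrl;
	uint16_t client_ctrl1;
};

int main(void)
{
	uint32_t word = (0xBEEFu << 16) | (3u << 8) | 1u;
	struct hif_hdr hdr = { 0 };

	memcpy(&hdr, &word, sizeof(word));	/* mimic the aligned store */
	assert(hdr.client_id == 1 && hdr.q_num == 3 &&
	       hdr.client_ctrl == 0xBEEF);	/* holds on little-endian */
	return 0;
}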
8506 +
8507 +/*This function puts the given packet in the specific client queue */
8508 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8509 + *data, unsigned int len, u32 client_ctrl,
8510 + unsigned int flags, void *client_data)
8511 +{
8512 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8513 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
8514 +
8515 + /* First buffer */
8516 + if (flags & HIF_FIRST_BUFFER) {
8517 + data -= sizeof(struct hif_hdr);
8518 + len += sizeof(struct hif_hdr);
8519 +
8520 + hif_hdr_write(data, client->id, qno, client_ctrl);
8521 + }
8522 +
8523 + desc->data = client_data;
8524 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8525 +
8526 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8527 +
8528 + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8529 + queue->tx_pending++;
8530 + queue->jiffies_last_packet = jiffies;
8531 +}
8532 +
8533 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8534 + unsigned int *flags, int count)
8535 +{
8536 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8537 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
8538 +
8539 + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
8540 + queue->read_idx, queue->tx_pending);
8541 +
8542 + if (!queue->tx_pending)
8543 + return NULL;
8544 +
8545 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
8546 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
8547 + client->id, TMU_DM_TX_TRANS, 4));
8548 +
8549 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
8550 + queue->done_tmu_tx_pkts = UINT_MAX -
8551 + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
8552 + else
8553 + queue->done_tmu_tx_pkts = tmu_tx_pkts -
8554 + queue->prev_tmu_tx_pkts;
8555 +
8556 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
8557 +
8558 + if (!queue->done_tmu_tx_pkts)
8559 + return NULL;
8560 + }
8561 +
8562 + if (desc->ctrl & CL_DESC_OWN)
8563 + return NULL;
8564 +
8565 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8566 + queue->tx_pending--;
8567 +
8568 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
8569 +
8570 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
8571 + queue->done_tmu_tx_pkts--;
8572 +
8573 + return desc->data;
8574 +}
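
A hypothetical reclaim loop built on the function above: keep calling until it returns NULL, and free the buffer once the fragment flagged HIF_LAST_BUFFER comes back (treating client_data as the skb is an assumption for illustration):

/* Sketch only: reclaim up to budget completed tx descriptors. */
static void example_reclaim_tx(struct hif_client_s *client, int qno, int budget)
{
	unsigned int flags;
	void *data;

	while (budget--) {
		data = hif_lib_tx_get_next_complete(client, qno, &flags,
						    budget);
		if (!data)
			break;
		if (flags & HIF_LAST_BUFFER)
			dev_kfree_skb_any(data);	/* assumes skb stored as client_data */
	}
}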
8575 +
8576 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
8577 +{
8578 + int i, q;
8579 +
8580 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
8581 + for (q = 0; q < emac_txq_cnt; q++) {
8582 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
8583 + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
8584 + pfe->tmu_credit.tx_credit[i][q] =
8585 + pfe->tmu_credit.tx_credit_max[i][q];
8586 + }
8587 +}
8588 +
8589 +int pfe_hif_lib_init(struct pfe *pfe)
8590 +{
8591 + int rc;
8592 +
8593 + pr_info("%s\n", __func__);
8594 +
8595 + if (lro_mode) {
8596 + page_mode = 1;
8597 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
8598 + pfe_pkt_headroom = 0;
8599 + } else {
8600 + page_mode = 0;
8601 + pfe_pkt_size = PFE_PKT_SIZE;
8602 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
8603 + }
8604 +
8605 + if (tx_qos)
8606 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
8607 + else
8608 + emac_txq_cnt = EMAC_TXQ_CNT;
8609 +
8610 + hif_lib_tmu_credit_init(pfe);
8611 + pfe->hif.shm = &ghif_shm;
8612 + rc = pfe_hif_shm_init(pfe->hif.shm);
8613 +
8614 + return rc;
8615 +}
8616 +
8617 +void pfe_hif_lib_exit(struct pfe *pfe)
8618 +{
8619 + pr_info("%s\n", __func__);
8620 +
8621 + pfe_hif_shm_clean(pfe->hif.shm);
8622 +}
8623 diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.h b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8624 new file mode 100644
8625 index 00000000..49e7b5f1
8626 --- /dev/null
8627 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8628 @@ -0,0 +1,239 @@
8629 +/*
8630 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8631 + * Copyright 2017 NXP
8632 + *
8633 + * This program is free software; you can redistribute it and/or modify
8634 + * it under the terms of the GNU General Public License as published by
8635 + * the Free Software Foundation; either version 2 of the License, or
8636 + * (at your option) any later version.
8637 + *
8638 + * This program is distributed in the hope that it will be useful,
8639 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8640 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8641 + * GNU General Public License for more details.
8642 + *
8643 + * You should have received a copy of the GNU General Public License
8644 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8645 + */
8646 +
8647 +#ifndef _PFE_HIF_LIB_H_
8648 +#define _PFE_HIF_LIB_H_
8649 +
8650 +#include "pfe_hif.h"
8651 +
8652 +#define HIF_CL_REQ_TIMEOUT 10
8653 +#define GFP_DMA_PFE 0
8654 +
8655 +enum {
8656 + REQUEST_CL_REGISTER = 0,
8657 + REQUEST_CL_UNREGISTER,
8658 + HIF_REQUEST_MAX
8659 +};
8660 +
8661 +enum {
8662 + /* Event to indicate that the client rx queue reached the water mark level */
8663 + EVENT_HIGH_RX_WM = 0,
8664 + /* Event to indicate that a packet was received for the client */
8665 + EVENT_RX_PKT_IND,
8666 + /* Event to indicate that packet tx is done for the client */
8667 + EVENT_TXDONE_IND,
8668 + HIF_EVENT_MAX
8669 +};
8670 +
8673 +/*structure to store client queue info */
8674 +struct hif_client_rx_queue {
8675 + struct rx_queue_desc *base;
8676 + u32 size;
8677 + u32 read_idx;
8678 + u32 write_idx;
8679 +};
8680 +
8681 +struct hif_client_tx_queue {
8682 + struct tx_queue_desc *base;
8683 + u32 size;
8684 + u32 read_idx;
8685 + u32 write_idx;
8686 + u32 tx_pending;
8687 + unsigned long jiffies_last_packet;
8688 + u32 nocpy_flag;
8689 + u32 prev_tmu_tx_pkts;
8690 + u32 done_tmu_tx_pkts;
8691 +};
8692 +
8693 +struct hif_client_s {
8694 + int id;
8695 + int tx_qn;
8696 + int rx_qn;
8697 + void *rx_qbase;
8698 + void *tx_qbase;
8699 + int tx_qsize;
8700 + int rx_qsize;
8701 + int cpu_id;
8702 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8703 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8704 + int (*event_handler)(void *priv, int event, int data);
8705 + unsigned long queue_mask[HIF_EVENT_MAX];
8706 + struct pfe *pfe;
8707 + void *priv;
8708 +};
8709 +
8710 +/*
8711 + * Client specific shared memory
8712 + * It contains number of Rx/Tx queues, base addresses and queue sizes
8713 + */
8714 +struct hif_client_shm {
8715 +	u32 ctrl; /* 0-7: number of Rx queues, 8-15: number of Tx queues */
8716 +	unsigned long rx_qbase; /* Rx queue base address */
8717 +	u32 rx_qsize; /* each Rx queue size; all Rx queues are the same size */
8718 +	unsigned long tx_qbase; /* Tx queue base address */
8719 +	u32 tx_qsize; /* each Tx queue size; all Tx queues are the same size */
8720 +};
8721 +
8722 +/*Client shared memory ctrl bit description */
8723 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
8724 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
8725 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
8726 + & 0xFF)
8727 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
8728 + & 0xFF)
8729 +
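+/*
+ * Editorial sketch: the decode macros above imply the packing below.
+ * CLIENT_CTRL_MAKE() is hypothetical and not part of this driver; it is
+ * shown only to document the layout of the ctrl word.
+ */
+#define CLIENT_CTRL_MAKE(rx_q_cnt, tx_q_cnt) \
+	((((tx_q_cnt) & 0xFF) << CLIENT_CTRL_TX_Q_CNT_OFST) | \
+	 (((rx_q_cnt) & 0xFF) << CLIENT_CTRL_RX_Q_CNT_OFST))
+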
8730 +/*
8731 + * Shared memory used to communicate between HIF driver and host/client drivers
8732 + * Before starting the HIF driver, rx_buf_pool and rx_buf_pool_cnt should be
8733 + * initialized with host buffers and the number of buffers in the pool.
8734 + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
8735 + */
8737 +struct hif_shm {
8738 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
8739 + /*Rx buffers required to initialize HIF rx descriptors */
8740 + void *rx_buf_pool[HIF_RX_DESC_NT];
8741 + unsigned long g_client_status[2]; /*Global client status bit mask */
8742 + /* Client specific shared memory */
8743 + struct hif_client_shm client[HIF_CLIENTS_MAX];
8744 +};
8745 +
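+/*
+ * Minimal editorial sketch: seeding the rx buffer pool before HIF start,
+ * as the comment above requires. In this driver the pool is actually
+ * filled by pfe_hif_shm_init(); the kmalloc()-based scheme (which needs
+ * <linux/slab.h>) and the example_ name are assumptions for illustration.
+ */
+static inline int example_seed_rx_pool(struct hif_shm *hif_shm)
+{
+	u32 i;
+
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		/* 2048 matches PFE_BUF_SIZE, defined later in this header */
+		hif_shm->rx_buf_pool[i] = kmalloc(2048, GFP_KERNEL);
+		if (!hif_shm->rx_buf_pool[i])
+			return -ENOMEM;	/* a real caller would unwind */
+	}
+	return 0;
+}
+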
8746 +#define CL_DESC_OWN	BIT(31)
8747 +/* This sets ownership to the HIF driver */
8748 +#define CL_DESC_LAST	BIT(30)
8749 +/* This indicates the last buffer of a multi-buffer packet */
8750 +#define CL_DESC_FIRST	BIT(29)
8751 +/* This indicates the first buffer of a multi-buffer packet */
8752 +
8753 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
8754 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
8755 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
8756 +
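+/*
+ * Editorial sketch: decoding a descriptor ctrl word with the macros
+ * above. The example_ helper is hypothetical, for illustration only.
+ */
+static inline void example_decode_cl_desc(u32 ctrl, u32 *len, u32 *flags,
+					  int *hw_owned)
+{
+	*len = CL_DESC_BUF_LEN(ctrl);
+	*flags = CL_DESC_GET_FLAGS(ctrl);
+	*hw_owned = !!(ctrl & CL_DESC_OWN);
+}
+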
8757 +struct rx_queue_desc {
8758 + void *data;
8759 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8760 + u32 client_ctrl;
8761 +};
8762 +
8763 +struct tx_queue_desc {
8764 + void *data;
8765 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
8766 +};
8767 +
8768 +/* HIF Rx does not work properly for 2-byte aligned buffers, and the
8769 + * IP header should be 4-byte aligned for better performance.
8770 + * "ip_header = 64 + 6 (hif_header) + 14 (MAC header)" = 84, a multiple of 4.
8771 + */
8772 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
8773 +/* must be big enough for headroom, pkt size and skb shared info */
8774 +#define PFE_BUF_SIZE 2048
8775 +#define PFE_PKT_HEADROOM 128
8776 +
8777 +#define SKB_SHARED_INFO_SIZE (sizeof(struct skb_shared_info))
8778 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
8779 + - SKB_SHARED_INFO_SIZE)
8780 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
8781 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
8782 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
8783 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
8784 + + MAX_L4_HDR_SIZE)
8785 +/* Used in page mode to clamp the packet size to the maximum supported by
8786 + * the HIF hw interface (<16KiB)
8787 + */
8788 +#define MAX_PFE_PKT_SIZE 16380UL
8789 +
8790 +extern unsigned int pfe_pkt_size;
8791 +extern unsigned int pfe_pkt_headroom;
8792 +extern unsigned int page_mode;
8793 +extern unsigned int lro_mode;
8794 +extern unsigned int tx_qos;
8795 +extern unsigned int emac_txq_cnt;
8796 +
8797 +int pfe_hif_lib_init(struct pfe *pfe);
8798 +void pfe_hif_lib_exit(struct pfe *pfe);
8799 +int hif_lib_client_register(struct hif_client_s *client);
8800 +int hif_lib_client_unregister(struct hif_client_s *client);
8801 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8802 + *data, unsigned int len, u32 client_ctrl,
8803 + unsigned int flags, void *client_data);
8804 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
8805 + unsigned int len, u32 client_ctrl, void *client_data);
8806 +void hif_lib_indicate_client(int cl_id, int event, int data);
8807 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
8808 + data);
8809 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
8810 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
8811 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8812 + unsigned int *flags, int count);
8813 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8814 + *ofst, unsigned int *rx_ctrl,
8815 + unsigned int *desc_ctrl, void **priv_data);
8816 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
8817 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
8818 + enable);
8819 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
8820 + qno)
8821 +{
8822 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8823 +
8824 + return (queue->size - queue->tx_pending);
8825 +}
8826 +
8827 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
8828 + int qno)
8829 +{
8830 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8831 +
8832 + return queue->write_idx;
8833 +}
8834 +
8835 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
8836 + qno)
8837 +{
8838 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8839 +
8840 + return queue->tx_pending;
8841 +}
8842 +
8843 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
8844 + ((pfe)->tmu_credit.tx_credit[id][qno])
8845 +
8846 +#define hif_lib_tx_credit_max(pfe, id, qno) \
8847 + ((pfe)->tmu_credit.tx_credit_max[id][qno])
8848 +
8849 +/* Consume TMU TX credit for a queue when tx_qos is enabled, and
8850 + * account for the packets handed to the TMU.
8851 + */
8852 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
8853 + ({ typeof(pfe) pfe_ = pfe; \
8854 + typeof(id) id_ = id; \
8855 +	typeof(qno) qno_ = qno; \
8856 + typeof(credit) credit_ = credit; \
8857 + do { \
8858 + if (tx_qos) { \
8859 + (pfe_)->tmu_credit.tx_credit[id_][qno_]\
8860 + -= credit_; \
8861 + (pfe_)->tmu_credit.tx_packets[id_][qno_]\
8862 + += credit_; \
8863 + } \
8864 + } while (0); \
8865 + })
8866 +
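+/*
+ * Usage sketch (editorial, not part of this driver): a TX-path caller
+ * could gate transmission on available credit and consume one credit
+ * per packet. Kept as a macro, like the helpers above, because struct
+ * pfe is not yet a complete type in this header.
+ */
+#define example_tx_credit_take(pfe, id, qno)			\
+	({ int ok_ = hif_lib_tx_credit_avail(pfe, id, qno) > 0; \
+	if (ok_)						\
+		hif_lib_tx_credit_use(pfe, id, qno, 1);		\
+	ok_ ? 0 : -EBUSY; })
+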
8867 +#endif /* _PFE_HIF_LIB_H_ */
8868 diff --git a/drivers/staging/fsl_ppfe/pfe_hw.c b/drivers/staging/fsl_ppfe/pfe_hw.c
8869 new file mode 100644
8870 index 00000000..16ea2c65
8871 --- /dev/null
8872 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
8873 @@ -0,0 +1,176 @@
8874 +/*
8875 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8876 + * Copyright 2017 NXP
8877 + *
8878 + * This program is free software; you can redistribute it and/or modify
8879 + * it under the terms of the GNU General Public License as published by
8880 + * the Free Software Foundation; either version 2 of the License, or
8881 + * (at your option) any later version.
8882 + *
8883 + * This program is distributed in the hope that it will be useful,
8884 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8885 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8886 + * GNU General Public License for more details.
8887 + *
8888 + * You should have received a copy of the GNU General Public License
8889 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8890 + */
8891 +
8892 +#include "pfe_mod.h"
8893 +#include "pfe_hw.h"
8894 +
8895 +/* Functions to handle most of pfe hw register initialization */
8896 +int pfe_hw_init(struct pfe *pfe, int resume)
8897 +{
8898 + struct class_cfg class_cfg = {
8899 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8900 + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
8901 + ROUTE_TABLE_BASEADDR,
8902 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
8903 + };
8904 +
8905 + struct tmu_cfg tmu_cfg = {
8906 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8907 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
8908 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
8909 + };
8910 +
8911 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8912 + struct util_cfg util_cfg = {
8913 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8914 + };
8915 +#endif
8916 +
8917 + struct BMU_CFG bmu1_cfg = {
8918 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
8919 + BMU1_LMEM_BASEADDR),
8920 + .count = BMU1_BUF_COUNT,
8921 + .size = BMU1_BUF_SIZE,
8922 + .low_watermark = 10,
8923 + .high_watermark = 15,
8924 + };
8925 +
8926 + struct BMU_CFG bmu2_cfg = {
8927 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
8928 + BMU2_DDR_BASEADDR),
8929 + .count = BMU2_BUF_COUNT,
8930 + .size = BMU2_BUF_SIZE,
8931 + .low_watermark = 250,
8932 + .high_watermark = 253,
8933 + };
8934 +
8935 + struct gpi_cfg egpi1_cfg = {
8936 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
8937 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
8938 + .aseq_len = EGPI1_ASEQ_LEN,
8939 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
8940 + EMAC_TCNTRL_REG),
8941 + };
8942 +
8943 + struct gpi_cfg egpi2_cfg = {
8944 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
8945 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
8946 + .aseq_len = EGPI2_ASEQ_LEN,
8947 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
8948 + EMAC_TCNTRL_REG),
8949 + };
8950 +
8951 + struct gpi_cfg hgpi_cfg = {
8952 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
8953 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
8954 + .aseq_len = HGPI_ASEQ_LEN,
8955 + .mtip_pause_reg = 0,
8956 + };
8957 +
8958 + pr_info("%s\n", __func__);
8959 +
8960 +#if !defined(LS1012A_PFE_RESET_WA)
8961 + /* LS1012A needs this to make PE work correctly */
8962 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
8963 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
8964 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
8965 + usleep_range(10, 20);
8966 +#endif
8967 +
8968 + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
8969 + pr_info("TMU version: %x\n", readl(TMU_VERSION));
8970 +
8971 + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
8972 + BMU_VERSION));
8973 + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
8974 + BMU_VERSION));
8975 +
8976 + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
8977 + GPI_VERSION));
8978 + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
8979 + GPI_VERSION));
8980 + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
8981 + GPI_VERSION));
8982 +
8983 + pr_info("HIF version: %x\n", readl(HIF_VERSION));
8984 +	pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
8985 +
8986 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8987 + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
8988 +#endif
8989 + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
8990 + ;
8991 +
8992 + hif_rx_disable();
8993 + hif_tx_disable();
8994 +
8995 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
8996 +
8997 + pr_info("bmu_init(1) done\n");
8998 +
8999 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
9000 +
9001 + pr_info("bmu_init(2) done\n");
9002 +
9003 + class_cfg.resume = resume ? 1 : 0;
9004 +
9005 + class_init(&class_cfg);
9006 +
9007 + pr_info("class_init() done\n");
9008 +
9009 + tmu_init(&tmu_cfg);
9010 +
9011 + pr_info("tmu_init() done\n");
9012 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9013 + util_init(&util_cfg);
9014 +
9015 + pr_info("util_init() done\n");
9016 +#endif
9017 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
9018 +
9019 + pr_info("gpi_init(1) done\n");
9020 +
9021 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
9022 +
9023 + pr_info("gpi_init(2) done\n");
9024 +
9025 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
9026 +
9027 + pr_info("gpi_init(hif) done\n");
9028 +
9029 + bmu_enable(BMU1_BASE_ADDR);
9030 +
9031 + pr_info("bmu_enable(1) done\n");
9032 +
9033 + bmu_enable(BMU2_BASE_ADDR);
9034 +
9035 + pr_info("bmu_enable(2) done\n");
9036 +
9037 + return 0;
9038 +}
9039 +
9040 +void pfe_hw_exit(struct pfe *pfe)
9041 +{
9042 + pr_info("%s\n", __func__);
9043 +
9044 + bmu_disable(BMU1_BASE_ADDR);
9045 + bmu_reset(BMU1_BASE_ADDR);
9046 +
9047 + bmu_disable(BMU2_BASE_ADDR);
9048 + bmu_reset(BMU2_BASE_ADDR);
9049 +}
9050 diff --git a/drivers/staging/fsl_ppfe/pfe_hw.h b/drivers/staging/fsl_ppfe/pfe_hw.h
9051 new file mode 100644
9052 index 00000000..53b5fe14
9053 --- /dev/null
9054 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
9055 @@ -0,0 +1,27 @@
9056 +/*
9057 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9058 + * Copyright 2017 NXP
9059 + *
9060 + * This program is free software; you can redistribute it and/or modify
9061 + * it under the terms of the GNU General Public License as published by
9062 + * the Free Software Foundation; either version 2 of the License, or
9063 + * (at your option) any later version.
9064 + *
9065 + * This program is distributed in the hope that it will be useful,
9066 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9067 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9068 + * GNU General Public License for more details.
9069 + *
9070 + * You should have received a copy of the GNU General Public License
9071 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9072 + */
9073 +
9074 +#ifndef _PFE_HW_H_
9075 +#define _PFE_HW_H_
9076 +
9077 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
9078 +
9079 +int pfe_hw_init(struct pfe *pfe, int resume);
9080 +void pfe_hw_exit(struct pfe *pfe);
9081 +
9082 +#endif /* _PFE_HW_H_ */
9083 diff --git a/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9084 new file mode 100644
9085 index 00000000..c579eb58
9086 --- /dev/null
9087 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9088 @@ -0,0 +1,394 @@
9089 +/*
9090 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9091 + * Copyright 2017 NXP
9092 + *
9093 + * This program is free software; you can redistribute it and/or modify
9094 + * it under the terms of the GNU General Public License as published by
9095 + * the Free Software Foundation; either version 2 of the License, or
9096 + * (at your option) any later version.
9097 + *
9098 + * This program is distributed in the hope that it will be useful,
9099 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9100 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9101 + * GNU General Public License for more details.
9102 + *
9103 + * You should have received a copy of the GNU General Public License
9104 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9105 + */
9106 +
9107 +#include <linux/module.h>
9108 +#include <linux/device.h>
9109 +#include <linux/of_net.h>
9110 +#include <linux/of_address.h>
9111 +#include <linux/platform_device.h>
9112 +#include <linux/slab.h>
9113 +#include <linux/clk.h>
9114 +#include <linux/mfd/syscon.h>
9115 +#include <linux/regmap.h>
9116 +
9117 +#include "pfe_mod.h"
9118 +
9119 +struct ls1012a_pfe_platform_data pfe_platform_data;
9120 +
9121 +static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int
9122 + if_cnt,
9123 + struct ls1012a_pfe_platform_data
9124 + *pdata)
9125 +{
9126 + struct device_node *gem = NULL, *phy = NULL;
9127 + int size;
9128 + int ii = 0, phy_id = 0;
9129 + const u32 *addr;
9130 + const void *mac_addr;
9131 +
9132 + for (ii = 0; ii < if_cnt; ii++) {
9133 + gem = of_get_next_child(parent, gem);
9134 + if (!gem)
9135 + goto err;
9136 + addr = of_get_property(gem, "reg", &size);
9137 + if (addr && (be32_to_cpup(addr) == port))
9138 + break;
9139 + }
9140 +
9141 + if (ii >= if_cnt) {
9142 +		pr_err("%s:%d Failed to find interface %d\n",
9143 +		       __func__, __LINE__, port);
9144 + goto err;
9145 + }
9146 +
9147 + pdata->ls1012a_eth_pdata[port].gem_id = port;
9148 +
9149 + mac_addr = of_get_mac_address(gem);
9150 +
9151 + if (mac_addr) {
9152 + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9153 + ETH_ALEN);
9154 + }
9155 +
9156 + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9157 +
9158 + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9159 + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9160 + __LINE__);
9161 +
9162 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
9163 + if (!addr)
9164 + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
9165 + __LINE__);
9166 + else
9167 + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
9168 +
9169 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9170 + if (!addr) {
9171 + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9172 + __LINE__);
9173 + } else {
9174 + phy_id = be32_to_cpup(addr);
9175 + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9176 + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9177 + }
9178 +
9179 + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
+	if (!addr) {
+		pr_err("%s: Invalid mdio-mux-val....\n", __func__);
+	} else {
+		phy_id = be32_to_cpup(addr);
+		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
+	}
9185 +
9186 + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9187 + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9188 + pdata->ls1012a_eth_pdata[port].mdio_muxval;
9189 +
9190 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9191 + if (!addr)
9192 + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
9193 + __func__, __LINE__);
9194 + else
9195 + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9196 +
9197 +	/* If there is no PHY attached, skip reading the mdio properties */
9198 + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9199 + goto done;
9200 +
9201 + phy = of_get_next_child(gem, NULL);
9202 +
9203 + addr = of_get_property(phy, "reg", &size);
9204 +
9205 + if (!addr)
9206 + pr_err("%s:%d Invalid phy enable flag....\n",
9207 + __func__, __LINE__);
9208 + else
9209 + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
9210 +
9211 + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9212 +
9213 +done:
9214 +
9215 + return 0;
9216 +
9217 +err:
9218 + return -1;
9219 +}
9220 +
9221 +/*
9222 + * pfe_platform_probe - probe the PFE platform device and bring up the driver
9223 + */
9227 +static int pfe_platform_probe(struct platform_device *pdev)
9228 +{
9229 + struct resource res;
9230 + int ii, rc, interface_count = 0, size = 0;
9231 + const u32 *prop;
9232 + struct device_node *np;
9233 + struct clk *pfe_clk;
9234 +
9235 + np = pdev->dev.of_node;
9236 +
9237 + if (!np) {
9238 + pr_err("Invalid device node\n");
9239 + return -EINVAL;
9240 + }
9241 +
9242 + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9243 + if (!pfe) {
9244 + rc = -ENOMEM;
9245 + goto err_alloc;
9246 + }
9247 +
9248 + platform_set_drvdata(pdev, pfe);
9249 +
9250 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9251 +
9252 + if (of_address_to_resource(np, 1, &res)) {
9253 + rc = -ENOMEM;
9254 + pr_err("failed to get ddr resource\n");
9255 + goto err_ddr;
9256 + }
9257 +
9258 + pfe->ddr_phys_baseaddr = res.start;
9259 + pfe->ddr_size = resource_size(&res);
9260 +
9261 + pfe->ddr_baseaddr = phys_to_virt(res.start);
9262 + if (!pfe->ddr_baseaddr) {
9263 +		pr_err("phys_to_virt() ddr failed\n");
9264 + rc = -ENOMEM;
9265 + goto err_ddr;
9266 + }
9267 +
9268 + pfe->scfg =
9269 + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9270 + "fsl,pfe-scfg");
9271 + if (IS_ERR(pfe->scfg)) {
9272 + dev_err(&pdev->dev, "No syscfg phandle specified\n");
+		rc = PTR_ERR(pfe->scfg);
+		goto err_axi;
9274 + }
9275 +
9276 + pfe->cbus_baseaddr = of_iomap(np, 0);
9277 + if (!pfe->cbus_baseaddr) {
9278 + rc = -ENOMEM;
9279 + pr_err("failed to get axi resource\n");
9280 + goto err_axi;
9281 + }
9282 +
9283 + pfe->hif_irq = platform_get_irq(pdev, 0);
9284 + if (pfe->hif_irq < 0) {
9285 + pr_err("platform_get_irq for hif failed\n");
9286 + rc = pfe->hif_irq;
9287 + goto err_hif_irq;
9288 + }
9289 +
9290 + pfe->wol_irq = platform_get_irq(pdev, 2);
9291 + if (pfe->wol_irq < 0) {
9292 + pr_err("platform_get_irq for WoL failed\n");
9293 + rc = pfe->wol_irq;
9294 + goto err_hif_irq;
9295 + }
9296 +
9297 + /* Read interface count */
9298 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9299 + if (!prop) {
9300 + pr_err("Failed to read number of interfaces\n");
9301 + rc = -ENXIO;
9302 + goto err_prop;
9303 + }
9304 +
9305 + interface_count = be32_to_cpup(prop);
9306 + if (interface_count <= 0) {
9307 +		pr_err("Invalid ethernet interface count: %d\n",
9308 + interface_count);
9309 + rc = -ENXIO;
9310 + goto err_prop;
9311 + }
9312 +
9313 + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9314 +
9315 + for (ii = 0; ii < interface_count; ii++) {
9316 + pfe_get_gemac_if_proprties(np, ii, interface_count,
9317 + &pfe_platform_data);
9318 + }
9319 +
9320 + pfe->dev = &pdev->dev;
9321 +
9322 + pfe->dev->platform_data = &pfe_platform_data;
9323 +
9324 + /* declare WoL capabilities */
9325 + device_init_wakeup(&pdev->dev, true);
9326 +
9327 + /* find the clocks */
9328 + pfe_clk = devm_clk_get(pfe->dev, "pfe");
+	if (IS_ERR(pfe_clk)) {
+		rc = PTR_ERR(pfe_clk);
+		goto err_hif_irq;
+	}
9331 +
9332 + /* PFE clock is (platform clock / 2) */
9333 + /* save sys_clk value as KHz */
9334 + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9335 +
9336 + rc = pfe_probe(pfe);
9337 + if (rc < 0)
9338 + goto err_probe;
9339 +
9340 + return 0;
9341 +
9342 +err_probe:
9343 +err_prop:
9344 +err_hif_irq:
9345 + iounmap(pfe->cbus_baseaddr);
9346 +
9347 +err_axi:
9348 + iounmap(pfe->ddr_baseaddr);
9349 +
9350 +err_ddr:
9351 + platform_set_drvdata(pdev, NULL);
9352 +
9353 + kfree(pfe);
9354 +
9355 +err_alloc:
9356 + return rc;
9357 +}
9358 +
9359 +/*
9360 + * pfe_platform_remove - tear down the PFE driver and release its resources
9361 + */
9362 +static int pfe_platform_remove(struct platform_device *pdev)
9363 +{
9364 + struct pfe *pfe = platform_get_drvdata(pdev);
9365 + int rc;
9366 +
9367 + pr_info("%s\n", __func__);
9368 +
9369 + rc = pfe_remove(pfe);
9370 +
9371 + iounmap(pfe->cbus_baseaddr);
9372 + iounmap(pfe->ddr_baseaddr);
9373 +
9374 + platform_set_drvdata(pdev, NULL);
9375 +
9376 + kfree(pfe);
9377 +
9378 + return rc;
9379 +}
9380 +
9381 +#ifdef CONFIG_PM
9382 +#ifdef CONFIG_PM_SLEEP
9383 +static int pfe_platform_suspend(struct device *dev)
9384 +{
9385 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9386 + struct net_device *netdev;
9387 + int i;
9388 +
9389 + pfe->wake = 0;
9390 +
9391 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9392 + netdev = pfe->eth.eth_priv[i]->ndev;
9393 +
9394 + netif_device_detach(netdev);
9395 +
9396 + if (netif_running(netdev))
9397 + if (pfe_eth_suspend(netdev))
9398 + pfe->wake = 1;
9399 + }
9400 +
9401 + /* Shutdown PFE only if we're not waking up the system */
9402 + if (!pfe->wake) {
9403 +#if defined(LS1012A_PFE_RESET_WA)
9404 + pfe_hif_rx_idle(&pfe->hif);
9405 +#endif
9406 + pfe_ctrl_suspend(&pfe->ctrl);
9407 + pfe_firmware_exit(pfe);
9408 +
9409 + pfe_hif_exit(pfe);
9410 + pfe_hif_lib_exit(pfe);
9411 +
9412 + pfe_hw_exit(pfe);
9413 + }
9414 +
9415 + return 0;
9416 +}
9417 +
9418 +static int pfe_platform_resume(struct device *dev)
9419 +{
9420 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9421 + struct net_device *netdev;
9422 + int i;
9423 +
9424 + if (!pfe->wake) {
9425 + pfe_hw_init(pfe, 1);
9426 + pfe_hif_lib_init(pfe);
9427 + pfe_hif_init(pfe);
9428 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9429 + util_enable();
9430 +#endif
9431 + tmu_enable(0xf);
9432 + class_enable();
9433 + pfe_ctrl_resume(&pfe->ctrl);
9434 + }
9435 +
9436 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9437 + netdev = pfe->eth.eth_priv[i]->ndev;
9438 +
9439 + if (pfe->eth.eth_priv[i]->mii_bus)
9440 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
9441 +
9442 + if (netif_running(netdev))
9443 + pfe_eth_resume(netdev);
9444 +
9445 + netif_device_attach(netdev);
9446 + }
9447 + return 0;
9448 +}
9449 +#else
9450 +#define pfe_platform_suspend NULL
9451 +#define pfe_platform_resume NULL
9452 +#endif
9453 +
9454 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9455 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9456 +};
9457 +#endif
9458 +
9459 +static const struct of_device_id pfe_match[] = {
9460 + {
9461 + .compatible = "fsl,pfe",
9462 + },
9463 + {},
9464 +};
9465 +MODULE_DEVICE_TABLE(of, pfe_match);
9466 +
9467 +static struct platform_driver pfe_platform_driver = {
9468 + .probe = pfe_platform_probe,
9469 + .remove = pfe_platform_remove,
9470 + .driver = {
9471 + .name = "pfe",
9472 + .of_match_table = pfe_match,
9473 +#ifdef CONFIG_PM
9474 + .pm = &pfe_platform_pm_ops,
9475 +#endif
9476 + },
9477 +};
9478 +
9479 +module_platform_driver(pfe_platform_driver);
9480 +MODULE_LICENSE("GPL");
9481 +MODULE_DESCRIPTION("PFE Ethernet driver");
9482 +MODULE_AUTHOR("NXP DNCPE");
9483 diff --git a/drivers/staging/fsl_ppfe/pfe_mod.c b/drivers/staging/fsl_ppfe/pfe_mod.c
9484 new file mode 100644
9485 index 00000000..d5ba56a3
9486 --- /dev/null
9487 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9488 @@ -0,0 +1,141 @@
9489 +/*
9490 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9491 + * Copyright 2017 NXP
9492 + *
9493 + * This program is free software; you can redistribute it and/or modify
9494 + * it under the terms of the GNU General Public License as published by
9495 + * the Free Software Foundation; either version 2 of the License, or
9496 + * (at your option) any later version.
9497 + *
9498 + * This program is distributed in the hope that it will be useful,
9499 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9500 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9501 + * GNU General Public License for more details.
9502 + *
9503 + * You should have received a copy of the GNU General Public License
9504 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9505 + */
9506 +
9507 +#include <linux/dma-mapping.h>
9508 +#include "pfe_mod.h"
9509 +
9510 +struct pfe *pfe;
9511 +
9512 +/*
9513 + * pfe_probe - initialize the PFE subsystems in order, unwinding on failure
9514 + */
9515 +int pfe_probe(struct pfe *pfe)
9516 +{
9517 + int rc;
9518 +
9519 + if (pfe->ddr_size < DDR_MAX_SIZE) {
9520 +		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
9521 + __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9522 + rc = -ENOMEM;
9523 + goto err_hw;
9524 + }
9525 +
9526 + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9527 + (8 * SZ_1M - 1)) != 0) {
9528 + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9529 + __func__, (int)pfe->ddr_phys_baseaddr +
9530 + BMU2_DDR_BASEADDR);
9531 + rc = -ENOMEM;
9532 + goto err_hw;
9533 + }
9534 +
9535 + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9536 + (unsigned long)pfe->cbus_baseaddr,
9537 + (unsigned long)pfe->ddr_baseaddr,
9538 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9539 +
9540 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9541 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9542 +
9543 + rc = pfe_hw_init(pfe, 0);
9544 + if (rc < 0)
9545 + goto err_hw;
9546 +
9547 + rc = pfe_hif_lib_init(pfe);
9548 + if (rc < 0)
9549 + goto err_hif_lib;
9550 +
9551 + rc = pfe_hif_init(pfe);
9552 + if (rc < 0)
9553 + goto err_hif;
9554 +
9555 + rc = pfe_firmware_init(pfe);
9556 + if (rc < 0)
9557 + goto err_firmware;
9558 +
9559 + rc = pfe_ctrl_init(pfe);
9560 + if (rc < 0)
9561 + goto err_ctrl;
9562 +
9563 + rc = pfe_eth_init(pfe);
9564 + if (rc < 0)
9565 + goto err_eth;
9566 +
9567 + rc = pfe_sysfs_init(pfe);
9568 + if (rc < 0)
9569 + goto err_sysfs;
9570 +
9571 + rc = pfe_debugfs_init(pfe);
9572 + if (rc < 0)
9573 + goto err_debugfs;
9574 +
9575 + return 0;
9576 +
9577 +err_debugfs:
9578 + pfe_sysfs_exit(pfe);
9579 +
9580 +err_sysfs:
9581 + pfe_eth_exit(pfe);
9582 +
9583 +err_eth:
9584 + pfe_ctrl_exit(pfe);
9585 +
9586 +err_ctrl:
9587 + pfe_firmware_exit(pfe);
9588 +
9589 +err_firmware:
9590 + pfe_hif_exit(pfe);
9591 +
9592 +err_hif:
9593 + pfe_hif_lib_exit(pfe);
9594 +
9595 +err_hif_lib:
9596 + pfe_hw_exit(pfe);
9597 +
9598 +err_hw:
9599 + return rc;
9600 +}
9601 +
9602 +/*
9603 + * pfe_remove - shut down the PFE subsystems in reverse order of pfe_probe()
9604 + */
9605 +int pfe_remove(struct pfe *pfe)
9606 +{
9607 + pr_info("%s\n", __func__);
9608 +
9609 + pfe_debugfs_exit(pfe);
9610 +
9611 + pfe_sysfs_exit(pfe);
9612 +
9613 + pfe_eth_exit(pfe);
9614 +
9615 + pfe_ctrl_exit(pfe);
9616 +
9617 +#if defined(LS1012A_PFE_RESET_WA)
9618 + pfe_hif_rx_idle(&pfe->hif);
9619 +#endif
9620 + pfe_firmware_exit(pfe);
9621 +
9622 + pfe_hif_exit(pfe);
9623 +
9624 + pfe_hif_lib_exit(pfe);
9625 +
9626 + pfe_hw_exit(pfe);
9627 +
9628 + return 0;
9629 +}
9630 diff --git a/drivers/staging/fsl_ppfe/pfe_mod.h b/drivers/staging/fsl_ppfe/pfe_mod.h
9631 new file mode 100644
9632 index 00000000..3012f17f
9633 --- /dev/null
9634 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
9635 @@ -0,0 +1,112 @@
9636 +/*
9637 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9638 + * Copyright 2017 NXP
9639 + *
9640 + * This program is free software; you can redistribute it and/or modify
9641 + * it under the terms of the GNU General Public License as published by
9642 + * the Free Software Foundation; either version 2 of the License, or
9643 + * (at your option) any later version.
9644 + *
9645 + * This program is distributed in the hope that it will be useful,
9646 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9647 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9648 + * GNU General Public License for more details.
9649 + *
9650 + * You should have received a copy of the GNU General Public License
9651 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9652 + */
9653 +
9654 +#ifndef _PFE_MOD_H_
9655 +#define _PFE_MOD_H_
9656 +
9657 +#include <linux/device.h>
9658 +#include <linux/elf.h>
9659 +
9660 +struct pfe;
9661 +
9662 +#include "pfe_hw.h"
9663 +#include "pfe_firmware.h"
9664 +#include "pfe_ctrl.h"
9665 +#include "pfe_hif.h"
9666 +#include "pfe_hif_lib.h"
9667 +#include "pfe_eth.h"
9668 +#include "pfe_sysfs.h"
9669 +#include "pfe_perfmon.h"
9670 +#include "pfe_debugfs.h"
9671 +
9672 +#define PHYID_MAX_VAL 32
9673 +
9674 +struct pfe_tmu_credit {
9675 + /* Number of allowed TX packet in-flight, matches TMU queue size */
9676 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9677 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9678 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9679 +};
9680 +
9681 +struct pfe {
9682 + struct regmap *scfg;
9683 + unsigned long ddr_phys_baseaddr;
9684 + void *ddr_baseaddr;
9685 + unsigned int ddr_size;
9686 + void *cbus_baseaddr;
9687 + void *apb_baseaddr;
9688 + unsigned long iram_phys_baseaddr;
9689 + void *iram_baseaddr;
9690 + unsigned long ipsec_phys_baseaddr;
9691 + void *ipsec_baseaddr;
9692 + int hif_irq;
9693 + int wol_irq;
9694 + int hif_client_irq;
9695 + struct device *dev;
9696 + struct dentry *dentry;
9697 + struct pfe_ctrl ctrl;
9698 + struct pfe_hif hif;
9699 + struct pfe_eth eth;
9700 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
9701 +#if defined(CFG_DIAGS)
9702 + struct pfe_diags diags;
9703 +#endif
9704 + struct pfe_tmu_credit tmu_credit;
9705 + struct pfe_cpumon cpumon;
9706 + struct pfe_memmon memmon;
9707 + int wake;
9708 + int mdio_muxval[PHYID_MAX_VAL];
9709 + struct clk *hfe_clock;
9710 +};
9711 +
9712 +extern struct pfe *pfe;
9713 +
9714 +int pfe_probe(struct pfe *pfe);
9715 +int pfe_remove(struct pfe *pfe);
9716 +
9717 +/* DDR Mapping in reserved memory*/
9718 +#define ROUTE_TABLE_BASEADDR 0
9719 +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
9720 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
9721 + * CLASS_ROUTE_SIZE)
9722 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
9723 +#define BMU2_BUF_COUNT (4096 - 256)
9724 +/* This is to get a total DDR size of 12MiB */
9725 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
9726 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
9727 +#define UTIL_CODE_SIZE (128 * SZ_1K)
9728 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
9729 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
9730 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
9731 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
9732 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
9733 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
9734 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
9735 +#define TMU_LLM_QUEUE_LEN (8 * 512)
9736 +/* Must be power of two and at least 16 * 8 = 128 bytes */
9737 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
9738 +/* (4 TMUs x 16 queues x queue_len) */
9739 +
9740 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
9741 +
9742 +/* LMEM Mapping */
9743 +#define BMU1_LMEM_BASEADDR 0
9744 +#define BMU1_BUF_COUNT 256
9745 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
9746 +
9747 +#endif /* _PFE_MOD_H_ */
9748 diff --git a/drivers/staging/fsl_ppfe/pfe_perfmon.h b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9749 new file mode 100644
9750 index 00000000..84908121
9751 --- /dev/null
9752 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9753 @@ -0,0 +1,38 @@
9754 +/*
9755 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9756 + * Copyright 2017 NXP
9757 + *
9758 + * This program is free software; you can redistribute it and/or modify
9759 + * it under the terms of the GNU General Public License as published by
9760 + * the Free Software Foundation; either version 2 of the License, or
9761 + * (at your option) any later version.
9762 + *
9763 + * This program is distributed in the hope that it will be useful,
9764 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9765 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9766 + * GNU General Public License for more details.
9767 + *
9768 + * You should have received a copy of the GNU General Public License
9769 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9770 + */
9771 +
9772 +#ifndef _PFE_PERFMON_H_
9773 +#define _PFE_PERFMON_H_
9774 +
9775 +#include "pfe/pfe.h"
9776 +
9777 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
9778 +
9779 +struct pfe_cpumon {
9780 + u32 cpu_usage_pct[MAX_PE];
9781 + u32 class_usage_pct;
9782 +};
9783 +
9784 +struct pfe_memmon {
9785 + u32 kernel_memory_allocated;
9786 +};
9787 +
9788 +int pfe_perfmon_init(struct pfe *pfe);
9789 +void pfe_perfmon_exit(struct pfe *pfe);
9790 +
9791 +#endif /* _PFE_PERFMON_H_ */
9792 diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.c b/drivers/staging/fsl_ppfe/pfe_sysfs.c
9793 new file mode 100644
9794 index 00000000..2a763844
9795 --- /dev/null
9796 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
9797 @@ -0,0 +1,818 @@
9798 +/*
9799 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9800 + * Copyright 2017 NXP
9801 + *
9802 + * This program is free software; you can redistribute it and/or modify
9803 + * it under the terms of the GNU General Public License as published by
9804 + * the Free Software Foundation; either version 2 of the License, or
9805 + * (at your option) any later version.
9806 + *
9807 + * This program is distributed in the hope that it will be useful,
9808 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9809 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9810 + * GNU General Public License for more details.
9811 + *
9812 + * You should have received a copy of the GNU General Public License
9813 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9814 + */
9815 +
9816 +#include <linux/module.h>
9817 +#include <linux/platform_device.h>
9818 +
9819 +#include "pfe_mod.h"
9820 +
9821 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
9822 +#define NUM_QUEUES 16
9823 +
9824 +static char register_name[20][5] = {
9825 + "EPC", "ECAS", "EID", "ED",
9826 + "r0", "r1", "r2", "r3",
9827 + "r4", "r5", "r6", "r7",
9828 + "r8", "r9", "r10", "r11",
9829 + "r12", "r13", "r14", "r15",
9830 +};
9831 +
9832 +static char exception_name[14][20] = {
9833 + "Reset",
9834 + "HardwareFailure",
9835 + "NMI",
9836 + "InstBreakpoint",
9837 + "DataBreakpoint",
9838 + "Unsupported",
9839 + "PrivilegeViolation",
9840 + "InstBusError",
9841 + "DataBusError",
9842 + "AlignmentError",
9843 + "ArithmeticError",
9844 + "SystemCall",
9845 + "MemoryManagement",
9846 + "Interrupt",
9847 +};
9848 +
9849 +static unsigned long class_do_clear;
9850 +static unsigned long tmu_do_clear;
9851 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9852 +static unsigned long util_do_clear;
9853 +#endif
9854 +
9855 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
9856 + do_clear)
9857 +{
9858 + ssize_t len = 0;
9859 + u32 val;
9860 + char statebuf[5];
9861 + struct pfe_cpumon *cpumon = &pfe->cpumon;
9862 + u32 debug_indicator;
9863 + u32 debug[20];
9864 +
9865 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
9866 + dmem_addr += 4;
9867 +
9868 + statebuf[4] = '\0';
9869 + len += sprintf(buf + len, "state=%4s ", statebuf);
9870 +
9871 + val = pe_dmem_read(id, dmem_addr, 4);
9872 + dmem_addr += 4;
9873 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
9874 +
9875 + val = pe_dmem_read(id, dmem_addr, 4);
9876 + if (do_clear && val)
9877 + pe_dmem_write(id, 0, dmem_addr, 4);
9878 + dmem_addr += 4;
9879 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
9880 +
9881 + val = pe_dmem_read(id, dmem_addr, 4);
9882 + if (do_clear && val)
9883 + pe_dmem_write(id, 0, dmem_addr, 4);
9884 + dmem_addr += 4;
9885 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
9886 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
9887 + else
9888 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
9889 +
9890 + val = pe_dmem_read(id, dmem_addr, 4);
9891 + if (do_clear && val)
9892 + pe_dmem_write(id, 0, dmem_addr, 4);
9893 + dmem_addr += 4;
9894 + if (val)
9895 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
9896 +
9897 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
9898 +
9899 + len += sprintf(buf + len, "\n");
9900 +
9901 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
9902 + dmem_addr += 4;
9903 + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
9904 + int j, last = 0;
9905 +
9906 + for (j = 0; j < 16; j++) {
9907 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
9908 + if (debug[j]) {
9909 + if (do_clear)
9910 + pe_dmem_write(id, 0, dmem_addr, 4);
9911 + last = j + 1;
9912 + }
9913 + dmem_addr += 4;
9914 + }
9915 + for (j = 0; j < last; j++) {
9916 + len += sprintf(buf + len, "%08x%s",
9917 + cpu_to_be32(debug[j]),
9918 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
9919 + }
9920 + }
9921 +
9922 + if (!strncmp(statebuf, "DEAD", 4)) {
9923 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
9924 +
9925 + len += sprintf(buf + len, "Exception details:\n");
9926 + for (i = 0; i < 20; i++) {
9927 + debug[i] = pe_dmem_read(id, dump, 4);
9928 + dump += 4;
9929 + if (i == 2)
9930 + len += sprintf(buf + len, "%4s = %08x (=%s) ",
9931 + register_name[i], cpu_to_be32(debug[i]),
9932 + exception_name[min((u32)
9933 + cpu_to_be32(debug[i]), (u32)13)]);
9934 + else
9935 + len += sprintf(buf + len, "%4s = %08x%s",
9936 + register_name[i], cpu_to_be32(debug[i]),
9937 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
9938 + }
9939 + }
9940 +
9941 + return len;
9942 +}
9943 +
9944 +static ssize_t class_phy_stats(char *buf, int phy)
9945 +{
9946 + ssize_t len = 0;
9947 + int off1 = phy * 0x28;
9948 + int off2 = phy * 0x10;
9949 +
9950 + if (phy == 3)
9951 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
9952 +
9953 + len += sprintf(buf + len, "phy: %d\n", phy);
9954 + len += sprintf(buf + len,
9955 + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
9956 + readl(CLASS_PHY1_RX_PKTS + off1),
9957 + readl(CLASS_PHY1_TX_PKTS + off1),
9958 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
9959 + readl(CLASS_PHY1_V4_PKTS + off1),
9960 + readl(CLASS_PHY1_V6_PKTS + off1));
9961 +
9962 + len += sprintf(buf + len,
9963 + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
9964 + readl(CLASS_PHY1_ICMP_PKTS + off2),
9965 + readl(CLASS_PHY1_IGMP_PKTS + off2),
9966 + readl(CLASS_PHY1_TCP_PKTS + off2),
9967 + readl(CLASS_PHY1_UDP_PKTS + off2));
9968 +
9969 + len += sprintf(buf + len, " err\n");
9970 + len += sprintf(buf + len,
9971 + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
9972 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
9973 + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
9974 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
9975 + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
9976 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
9977 +
9978 + return len;
9979 +}
9980 +
9981 +/* qm_read_drop_stat
9982 + * This function is used to read the drop statistics from the TMU
9983 + * hw drop counter. Since the hw counter is always cleared after
9984 + * reading, this function maintains the previous drop count, and
9985 + * adds the new value to it. That value can be retrieved by
9986 + * passing a pointer to it with the total_drops arg.
9987 + *
9988 + * @param tmu TMU number (0 - 3)
9989 + * @param queue queue number (0 - 15)
9990 + * @param total_drops pointer to location to store total drops (or NULL)
9991 + * @param do_reset if TRUE, clear total drops after updating
9992 + */
9993 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
9994 +{
9995 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
9996 + u32 val;
9997 +
9998 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
9999 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10000 + val = readl(TMU_TEQ_DROP_STAT);
10001 + qtotal[tmu][queue] += val;
10002 + if (total_drops)
10003 + *total_drops = qtotal[tmu][queue];
10004 + if (do_reset)
10005 + qtotal[tmu][queue] = 0;
10006 + return val;
10007 +}
10008 +
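+/*
+ * Usage sketch (editorial): read the cumulative drop count for one TMU
+ * queue without resetting the running total. example_queue_drops() is
+ * hypothetical, shown only to illustrate the calling convention.
+ */
+static inline u32 example_queue_drops(u32 tmu, u32 queue)
+{
+	u32 total = 0;
+
+	qm_read_drop_stat(tmu, queue, &total, 0);
+	return total;
+}
+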
10009 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
10010 +{
10011 + ssize_t len = 0;
10012 + u32 drops;
10013 +
10014 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
10015 +
10016 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
10017 +
10018 + /* Select queue */
10019 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10020 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10021 +
10022 + len += sprintf(buf + len,
10023 + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
10024 + drops, readl(TMU_TEQ_TRANS_STAT),
10025 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
10026 + readl(TMU_LLM_QUE_DROPCNT));
10027 +
10028 + return len;
10029 +}
10030 +
10031 +static ssize_t tmu_queues(char *buf, int tmu)
10032 +{
10033 + ssize_t len = 0;
10034 + int queue;
10035 +
10036 + for (queue = 0; queue < 16; queue++)
10037 + len += tmu_queue_stats(buf + len, tmu, queue);
10038 +
10039 + return len;
10040 +}
10041 +
10042 +static ssize_t block_version(char *buf, void *addr)
10043 +{
10044 + ssize_t len = 0;
10045 + u32 val;
10046 +
10047 + val = readl(addr);
10048 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
10049 + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
10050 +
10051 + return len;
10052 +}
10053 +
10054 +static ssize_t bmu(char *buf, int id, void *base)
10055 +{
10056 + ssize_t len = 0;
10057 +
10058 + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
10059 +
10060 + len += block_version(buf + len, base + BMU_VERSION);
10061 +
10062 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
10063 + BMU_BUF_SIZE)));
10064 + len += sprintf(buf + len, " buf count: %x\n", readl(base +
10065 + BMU_BUF_CNT));
10066 + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
10067 + BMU_REM_BUF_CNT));
10068 + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
10069 + BMU_CURR_BUF_CNT));
10070 + len += sprintf(buf + len, " free err: %x\n", readl(base +
10071 + BMU_FREE_ERR_ADDR));
10072 +
10073 + return len;
10074 +}
10075 +
10076 +static ssize_t gpi(char *buf, int id, void *base)
10077 +{
10078 + ssize_t len = 0;
10079 + u32 val;
10080 +
10081 + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
10082 + len += block_version(buf + len, base + GPI_VERSION);
10083 +
10084 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
10085 + GPI_FIFO_STATUS));
10086 + val = readl(base + GPI_FIFO_DEBUG);
10087 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
10088 + 0x3f);
10089 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
10090 + 0x3f);
10091 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
10092 + 0x1ff);
10093 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
10094 + 0x1ff);
10095 + len += sprintf(buf + len, " overrun: %x\n", readl(base +
10096 + GPI_OVERRUN_DROPCNT));
10097 +
10098 + return len;
10099 +}
10100 +
10101 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10102 + const char *buf, size_t count)
10103 +{
+	if (kstrtoul(buf, 0, &class_do_clear))
+		return -EINVAL;
+	return count;
10106 +}
10107 +
10108 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10109 + char *buf)
10110 +{
10111 + ssize_t len = 0;
10112 + int id;
10113 + u32 val;
10114 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10115 +
10116 + len += block_version(buf + len, CLASS_VERSION);
10117 +
10118 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10119 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10120 +
10121 + val = readl(CLASS_PE0_DEBUG + id * 4);
10122 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10123 +
10124 + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10125 + class_do_clear);
10126 + }
10127 + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10128 + cpumon->class_usage_pct);
10129 +
10130 + len += sprintf(buf + len, "pe status: 0x%x\n",
10131 + readl(CLASS_PE_STATUS));
10132 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
10133 + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10134 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
10135 + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10136 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
10137 +
10138 + len += class_phy_stats(buf + len, 0);
10139 + len += class_phy_stats(buf + len, 1);
10140 + len += class_phy_stats(buf + len, 2);
10141 + len += class_phy_stats(buf + len, 3);
10142 +
10143 + return len;
10144 +}
10145 +
10146 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10147 + const char *buf, size_t count)
10148 +{
+	if (kstrtoul(buf, 0, &tmu_do_clear))
+		return -EINVAL;
+	return count;
10151 +}
10152 +
10153 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10154 + char *buf)
10155 +{
10156 + ssize_t len = 0;
10157 + int id;
10158 + u32 val;
10159 +
10160 + len += block_version(buf + len, TMU_VERSION);
10161 +
10162 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10163 + if (id == TMU2_ID)
10164 + continue;
10165 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10166 +
10167 + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10168 + tmu_do_clear);
10169 + }
10170 +
10171 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
10172 + len += sprintf(buf + len, "inq fifo cnt: %x\n",
10173 + readl(TMU_PHY_INQ_FIFO_CNT));
10174 + val = readl(TMU_INQ_STAT);
10175 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
10176 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
10177 +
10178 + return len;
10179 +}
10180 +
10181 +static unsigned long drops_do_clear;
10182 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10183 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10184 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10185 +#endif
10186 +
10187 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10188 + "ICC",
10189 + "Host Pkt Error",
10190 + "Rx Error",
10191 + "IPsec Outbound",
10192 + "IPsec Inbound",
10193 + "EXPT IPsec Error",
10194 + "Reassembly",
10195 + "Fragmenter",
10196 + "NAT-T",
10197 + "Socket",
10198 + "Multicast",
10199 + "NAT-PT",
10200 + "Tx Disabled",
10201 +};
10202 +
10203 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10204 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10205 + "IPsec Outbound",
10206 + "IPsec Inbound",
10207 + "IPsec Rate Limiter",
10208 + "Fragmenter",
10209 + "Socket",
10210 + "Tx Disabled",
10211 + "Rx Error",
10212 +};
10213 +#endif
10214 +
10215 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10216 + const char *buf, size_t count)
10217 +{
+	if (kstrtoul(buf, 0, &drops_do_clear))
+		return -EINVAL;
+	return count;
10220 +}
10221 +
10222 +static u32 tmu_drops[4][16];
10223 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10224 + char *buf)
10225 +{
10226 + ssize_t len = 0;
10227 + int id, dropnum;
10228 + int tmu, queue;
10229 + u32 val;
10230 + u32 dmem_addr;
10231 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10232 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10233 +
10234 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
10235 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10236 + if (drops_do_clear)
10237 + pe_sync_stop(ctrl, (1 << id));
10238 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10239 + dropnum++) {
10240 +			dmem_addr = CLASS_DM_DROP_CNTR + (dropnum * 4);
10241 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10242 + class_drop_counter[dropnum] += val;
10243 + num_class_drops += val;
10244 + if (drops_do_clear)
10245 + pe_dmem_write(id, 0, dmem_addr, 4);
10246 + }
10247 + if (drops_do_clear)
10248 + pe_start(ctrl, (1 << id));
10249 + }
10250 +
10251 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10252 + if (drops_do_clear)
10253 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10254 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10255 +		dmem_addr = UTIL_DM_DROP_CNTR + (dropnum * 4);
10256 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10257 + util_drop_counter[dropnum] = val;
10258 + num_util_drops += val;
10259 + if (drops_do_clear)
10260 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10261 + }
10262 + if (drops_do_clear)
10263 + pe_start(ctrl, (1 << UTIL_ID));
10264 +#endif
10265 + for (tmu = 0; tmu < 4; tmu++) {
10266 + for (queue = 0; queue < 16; queue++) {
10267 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10268 + drops_do_clear);
10269 + num_tmu_drops += tmu_drops[tmu][queue];
10270 + }
10271 + }
10272 +
10273 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10274 + len += sprintf(buf + len, "No PE drops\n\n");
10275 +
10276 + if (num_class_drops > 0) {
10277 + len += sprintf(buf + len, "Class PE drops --\n");
10278 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10279 + dropnum++) {
10280 + if (class_drop_counter[dropnum] > 0)
10281 + len += sprintf(buf + len, " %s: %d\n",
10282 + class_drop_description[dropnum],
10283 + class_drop_counter[dropnum]);
10284 + }
10285 + len += sprintf(buf + len, "\n");
10286 + }
10287 +
10288 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10289 + if (num_util_drops > 0) {
10290 + len += sprintf(buf + len, "Util PE drops --\n");
10291 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10292 + if (util_drop_counter[dropnum] > 0)
10293 + len += sprintf(buf + len, " %s: %d\n",
10294 + util_drop_description[dropnum],
10295 + util_drop_counter[dropnum]);
10296 + }
10297 + len += sprintf(buf + len, "\n");
10298 + }
10299 +#endif
10300 + if (num_tmu_drops > 0) {
10301 + len += sprintf(buf + len, "TMU drops --\n");
10302 + for (tmu = 0; tmu < 4; tmu++) {
10303 + for (queue = 0; queue < 16; queue++) {
10304 + if (tmu_drops[tmu][queue] > 0)
10305 + len += sprintf(buf + len,
10306 + " TMU%d-Q%d: %d\n"
10307 + , tmu, queue, tmu_drops[tmu][queue]);
10308 + }
10309 + }
10310 + len += sprintf(buf + len, "\n");
10311 + }
10312 +
10313 + return len;
10314 +}
10315 +
10316 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
10317 + *attr, char *buf)
10318 +{
10319 + return tmu_queues(buf, 0);
10320 +}
10321 +
10322 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
10323 + *attr, char *buf)
10324 +{
10325 + return tmu_queues(buf, 1);
10326 +}
10327 +
10328 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
10329 + *attr, char *buf)
10330 +{
10331 + return tmu_queues(buf, 2);
10332 +}
10333 +
10334 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
10335 + *attr, char *buf)
10336 +{
10337 + return tmu_queues(buf, 3);
10338 +}
10339 +
10340 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10341 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10342 + const char *buf, size_t count)
10343 +{
+	if (kstrtoul(buf, 0, &util_do_clear))
+		return -EINVAL;
+	return count;
10346 +}
10347 +
10348 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10349 + char *buf)
10350 +{
10351 + ssize_t len = 0;
10352 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10353 +
10354 + len += block_version(buf + len, UTIL_VERSION);
10355 +
10356 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10357 + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10358 + util_do_clear);
10359 + pe_start(ctrl, (1 << UTIL_ID));
10360 +
10361 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
10362 + len += sprintf(buf + len, "max buf cnt: %x\n",
10363 + readl(UTIL_MAX_BUF_CNT));
10364 + len += sprintf(buf + len, "tsq max cnt: %x\n",
10365 + readl(UTIL_TSQ_MAX_CNT));
10366 +
10367 + return len;
10368 +}
10369 +#endif
10370 +
10371 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10372 + char *buf)
10373 +{
10374 + ssize_t len = 0;
10375 +
10376 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10377 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10378 +
10379 + return len;
10380 +}
10381 +
10382 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10383 + char *buf)
10384 +{
10385 + ssize_t len = 0;
10386 +
10387 + len += sprintf(buf + len, "hif:\n ");
10388 + len += block_version(buf + len, HIF_VERSION);
10389 +
10390 + len += sprintf(buf + len, " tx curr bd: %x\n",
10391 + readl(HIF_TX_CURR_BD_ADDR));
10392 + len += sprintf(buf + len, " tx status: %x\n",
10393 + readl(HIF_TX_STATUS));
10394 + len += sprintf(buf + len, " tx dma status: %x\n",
10395 + readl(HIF_TX_DMA_STATUS));
10396 +
10397 + len += sprintf(buf + len, " rx curr bd: %x\n",
10398 + readl(HIF_RX_CURR_BD_ADDR));
10399 + len += sprintf(buf + len, " rx status: %x\n",
10400 + readl(HIF_RX_STATUS));
10401 + len += sprintf(buf + len, " rx dma status: %x\n",
10402 + readl(HIF_RX_DMA_STATUS));
10403 +
10404 + len += sprintf(buf + len, "hif nocopy:\n ");
10405 + len += block_version(buf + len, HIF_NOCPY_VERSION);
10406 +
10407 + len += sprintf(buf + len, " tx curr bd: %x\n",
10408 + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10409 + len += sprintf(buf + len, " tx status: %x\n",
10410 + readl(HIF_NOCPY_TX_STATUS));
10411 + len += sprintf(buf + len, " tx dma status: %x\n",
10412 + readl(HIF_NOCPY_TX_DMA_STATUS));
10413 +
10414 + len += sprintf(buf + len, " rx curr bd: %x\n",
10415 + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10416 + len += sprintf(buf + len, " rx status: %x\n",
10417 + readl(HIF_NOCPY_RX_STATUS));
10418 + len += sprintf(buf + len, " rx dma status: %x\n",
10419 + readl(HIF_NOCPY_RX_DMA_STATUS));
10420 +
10421 + return len;
10422 +}
10423 +
10424 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10425 + char *buf)
10426 +{
10427 + ssize_t len = 0;
10428 +
10429 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10430 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10431 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10432 +
10433 + return len;
10434 +}
10435 +
10436 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
10437 + *attr, char *buf)
10438 +{
10439 + ssize_t len = 0;
10440 + struct pfe_memmon *memmon = &pfe->memmon;
10441 +
10442 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10443 + memmon->kernel_memory_allocated,
10444 + (memmon->kernel_memory_allocated + 1023) / 1024);
10445 +
10446 + return len;
10447 +}
10448 +
+#ifdef HIF_NAPI_STATS
+static ssize_t pfe_show_hif_napi_stats(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "sched: %u\n",
+		       pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
+	len += sprintf(buf + len, "poll: %u\n",
+		       pfe->hif.napi_counters[NAPI_POLL_COUNT]);
+	len += sprintf(buf + len, "packet: %u\n",
+		       pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
+	len += sprintf(buf + len, "budget: %u\n",
+		       pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	len += sprintf(buf + len, "desc: %u\n",
+		       pfe->hif.napi_counters[NAPI_DESC_COUNT]);
+	len += sprintf(buf + len, "full: %u\n",
+		       pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
+
+	return len;
+}
+
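+/* Store handler for "hif_napi_stats": writing any value resets all
+ * NAPI counters to zero; the written data itself is ignored.
+ */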
+static ssize_t pfe_set_hif_napi_stats(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+
+	memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
+
+	return count;
+}
+
+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
+		   pfe_set_hif_napi_stats);
+#endif
+
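+/* Attributes with mode 0644 are read/write (show and store handlers);
+ * those with mode 0444 are read-only register dumps.
+ */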
+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
+#endif
+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
+
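+/* Create every attribute in turn; if any creation fails, fall through
+ * the labels below to remove, in reverse order, the files created so
+ * far before reporting failure.
+ */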
+int pfe_sysfs_init(struct pfe *pfe)
+{
+	if (device_create_file(pfe->dev, &dev_attr_class))
+		goto err_class;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu))
+		goto err_tmu;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (device_create_file(pfe->dev, &dev_attr_util))
+		goto err_util;
+#endif
+
+	if (device_create_file(pfe->dev, &dev_attr_bmu))
+		goto err_bmu;
+
+	if (device_create_file(pfe->dev, &dev_attr_hif))
+		goto err_hif;
+
+	if (device_create_file(pfe->dev, &dev_attr_gpi))
+		goto err_gpi;
+
+	if (device_create_file(pfe->dev, &dev_attr_drops))
+		goto err_drops;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
+		goto err_tmu0_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
+		goto err_tmu1_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
+		goto err_tmu2_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
+		goto err_tmu3_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_pfemem))
+		goto err_pfemem;
+
+#ifdef HIF_NAPI_STATS
+	if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
+		goto err_hif_napi_stats;
+#endif
+
+	return 0;
+
+#ifdef HIF_NAPI_STATS
+err_hif_napi_stats:
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+#endif
+
+err_pfemem:
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+
+err_tmu3_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+
+err_tmu2_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+
+err_tmu1_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+
+err_tmu0_queues:
+	device_remove_file(pfe->dev, &dev_attr_drops);
+
+err_drops:
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+
+err_gpi:
+	device_remove_file(pfe->dev, &dev_attr_hif);
+
+err_hif:
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+
+err_bmu:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+
+err_util:
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+
+err_tmu:
+	device_remove_file(pfe->dev, &dev_attr_class);
+
+err_class:
+	return -1;
+}
+
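+/* Remove all attributes created by pfe_sysfs_init(), newest first. */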
+void pfe_sysfs_exit(struct pfe *pfe)
+{
+#ifdef HIF_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+	device_remove_file(pfe->dev, &dev_attr_drops);
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+	device_remove_file(pfe->dev, &dev_attr_hif);
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+	device_remove_file(pfe->dev, &dev_attr_class);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.h b/drivers/staging/fsl_ppfe/pfe_sysfs.h
new file mode 100644
index 00000000..4fb39c93
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_SYSFS_H_
+#define _PFE_SYSFS_H_
+
+#include <linux/proc_fs.h>
+
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
+
+int pfe_sysfs_init(struct pfe *pfe);
+void pfe_sysfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_SYSFS_H_ */
--
2.14.1
