kernel: bump 4.9 to 4.9.146
openwrt/openwrt.git: target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch
1 From 50fb2f2e93aeae0baed156eb4794a2f358376b77 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 5 Jul 2018 17:19:20 +0800
4 Subject: [PATCH 12/32] fsl_ppfe: support layerscape
5
6 This is an integrated patch for layerscape pfe support.
7
8 Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/staging/fsl_ppfe/Kconfig | 20 +
12 drivers/staging/fsl_ppfe/Makefile | 19 +
13 drivers/staging/fsl_ppfe/TODO | 2 +
14 drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
15 .../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
16 .../fsl_ppfe/include/pfe/cbus/class_csr.h | 289 ++
17 .../fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
18 .../staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
19 .../staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
20 .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
21 .../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
22 .../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
23 drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
24 drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
25 drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
26 drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
27 drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
28 drivers/staging/fsl_ppfe/pfe_eth.c | 2491 +++++++++++++++++
29 drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
30 drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
31 drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
32 drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++
33 drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++
34 drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
35 drivers/staging/fsl_ppfe/pfe_hif_lib.c | 640 +++++
36 drivers/staging/fsl_ppfe/pfe_hif_lib.h | 241 ++
37 drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
38 drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
39 .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
40 drivers/staging/fsl_ppfe/pfe_mod.c | 156 ++
41 drivers/staging/fsl_ppfe/pfe_mod.h | 114 +
42 drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
43 drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++
44 drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
45 34 files changed, 10472 insertions(+)
46 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
47 create mode 100644 drivers/staging/fsl_ppfe/Makefile
48 create mode 100644 drivers/staging/fsl_ppfe/TODO
49 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
50 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
51 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
52 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
53 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
54 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
55 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
56 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
57 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
58 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
59 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
60 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
61 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
62 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
63 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
64 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
65 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
66 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
67 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
68 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
69 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
70 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
71 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
72 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
73 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
74 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
75 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
76 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
77 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
78 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
79 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
80
81 --- /dev/null
82 +++ b/drivers/staging/fsl_ppfe/Kconfig
83 @@ -0,0 +1,20 @@
84 +#
85 +# Freescale Programmable Packet Forwarding Engine driver
86 +#
87 +config FSL_PPFE
88 + bool "Freescale PPFE Driver"
89 + default n
90 + ---help---
91 +	  The Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
92 +	  It provides two high-performance Ethernet interfaces.
93 + This driver initializes, programs and controls the PPFE.
94 + Use this driver to enable network connectivity on LS1012A platforms.
95 +
96 +if FSL_PPFE
97 +
98 +config FSL_PPFE_UTIL_DISABLED
99 + bool "Disable PPFE UTIL Processor Engine"
100 + ---help---
101 +	  Enable the UTIL PE only when it is required.
102 +
103 +endif # FSL_PPFE
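
For reference, enabling the driver in a board's kernel config would look like the fragment below (a sketch based on the Kconfig above; leaving FSL_PPFE_UTIL_DISABLED unset keeps the UTIL PE in use):

CONFIG_FSL_PPFE=y
# CONFIG_FSL_PPFE_UTIL_DISABLED is not set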
104 --- /dev/null
105 +++ b/drivers/staging/fsl_ppfe/Makefile
106 @@ -0,0 +1,19 @@
107 +#
108 +# Makefile for Freescale PPFE driver
109 +#
110 +
111 +ccflags-y += -I$(src)/include -I$(src)
112 +
113 +obj-m += pfe.o
114 +
115 +pfe-y += pfe_mod.o \
116 + pfe_hw.o \
117 + pfe_firmware.o \
118 + pfe_ctrl.o \
119 + pfe_hif.o \
120 +	pfe_hif_lib.o \
121 + pfe_eth.o \
122 + pfe_sysfs.o \
123 + pfe_debugfs.o \
124 + pfe_ls1012a_platform.o \
125 + pfe_hal.o
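
Because the objects are listed under obj-m, the driver builds as a module named pfe.ko. A minimal sketch of building it against an already-configured 4.9 source tree (the kernel path here is illustrative):

make -C /path/to/linux-4.9 M=drivers/staging/fsl_ppfe modules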
126 --- /dev/null
127 +++ b/drivers/staging/fsl_ppfe/TODO
128 @@ -0,0 +1,2 @@
129 +TODO:
130 + - provide pfe pe monitoring support
131 --- /dev/null
132 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
133 @@ -0,0 +1,78 @@
134 +/*
135 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
136 + * Copyright 2017 NXP
137 + *
138 + * This program is free software; you can redistribute it and/or modify
139 + * it under the terms of the GNU General Public License as published by
140 + * the Free Software Foundation; either version 2 of the License, or
141 + * (at your option) any later version.
142 + *
143 + * This program is distributed in the hope that it will be useful,
144 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
145 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
146 + * GNU General Public License for more details.
147 + *
148 + * You should have received a copy of the GNU General Public License
149 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
150 + */
151 +
152 +#ifndef _CBUS_H_
153 +#define _CBUS_H_
154 +
155 +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
156 +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
157 +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
158 +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
159 +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
160 +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
161 +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
162 +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
163 +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
164 +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
165 +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
166 +#define LMEM_SIZE 0x10000
167 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
168 +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
169 +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
170 +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
171 +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
172 +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
173 +
174 +/*
175 + * XXX_MEM_ACCESS_ADDR: PE memory access through CSR.
176 + * XXX_MEM_ACCESS_ADDR register bit definitions.
177 + */
178 +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
179 +#define PE_MEM_ACCESS_IMEM BIT(15)
180 +#define PE_MEM_ACCESS_DMEM BIT(16)
181 +
182 +/* Byte Enables of the Internal memory access. These are interpreted in BE */
183 +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
184 + ({ typeof(size) size_ = (size); \
185 + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
186 +
187 +#include "cbus/emac_mtip.h"
188 +#include "cbus/gpi.h"
189 +#include "cbus/bmu.h"
190 +#include "cbus/hif.h"
191 +#include "cbus/tmu_csr.h"
192 +#include "cbus/class_csr.h"
193 +#include "cbus/hif_nocpy.h"
194 +#include "cbus/util_csr.h"
195 +
196 +/* PFE cores states */
197 +#define CORE_DISABLE 0x00000000
198 +#define CORE_ENABLE 0x00000001
199 +#define CORE_SW_RESET 0x00000002
200 +
201 +/* LMEM defines */
202 +#define LMEM_HDR_SIZE 0x0010
203 +#define LMEM_BUF_SIZE_LN2 0x7
204 +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
205 +
206 +/* DDR defines */
207 +#define DDR_HDR_SIZE 0x0100
208 +#define DDR_BUF_SIZE_LN2 0xb
209 +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
210 +
211 +#endif /* _CBUS_H_ */
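
To make the byte-enable packing concrete, here is a short worked sketch of PE_MEM_ACCESS_BYTE_ENABLE; the values follow directly from the macro above. Note likewise that the _LN2 defines are log2 sizes, so LMEM_BUF_SIZE is BIT(0x7) = 128 bytes and DDR_BUF_SIZE is BIT(0xb) = 2048 bytes.

/* full 32-bit word at offset 0: (BIT(4) - 1) << (4 - 0 - 4) = 0xf -> 0x0f000000 */
u32 be_word = PE_MEM_ACCESS_BYTE_ENABLE(0, 4);

/* two bytes at offset 1: (BIT(2) - 1) << (4 - 1 - 2) = 0x6 -> 0x06000000 */
u32 be_pair = PE_MEM_ACCESS_BYTE_ENABLE(1, 2);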
212 --- /dev/null
213 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
214 @@ -0,0 +1,55 @@
215 +/*
216 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
217 + * Copyright 2017 NXP
218 + *
219 + * This program is free software; you can redistribute it and/or modify
220 + * it under the terms of the GNU General Public License as published by
221 + * the Free Software Foundation; either version 2 of the License, or
222 + * (at your option) any later version.
223 + *
224 + * This program is distributed in the hope that it will be useful,
225 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
226 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
227 + * GNU General Public License for more details.
228 + *
229 + * You should have received a copy of the GNU General Public License
230 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
231 + */
232 +
233 +#ifndef _BMU_H_
234 +#define _BMU_H_
235 +
236 +#define BMU_VERSION 0x000
237 +#define BMU_CTRL 0x004
238 +#define BMU_UCAST_CONFIG 0x008
239 +#define BMU_UCAST_BASE_ADDR 0x00c
240 +#define BMU_BUF_SIZE 0x010
241 +#define BMU_BUF_CNT 0x014
242 +#define BMU_THRES 0x018
243 +#define BMU_INT_SRC 0x020
244 +#define BMU_INT_ENABLE 0x024
245 +#define BMU_ALLOC_CTRL 0x030
246 +#define BMU_FREE_CTRL 0x034
247 +#define BMU_FREE_ERR_ADDR 0x038
248 +#define BMU_CURR_BUF_CNT 0x03c
249 +#define BMU_MCAST_CNT 0x040
250 +#define BMU_MCAST_ALLOC_CTRL 0x044
251 +#define BMU_REM_BUF_CNT 0x048
252 +#define BMU_LOW_WATERMARK 0x050
253 +#define BMU_HIGH_WATERMARK 0x054
254 +#define BMU_INT_MEM_ACCESS 0x100
255 +
256 +struct BMU_CFG {
257 + unsigned long baseaddr;
258 + u32 count;
259 + u32 size;
260 + u32 low_watermark;
261 + u32 high_watermark;
262 +};
263 +
264 +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
265 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
266 +
267 +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
268 +
269 +#endif /* _BMU_H_ */
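
struct BMU_CFG above collects everything needed to bring up one buffer manager instance. A hedged sketch for BMU1, the LMEM-backed pool; only the .size value is pinned down by the defines above, while the base, count and watermarks are illustrative:

struct BMU_CFG bmu1_cfg = {
	.baseaddr = lmem_pool_base,	/* illustrative: start of the LMEM pool */
	.count = 0x200,			/* illustrative buffer count */
	.size = BMU1_BUF_SIZE,		/* log2 size, i.e. LMEM_BUF_SIZE_LN2 */
	.low_watermark = 10,		/* illustrative */
	.high_watermark = 15,		/* illustrative */
};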
270 --- /dev/null
271 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
272 @@ -0,0 +1,289 @@
273 +/*
274 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
275 + * Copyright 2017 NXP
276 + *
277 + * This program is free software; you can redistribute it and/or modify
278 + * it under the terms of the GNU General Public License as published by
279 + * the Free Software Foundation; either version 2 of the License, or
280 + * (at your option) any later version.
281 + *
282 + * This program is distributed in the hope that it will be useful,
283 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
284 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
285 + * GNU General Public License for more details.
286 + *
287 + * You should have received a copy of the GNU General Public License
288 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
289 + */
290 +
291 +#ifndef _CLASS_CSR_H_
292 +#define _CLASS_CSR_H_
293 +
294 +/* @file class_csr.h.
295 + * class_csr - block containing all the classifier control and status registers.
296 + * Mapped on CBUS and accessible from all PEs and the ARM.
297 + */
298 +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
299 +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
300 +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
301 +
302 +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
303 +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
304 +
305 +/* LMEM header size for the Classifier block. Data in the LMEM
306 + * is written from this offset.
307 + */
308 +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
309 +
310 +/* DDR header size for the Classifier block. Data in the DDR
311 + * is written from this offset.
312 + */
313 +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
314 +
315 +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
316 +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
317 +
318 +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
319 +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
320 +
321 +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
322 +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
323 +
324 +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
325 +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
326 +
327 +
328 +/* Class PE memory access. Allows external PEs and the host to
329 + * read/write PMEM/DMEM memory ranges for each classifier PE.
330 + */
331 +/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
332 + * see XXX_MEM_ACCESS_ADDR for details.
333 + */
334 +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
335 +
336 +/* Internal Memory Access Write Data [31:0] */
337 +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
338 +
339 +/* Internal Memory Access Read Data [31:0] */
340 +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
341 +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
342 +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
343 +
344 +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
345 +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
346 +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
347 +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
348 +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
349 +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
350 +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
351 +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
352 +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
353 +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
354 +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
355 +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
356 +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
357 +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
358 +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
359 +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
360 +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
361 +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
362 +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
363 +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
364 +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
365 +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
366 +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
367 +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
368 +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
369 +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
370 +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
371 +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
372 +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
373 +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
374 +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
375 +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
376 +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
377 +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
378 +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
379 +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
380 +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
381 +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
382 +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
383 +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
384 +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
385 +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
386 +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
387 +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
388 +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
389 +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
390 +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
391 +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
392 +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
393 +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
394 +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
395 +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
396 +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
397 +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
398 +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
399 +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
400 +
401 +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
402 +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
403 +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
404 +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
405 +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
406 +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
407 +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
408 +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
409 +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
410 +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
411 +
412 +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
413 +
414 +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
415 +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
416 +
417 +/* (route_entry_size[9:0], route_hash_size[23:16],
418 + * where the hash size is actually ln2(size))
419 + */
420 +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
421 +
422 +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
423 +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
424 +
425 +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
426 +
427 +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
428 +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
429 +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
430 +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
431 +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
432 +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
433 +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
434 +
435 +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
436 +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
437 +/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
438 +
439 +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
440 +
441 +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
442 +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
443 +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
444 +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
445 +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
446 +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
447 +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
448 +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
449 +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
450 +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
451 +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
452 +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
453 +
454 +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
455 +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
456 +
457 +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
458 +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
459 +
460 +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
461 +
462 +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
463 +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
464 +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
465 +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
466 +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
467 +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
468 +
469 +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
470 +
471 +/* CLASS defines */
472 +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
473 +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
474 +
475 +/* Can be configured */
476 +#define CLASS_PBUF0_BASE_ADDR 0x000
477 +/* Can be configured */
478 +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
479 +/* Can be configured */
480 +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
481 +/* Can be configured */
482 +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
483 +
484 +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
485 + CLASS_PBUF_HEADER_OFFSET)
486 +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
487 + CLASS_PBUF_HEADER_OFFSET)
488 +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
489 + CLASS_PBUF_HEADER_OFFSET)
490 +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
491 + CLASS_PBUF_HEADER_OFFSET)
492 +
493 +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
494 + CLASS_PBUF0_BASE_ADDR)
495 +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
496 + CLASS_PBUF2_BASE_ADDR)
497 +
498 +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
499 + CLASS_PBUF0_HEADER_BASE_ADDR)
500 +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
501 + CLASS_PBUF2_HEADER_BASE_ADDR)
502 +
503 +#define CLASS_ROUTE_SIZE 128
504 +#define CLASS_MAX_ROUTE_SIZE 256
505 +#define CLASS_ROUTE_HASH_BITS 20
506 +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
507 +
508 +/* Can be configured */
509 +#define CLASS_ROUTE0_BASE_ADDR 0x400
510 +/* Can be configured */
511 +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
512 +/* Can be configured */
513 +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
514 +/* Can be configured */
515 +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
516 +
517 +#define CLASS_SA_SIZE 128
518 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
519 +/* not used */
520 +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
521 +/* not used */
522 +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
523 +/* not used */
524 +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
525 +
526 +/* general-purpose free DMEM buffer, last portion of 2K DMEM pbuf */
527 +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
528 + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
529 +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
530 + CLASS_SA_SIZE))
531 +
532 +#define TWO_LEVEL_ROUTE BIT(0)
533 +#define PHYNO_IN_HASH BIT(1)
534 +#define HW_ROUTE_FETCH BIT(3)
535 +#define HW_BRIDGE_FETCH BIT(5)
536 +#define IP_ALIGNED BIT(6)
537 +#define ARC_HIT_CHECK_EN BIT(7)
538 +#define CLASS_TOE BIT(11)
539 +#define HASH_NORMAL (0 << 12)
540 +#define HASH_CRC_PORT BIT(12)
541 +#define HASH_CRC_IP (2 << 12)
542 +#define HASH_CRC_PORT_IP (3 << 12)
543 +#define QB2BUS_LE BIT(15)
544 +
545 +#define TCP_CHKSUM_DROP BIT(0)
546 +#define UDP_CHKSUM_DROP BIT(1)
547 +#define IPV4_CHKSUM_DROP BIT(9)
548 +
549 +/* CLASS_HIF_PARSE bits */
550 +#define HIF_PKT_CLASS_EN BIT(0)
551 +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
552 +
553 +struct class_cfg {
554 + u32 toe_mode;
555 + unsigned long route_table_baseaddr;
556 + u32 route_table_hash_bits;
557 + u32 pe_sys_clk_ratio;
558 + u32 resume;
559 +};
560 +
561 +#endif /* _CLASS_CSR_H_ */
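
As one concrete use of the packing helpers above, CLASS_HDR_SIZE and CLASS_ROUTE_HASH_ENTRY_SIZE each combine two fields. A sketch, assuming CBUS_BASE_ADDR already resolves to the mapped register base (as pfe.h arranges), so the CLASS_* constants are writable addresses:

/* LMEM data starts at LMEM_HDR_SIZE, DDR data at DDR_HDR_SIZE (both in cbus.h) */
writel(CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) | CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE),
       CLASS_HDR_SIZE);

/* 128-byte route entries and 2^20 hash buckets */
writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
       CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS),
       CLASS_ROUTE_HASH_ENTRY_SIZE);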
562 --- /dev/null
563 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
564 @@ -0,0 +1,242 @@
565 +/*
566 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
567 + * Copyright 2017 NXP
568 + *
569 + * This program is free software; you can redistribute it and/or modify
570 + * it under the terms of the GNU General Public License as published by
571 + * the Free Software Foundation; either version 2 of the License, or
572 + * (at your option) any later version.
573 + *
574 + * This program is distributed in the hope that it will be useful,
575 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
576 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
577 + * GNU General Public License for more details.
578 + *
579 + * You should have received a copy of the GNU General Public License
580 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
581 + */
582 +
583 +#ifndef _EMAC_H_
584 +#define _EMAC_H_
585 +
586 +#include <linux/ethtool.h>
587 +
588 +#define EMAC_IEVENT_REG 0x004
589 +#define EMAC_IMASK_REG 0x008
590 +#define EMAC_R_DES_ACTIVE_REG 0x010
591 +#define EMAC_X_DES_ACTIVE_REG 0x014
592 +#define EMAC_ECNTRL_REG 0x024
593 +#define EMAC_MII_DATA_REG 0x040
594 +#define EMAC_MII_CTRL_REG 0x044
595 +#define EMAC_MIB_CTRL_STS_REG 0x064
596 +#define EMAC_RCNTRL_REG 0x084
597 +#define EMAC_TCNTRL_REG 0x0C4
598 +#define EMAC_PHY_ADDR_LOW 0x0E4
599 +#define EMAC_PHY_ADDR_HIGH 0x0E8
600 +#define EMAC_GAUR 0x120
601 +#define EMAC_GALR 0x124
602 +#define EMAC_TFWR_STR_FWD 0x144
603 +#define EMAC_RX_SECTION_FULL 0x190
604 +#define EMAC_RX_SECTION_EMPTY 0x194
605 +#define EMAC_TX_SECTION_EMPTY 0x1A0
606 +#define EMAC_TRUNC_FL 0x1B0
607 +
608 +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
609 +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
610 +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
611 +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
612 +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
613 +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
614 +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
615 +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
616 +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
617 +#define RMON_T_COL 0x224 /* RMON TX collision count */
618 +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
619 +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
620 +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
621 +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
622 +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
623 +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
624 +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
625 +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
626 +#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
627 +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
628 +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
629 +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
630 +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
631 +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
632 +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
633 +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
634 +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
635 +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
636 +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
637 +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
638 +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
639 +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
640 +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
641 +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
642 +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
643 +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
644 +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
645 +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
646 +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
647 +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
648 +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
649 +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
650 +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
651 +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
652 +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
653 +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
654 +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
655 +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
656 +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
657 +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
658 +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
659 +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
660 +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
661 +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
662 +
663 +#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
664 +#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
665 +
666 +/* GEMAC definitions and settings */
667 +
668 +#define EMAC_PORT_0 0
669 +#define EMAC_PORT_1 1
670 +
671 +/* GEMAC Bit definitions */
672 +#define EMAC_IEVENT_HBERR 0x80000000
673 +#define EMAC_IEVENT_BABR 0x40000000
674 +#define EMAC_IEVENT_BABT 0x20000000
675 +#define EMAC_IEVENT_GRA 0x10000000
676 +#define EMAC_IEVENT_TXF 0x08000000
677 +#define EMAC_IEVENT_TXB 0x04000000
678 +#define EMAC_IEVENT_RXF 0x02000000
679 +#define EMAC_IEVENT_RXB 0x01000000
680 +#define EMAC_IEVENT_MII 0x00800000
681 +#define EMAC_IEVENT_EBERR 0x00400000
682 +#define EMAC_IEVENT_LC 0x00200000
683 +#define EMAC_IEVENT_RL 0x00100000
684 +#define EMAC_IEVENT_UN 0x00080000
685 +
686 +#define EMAC_IMASK_HBERR 0x80000000
687 +#define EMAC_IMASK_BABR 0x40000000
688 +#define EMAC_IMASKT_BABT 0x20000000
689 +#define EMAC_IMASK_GRA 0x10000000
690 +#define EMAC_IMASKT_TXF 0x08000000
691 +#define EMAC_IMASK_TXB 0x04000000
692 +#define EMAC_IMASKT_RXF 0x02000000
693 +#define EMAC_IMASK_RXB 0x01000000
694 +#define EMAC_IMASK_MII 0x00800000
695 +#define EMAC_IMASK_EBERR 0x00400000
696 +#define EMAC_IMASK_LC 0x00200000
697 +#define EMAC_IMASKT_RL 0x00100000
698 +#define EMAC_IMASK_UN 0x00080000
699 +
700 +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
701 +#define EMAC_RCNTRL_LOOP 0x00000001
702 +#define EMAC_RCNTRL_DRT 0x00000002
703 +#define EMAC_RCNTRL_MII_MODE 0x00000004
704 +#define EMAC_RCNTRL_PROM 0x00000008
705 +#define EMAC_RCNTRL_BC_REJ 0x00000010
706 +#define EMAC_RCNTRL_FCE 0x00000020
707 +#define EMAC_RCNTRL_RGMII 0x00000040
708 +#define EMAC_RCNTRL_SGMII 0x00000080
709 +#define EMAC_RCNTRL_RMII 0x00000100
710 +#define EMAC_RCNTRL_RMII_10T 0x00000200
711 +#define EMAC_RCNTRL_CRC_FWD 0x00004000
712 +
713 +#define EMAC_TCNTRL_GTS 0x00000001
714 +#define EMAC_TCNTRL_HBC 0x00000002
715 +#define EMAC_TCNTRL_FDEN 0x00000004
716 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
717 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
718 +
719 +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
720 +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
721 +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
722 +#define EMAC_ECNTRL_SLEEP 0x00000008
723 +#define EMAC_ECNTRL_SPEED 0x00000020
724 +#define EMAC_ECNTRL_DBSWAP 0x00000100
725 +
726 +#define EMAC_X_WMRK_STRFWD 0x00000100
727 +
728 +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
729 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
730 +
731 +#define EMAC_RX_SECTION_EMPTY_V 0x00010006
732 +/*
733 + * The possible operating speeds of the MAC, currently supporting 10, 100 and
734 + * 1000Mb modes.
735 + */
736 +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
737 +
738 +/* MII-related definitions */
739 +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
740 +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
741 +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Clause 45 read operation */
742 +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
743 +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Clause 45 write operation */
744 +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
745 +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
746 +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
747 +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
748 +
749 +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
750 +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
751 +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
752 +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
753 +
754 +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
755 + EMAC_MII_DATA_RA_SHIFT)
756 +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
757 + EMAC_MII_DATA_PA_SHIFT)
758 +#define EMAC_MII_DATA(v) ((v) & 0xffff)
759 +
760 +#define EMAC_MII_SPEED_SHIFT 1
761 +#define EMAC_HOLDTIME_SHIFT 8
762 +#define EMAC_HOLDTIME_MASK 0x7
763 +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
764 + EMAC_HOLDTIME_SHIFT)
765 +
766 +/*
767 + * The Address organisation for the MAC device. All addresses are split into
768 + * two 32-bit register fields. The first one (bottom) holds the lower 32 bits
769 + * of the address and the other field holds the high-order bits - this may be
770 + * 16 bits in the case of MAC addresses, or 32 bits for the hash address.
771 + * In terms of memory storage, the first item (bottom) is assumed to be at a
772 + * lower address location than 'top'. i.e. top should be at address location of
773 + * 'bottom' + 4 bytes.
774 + */
775 +struct pfe_mac_addr {
776 + u32 bottom; /* Lower 32-bits of address. */
777 + u32 top; /* Upper 32-bits of address. */
778 +};
779 +
780 +/*
781 + * The following is the organisation of the address filters section of the MAC
782 + * registers. The MAC contains four possible specific address match
783 + * addresses, if an incoming frame corresponds to any one of these four
784 + * addresses then the frame will be copied to memory.
785 + * It is not necessary for all four of the address match registers to be
786 + * programmed, this is application dependent.
787 + */
788 +struct spec_addr {
789 + struct pfe_mac_addr one; /* Specific address register 1. */
790 + struct pfe_mac_addr two; /* Specific address register 2. */
791 + struct pfe_mac_addr three; /* Specific address register 3. */
792 + struct pfe_mac_addr four; /* Specific address register 4. */
793 +};
794 +
795 +struct gemac_cfg {
796 + u32 mode;
797 + u32 speed;
798 + u32 duplex;
799 +};
800 +
801 +/* EMAC Hash size */
802 +#define EMAC_HASH_REG_BITS 64
803 +
804 +#define EMAC_SPEC_ADDR_MAX 4
805 +
806 +#endif /* _EMAC_H_ */
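
To illustrate the MII field macros, a Clause-22 read command is composed and written to EMAC_MII_DATA_REG; once the EMAC_IEVENT_MII event fires, the result sits in the low 16 bits. A sketch (emac_base, phy_addr and reg_addr are illustrative names, not defined in this header):

u32 cmd = EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
	  EMAC_MII_DATA_PA(phy_addr) | EMAC_MII_DATA_RA(reg_addr);

writel(cmd, emac_base + EMAC_MII_DATA_REG);
/* ... wait for EMAC_IEVENT_MII, then: */
u16 val = readl(emac_base + EMAC_MII_DATA_REG) & EMAC_MII_DATA_DATAMSK;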
807 --- /dev/null
808 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
809 @@ -0,0 +1,86 @@
810 +/*
811 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
812 + * Copyright 2017 NXP
813 + *
814 + * This program is free software; you can redistribute it and/or modify
815 + * it under the terms of the GNU General Public License as published by
816 + * the Free Software Foundation; either version 2 of the License, or
817 + * (at your option) any later version.
818 + *
819 + * This program is distributed in the hope that it will be useful,
820 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
821 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
822 + * GNU General Public License for more details.
823 + *
824 + * You should have received a copy of the GNU General Public License
825 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
826 + */
827 +
828 +#ifndef _GPI_H_
829 +#define _GPI_H_
830 +
831 +#define GPI_VERSION 0x00
832 +#define GPI_CTRL 0x04
833 +#define GPI_RX_CONFIG 0x08
834 +#define GPI_HDR_SIZE 0x0c
835 +#define GPI_BUF_SIZE 0x10
836 +#define GPI_LMEM_ALLOC_ADDR 0x14
837 +#define GPI_LMEM_FREE_ADDR 0x18
838 +#define GPI_DDR_ALLOC_ADDR 0x1c
839 +#define GPI_DDR_FREE_ADDR 0x20
840 +#define GPI_CLASS_ADDR 0x24
841 +#define GPI_DRX_FIFO 0x28
842 +#define GPI_TRX_FIFO 0x2c
843 +#define GPI_INQ_PKTPTR 0x30
844 +#define GPI_DDR_DATA_OFFSET 0x34
845 +#define GPI_LMEM_DATA_OFFSET 0x38
846 +#define GPI_TMLF_TX 0x4c
847 +#define GPI_DTX_ASEQ 0x50
848 +#define GPI_FIFO_STATUS 0x54
849 +#define GPI_FIFO_DEBUG 0x58
850 +#define GPI_TX_PAUSE_TIME 0x5c
851 +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
852 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
853 +#define GPI_TOE_CHKSUM_EN 0x68
854 +#define GPI_OVERRUN_DROPCNT 0x6c
855 +#define GPI_CSR_MTIP_PAUSE_REG 0x74
856 +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
857 +#define GPI_CSR_RX_CNT 0x7c
858 +#define GPI_CSR_TX_CNT 0x80
859 +#define GPI_CSR_DEBUG1 0x84
860 +#define GPI_CSR_DEBUG2 0x88
861 +
862 +struct gpi_cfg {
863 + u32 lmem_rtry_cnt;
864 + u32 tmlf_txthres;
865 + u32 aseq_len;
866 + u32 mtip_pause_reg;
867 +};
868 +
869 +/* GPI common defines */
870 +#define GPI_LMEM_BUF_EN 0x1
871 +#define GPI_DDR_BUF_EN 0x1
872 +
873 +/* EGPI 1 defines */
874 +#define EGPI1_LMEM_RTRY_CNT 0x40
875 +#define EGPI1_TMLF_TXTHRES 0xBC
876 +#define EGPI1_ASEQ_LEN 0x50
877 +
878 +/* EGPI 2 defines */
879 +#define EGPI2_LMEM_RTRY_CNT 0x40
880 +#define EGPI2_TMLF_TXTHRES 0xBC
881 +#define EGPI2_ASEQ_LEN 0x40
882 +
883 +/* EGPI 3 defines */
884 +#define EGPI3_LMEM_RTRY_CNT 0x40
885 +#define EGPI3_TMLF_TXTHRES 0xBC
886 +#define EGPI3_ASEQ_LEN 0x40
887 +
888 +/* HGPI defines */
889 +#define HGPI_LMEM_RTRY_CNT 0x40
890 +#define HGPI_TMLF_TXTHRES 0xBC
891 +#define HGPI_ASEQ_LEN 0x40
892 +
893 +#define EGPI_PAUSE_TIME 0x000007D0
894 +#define EGPI_PAUSE_ENABLE 0x40000000
895 +#endif /* _GPI_H_ */
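
Tying the per-instance defines back to struct gpi_cfg, an EGPI1 setup would plausibly look like the sketch below; combining the pause time and enable bit for mtip_pause_reg is an assumption, not something mandated by this header:

struct gpi_cfg egpi1_cfg = {
	.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
	.tmlf_txthres = EGPI1_TMLF_TXTHRES,
	.aseq_len = EGPI1_ASEQ_LEN,
	.mtip_pause_reg = EGPI_PAUSE_TIME | EGPI_PAUSE_ENABLE,	/* assumption */
};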
896 --- /dev/null
897 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
898 @@ -0,0 +1,100 @@
899 +/*
900 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
901 + * Copyright 2017 NXP
902 + *
903 + * This program is free software; you can redistribute it and/or modify
904 + * it under the terms of the GNU General Public License as published by
905 + * the Free Software Foundation; either version 2 of the License, or
906 + * (at your option) any later version.
907 + *
908 + * This program is distributed in the hope that it will be useful,
909 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
910 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
911 + * GNU General Public License for more details.
912 + *
913 + * You should have received a copy of the GNU General Public License
914 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
915 + */
916 +
917 +#ifndef _HIF_H_
918 +#define _HIF_H_
919 +
920 +/* @file hif.h.
921 + * hif - PFE HIF block control and status registers.
922 + * Mapped on CBUS and accessible from all PEs and the ARM.
923 + */
924 +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
925 +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
926 +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
927 +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
928 +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
929 +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
930 +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
931 +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
932 +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
933 +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
934 +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
935 +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
936 +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
937 +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
938 +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
939 +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
940 +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
941 +
942 +/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
943 +#define HIF_INT BIT(0)
944 +#define HIF_RXBD_INT BIT(1)
945 +#define HIF_RXPKT_INT BIT(2)
946 +#define HIF_TXBD_INT BIT(3)
947 +#define HIF_TXPKT_INT BIT(4)
948 +
949 +/* HIF_TX_CTRL bits */
950 +#define HIF_CTRL_DMA_EN BIT(0)
951 +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
952 +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
953 +
954 +/* HIF_RX_STATUS bits */
955 +#define BDP_CSR_RX_DMA_ACTV BIT(16)
956 +
957 +/* HIF_INT_ENABLE bits */
958 +#define HIF_INT_EN BIT(0)
959 +#define HIF_RXBD_INT_EN BIT(1)
960 +#define HIF_RXPKT_INT_EN BIT(2)
961 +#define HIF_TXBD_INT_EN BIT(3)
962 +#define HIF_TXPKT_INT_EN BIT(4)
963 +
964 +/* HIF_POLL_CTRL bits */
965 +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
966 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
967 +
968 +/* HIF_INT_COAL bits */
969 +#define HIF_INT_COAL_ENABLE BIT(31)
970 +
971 +/* Buffer descriptor control bits */
972 +#define BD_CTRL_BUFLEN_MASK 0x3fff
973 +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
974 +#define BD_CTRL_CBD_INT_EN BIT(16)
975 +#define BD_CTRL_PKT_INT_EN BIT(17)
976 +#define BD_CTRL_LIFM BIT(18)
977 +#define BD_CTRL_LAST_BD BIT(19)
978 +#define BD_CTRL_DIR BIT(20)
979 +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
980 +#define BD_CTRL_PKT_XFER BIT(24)
981 +#define BD_CTRL_PARSE_DISABLE BIT(25)
982 +#define BD_CTRL_BRFETCH_DISABLE BIT(26)
983 +#define BD_CTRL_RTFETCH_DISABLE BIT(27)
984 +#define BD_CTRL_DESC_EN BIT(31)
985 +
986 +/* Buffer descriptor status bits */
987 +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
988 +#define BD_STATUS_DIR_PROC_ID BIT(16)
989 +#define BD_STATUS_CONN_ID_EN BIT(17)
990 +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
991 +#define BD_STATUS_LE_DATA BIT(21)
992 +#define BD_STATUS_CHKSUM_EN BIT(22)
993 +
994 +/* HIF Buffer descriptor status bits */
995 +#define DIR_PROC_ID BIT(16)
996 +#define PROC_ID(id) ((id) << 18)
997 +
998 +#endif /* _HIF_H_ */
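
To make the descriptor bits concrete, the control word for a single-buffer host TX descriptor would be assembled roughly as follows (a sketch; len is the frame length, and the interrupt enable is optional):

u32 ctrl = BD_CTRL_DESC_EN	/* hand the descriptor to hardware */
	 | BD_CTRL_LIFM		/* last (and only) buffer in this frame */
	 | BD_CTRL_PKT_INT_EN	/* raise HIF_TXPKT_INT on completion */
	 | BD_BUF_LEN(len);	/* buffer length, masked to 14 bits */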
999 --- /dev/null
1000 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1001 @@ -0,0 +1,50 @@
1002 +/*
1003 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1004 + * Copyright 2017 NXP
1005 + *
1006 + * This program is free software; you can redistribute it and/or modify
1007 + * it under the terms of the GNU General Public License as published by
1008 + * the Free Software Foundation; either version 2 of the License, or
1009 + * (at your option) any later version.
1010 + *
1011 + * This program is distributed in the hope that it will be useful,
1012 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1013 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1014 + * GNU General Public License for more details.
1015 + *
1016 + * You should have received a copy of the GNU General Public License
1017 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1018 + */
1019 +
1020 +#ifndef _HIF_NOCPY_H_
1021 +#define _HIF_NOCPY_H_
1022 +
1023 +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
1024 +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
1025 +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
1026 +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
1027 +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
1028 +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
1029 +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
1030 +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
1031 +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
1032 +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
1033 +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
1034 +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
1035 +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
1036 +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
1037 +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
1038 +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
1039 +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
1040 +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
1041 +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
1042 +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
1043 +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
1044 +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
1045 +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
1046 +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
1047 +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
1048 +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
1049 +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
1050 +
1051 +#endif /* _HIF_NOCPY_H_ */
1052 --- /dev/null
1053 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1054 @@ -0,0 +1,168 @@
1055 +/*
1056 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1057 + * Copyright 2017 NXP
1058 + *
1059 + * This program is free software; you can redistribute it and/or modify
1060 + * it under the terms of the GNU General Public License as published by
1061 + * the Free Software Foundation; either version 2 of the License, or
1062 + * (at your option) any later version.
1063 + *
1064 + * This program is distributed in the hope that it will be useful,
1065 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1066 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1067 + * GNU General Public License for more details.
1068 + *
1069 + * You should have received a copy of the GNU General Public License
1070 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1071 + */
1072 +
1073 +#ifndef _TMU_CSR_H_
1074 +#define _TMU_CSR_H_
1075 +
1076 +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
1077 +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
1078 +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
1079 +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
1080 +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
1081 +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
1082 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1083 +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
1084 +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
1085 +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
1086 +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
1087 +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
1088 +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
1089 +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
1090 +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
1091 +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
1092 +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
1093 +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
1094 +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
1095 +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
1096 +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
1097 +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
1098 +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
1099 +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
1100 +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
1101 +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
1102 +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
1103 +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
1104 +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
1105 +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
1106 +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
1107 +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
1108 +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
1109 +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
1110 +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
1111 +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
1112 +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
1113 +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
1114 +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
1115 +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
1116 +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
1117 +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
1118 +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
1119 +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
1120 +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
1121 +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
1122 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1123 + * This is a global Enable for all schedulers in PHY0
1124 + */
1125 +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
1126 +
1127 +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
1128 +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
1129 +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
1130 +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
1131 +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
1132 +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
1133 +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
1134 +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
1135 +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
1136 +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
1137 +
1138 +/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
1139 + * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
1140 + * the internal memory. This address is used to access both the PM and DM of
1141 + * all the PE's
1142 + */
1143 +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
1144 +
1145 +/* Internal Memory Access Write Data */
1146 +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
1147 +/* Internal Memory Access Read Data. The commands are blocked
1148 + * at the mem_access only
1149 + */
1150 +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
1151 +
1152 +/* [31:0] PHY0 in queue address (must be initialized with one of the
1153 + * xxx_INQ_PKTPTR cbus addresses)
1154 + */
1155 +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
1156 +/* [31:0] PHY1 in queue address (must be initialized with one of the
1157 + * xxx_INQ_PKTPTR cbus addresses)
1158 + */
1159 +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
1160 +/* [31:0] PHY2 in queue address (must be initialized with one of the
1161 + * xxx_INQ_PKTPTR cbus addresses)
1162 + */
1163 +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
1164 +/* [31:0] PHY3 in queue address (must be initialized with one of the
1165 + * xxx_INQ_PKTPTR cbus addresses)
1166 + */
1167 +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
1168 +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
1169 +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
1170 +
1171 +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
1172 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1173 +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
1174 +
1175 +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
1176 +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
1177 +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
1178 +/* [31:0] PHY4 in queue address (must be initialized with one of the
1179 + * xxx_INQ_PKTPTR cbus addresses)
1180 + */
1181 +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
1182 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1183 + * This is a global Enable for all schedulers in PHY1
1184 + */
1185 +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
1186 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1187 + * This is a global Enable for all schedulers in PHY2
1188 + */
1189 +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
1190 +/* [9:0] Scheduler Enable for each of the schedulers in the TDQ.
1191 + * This is a global Enable for all schedulers in PHY3
1192 + */
1193 +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
1194 +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
1195 +/* [31:0] PHY5 in queue address (must be initialized with one of the
1196 + * xxx_INQ_PKTPTR cbus addresses)
1197 + */
1198 +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
1199 +
1200 +#define SW_RESET BIT(0) /* Global software reset */
1201 +#define INQ_RESET BIT(2)
1202 +#define TEQ_RESET BIT(3)
1203 +#define TDQ_RESET BIT(4)
1204 +#define PE_RESET BIT(5)
1205 +#define MEM_INIT BIT(6)
1206 +#define MEM_INIT_DONE BIT(7)
1207 +#define LLM_INIT BIT(8)
1208 +#define LLM_INIT_DONE BIT(9)
1209 +#define ECC_MEM_INIT_DONE BIT(10)
1210 +
1211 +struct tmu_cfg {
1212 + u32 pe_sys_clk_ratio;
1213 + unsigned long llm_base_addr;
1214 + u32 llm_queue_len;
1215 +};
1216 +
1217 +/* Not HW related; common defines for pfe_ctrl / pfe */
1218 +#define DEFAULT_MAX_QDEPTH 80
1219 +#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host tx qos */
1220 +#define DEFAULT_TMU3_QDEPTH 127
1221 +
1222 +#endif /* _TMU_CSR_H_ */
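
As an example of the in-queue plumbing described in the comments above, each TMU_PHYx_INQ_ADDR register is pointed at one of the xxx_INQ_PKTPTR cbus addresses. A sketch for PHY0 feeding EGPI1, assuming the CBUS_VIRT_TO_PFE() address conversion used elsewhere in the driver:

/* packets dequeued by the TMU for PHY0 are handed to EGPI1 */
writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);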
1223 --- /dev/null
1224 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1225 @@ -0,0 +1,61 @@
1226 +/*
1227 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1228 + * Copyright 2017 NXP
1229 + *
1230 + * This program is free software; you can redistribute it and/or modify
1231 + * it under the terms of the GNU General Public License as published by
1232 + * the Free Software Foundation; either version 2 of the License, or
1233 + * (at your option) any later version.
1234 + *
1235 + * This program is distributed in the hope that it will be useful,
1236 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1237 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1238 + * GNU General Public License for more details.
1239 + *
1240 + * You should have received a copy of the GNU General Public License
1241 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1242 + */
1243 +
1244 +#ifndef _UTIL_CSR_H_
1245 +#define _UTIL_CSR_H_
1246 +
1247 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1248 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1249 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1250 +
1251 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1252 +
1253 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1254 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1255 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1256 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1257 +
1258 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1259 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1260 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1261 +
1262 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1263 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1264 +
1265 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1266 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1267 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1268 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1269 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1270 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1271 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1272 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1273 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1274 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1275 +
1276 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1277 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1278 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1279 +
1280 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1281 +
1282 +struct util_cfg {
1283 + u32 pe_sys_clk_ratio;
1284 +};
1285 +
1286 +#endif /* _UTIL_CSR_H_ */
1287 --- /dev/null
1288 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
1289 @@ -0,0 +1,372 @@
1290 +/*
1291 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1292 + * Copyright 2017 NXP
1293 + *
1294 + * This program is free software; you can redistribute it and/or modify
1295 + * it under the terms of the GNU General Public License as published by
1296 + * the Free Software Foundation; either version 2 of the License, or
1297 + * (at your option) any later version.
1298 + *
1299 + * This program is distributed in the hope that it will be useful,
1300 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1301 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1302 + * GNU General Public License for more details.
1303 + *
1304 + * You should have received a copy of the GNU General Public License
1305 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1306 + */
1307 +
1308 +#ifndef _PFE_H_
1309 +#define _PFE_H_
1310 +
1311 +#include "cbus.h"
1312 +
1313 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1314 +/*
1315 + * Only valid for mem access register interface
1316 + */
1317 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
1318 +#define CLASS_DMEM_SIZE 0x00002000
1319 +#define CLASS_IMEM_SIZE 0x00008000
1320 +
1321 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1322 +/*
1323 + * Only valid for mem access register interface
1324 + */
1325 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
1326 +#define TMU_DMEM_SIZE 0x00000800
1327 +#define TMU_IMEM_SIZE 0x00002000
1328 +
1329 +#define UTIL_DMEM_BASE_ADDR 0x00000000
1330 +#define UTIL_DMEM_SIZE 0x00002000
1331 +
1332 +#define PE_LMEM_BASE_ADDR 0xc3010000
1333 +#define PE_LMEM_SIZE 0x8000
1334 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1335 +
1336 +#define DMEM_BASE_ADDR 0x00000000
1337 +#define DMEM_SIZE 0x2000 /* TMU has less... */
1338 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
1339 +
1340 +#define PMEM_BASE_ADDR 0x00010000
1341 +#define PMEM_SIZE 0x8000 /* TMU has less... */
1342 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
1343 +
1344 +/* These check memory ranges from the PE's point of view / memory map */
1345 +#define IS_DMEM(addr, len) \
1346 + ({ typeof(addr) addr_ = (addr); \
1347 + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
1348 + (((unsigned long)(addr_) + (len)) <= DMEM_END); })
1349 +
1350 +#define IS_PMEM(addr, len) \
1351 + ({ typeof(addr) addr_ = (addr); \
1352 + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
1353 + (((unsigned long)(addr_) + (len)) <= PMEM_END); })
1354 +
1355 +#define IS_PE_LMEM(addr, len) \
1356 + ({ typeof(addr) addr_ = (addr); \
1357 + ((unsigned long)(addr_) >= \
1358 + PE_LMEM_BASE_ADDR) && \
1359 + (((unsigned long)(addr_) + \
1360 + (len)) <= PE_LMEM_END); })
1361 +
1362 +#define IS_PFE_LMEM(addr, len) \
1363 + ({ typeof(addr) addr_ = (addr); \
1364 + ((unsigned long)(addr_) >= \
1365 + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
1366 + (((unsigned long)(addr_) + (len)) <= \
1367 + CBUS_VIRT_TO_PFE(LMEM_END)); })
1368 +
1369 +#define __IS_PHYS_DDR(addr, len) \
1370 + ({ typeof(addr) addr_ = (addr); \
1371 + ((unsigned long)(addr_) >= \
1372 + DDR_PHYS_BASE_ADDR) && \
1373 + (((unsigned long)(addr_) + (len)) <= \
1374 + DDR_PHYS_END); })
1375 +
1376 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
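+/*
+ * Usage sketch (illustrative, not part of the driver): callers such as the
+ * ELF section loader are expected to validate firmware load addresses
+ * against the PE memory map before copying, e.g.:
+ *
+ *	if (!IS_DMEM(shdr_addr, shdr_size) && !IS_PMEM(shdr_addr, shdr_size))
+ *		return -EINVAL;	// range not visible to this PE
+ *
+ * shdr_addr/shdr_size are hypothetical names for a section's load address
+ * and length.
+ */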
1377 +
1378 +/*
1379 + * If using a run-time virtual address for the cbus base address use this code
1380 + */
1381 +extern void *cbus_base_addr;
1382 +extern void *ddr_base_addr;
1383 +extern unsigned long ddr_phys_base_addr;
1384 +extern unsigned int ddr_size;
1385 +
1386 +#define CBUS_BASE_ADDR cbus_base_addr
1387 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
1388 +#define DDR_BASE_ADDR ddr_base_addr
1389 +#define DDR_SIZE ddr_size
1390 +
1391 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
1392 +
1393 +#define LS1012A_PFE_RESET_WA	/*
1394 +				 * PFE doesn't have a global reset;
1395 +				 * re-init must take care of a few things
1396 +				 * to make the PFE functional after reset
1397 +				 */
1398 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
1399 + * as seen by PE's.
1400 + */
1401 +/* CBUS physical base address as seen by PE's. */
1402 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
1403 +
1404 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
1405 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
1406 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
1407 + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
1408 +/* Translates to PFE address map */
1409 +
1410 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
1411 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
1412 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
1413 +
1414 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
1415 + PFE_CBUS_PHYS_BASE_ADDR)
1416 +#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
1417 + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
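+/*
+ * Worked example (illustrative): the PFE sees DDR through a window where
+ * bit 31 is stripped, so a CPU physical address of 0x83400000 maps to
+ * 0x03400000 in the PFE address space:
+ *
+ *	DDR_PHYS_TO_PFE(0x83400000) == (0x83400000 & 0x7FFFFFFF) == 0x03400000
+ *	DDR_PFE_TO_PHYS(0x03400000) == (0x03400000 | 0x80000000) == 0x83400000
+ *
+ * CBUS addresses are rebased the same way, from the host virtual mapping
+ * to the 0xc0000000 window the PEs use.
+ */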
1418 +
1419 +/* The defines below are used by the QoS control driver on the host */
1420 +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
1421 + * pe's
1422 + */
1423 +
1424 +enum {
1425 + CLASS0_ID = 0,
1426 + CLASS1_ID,
1427 + CLASS2_ID,
1428 + CLASS3_ID,
1429 + CLASS4_ID,
1430 + CLASS5_ID,
1431 + TMU0_ID,
1432 + TMU1_ID,
1433 + TMU2_ID,
1434 + TMU3_ID,
1435 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1436 + UTIL_ID,
1437 +#endif
1438 + MAX_PE
1439 +};
1440 +
1441 +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
1442 + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
1443 + BIT(CLASS4_ID) | BIT(CLASS5_ID))
1444 +#define CLASS_MAX_ID CLASS5_ID
1445 +
1446 +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
1447 + BIT(TMU3_ID))
1448 +
1449 +#define TMU_MAX_ID TMU3_ID
1450 +
1451 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1452 +#define UTIL_MASK BIT(UTIL_ID)
1453 +#endif
1454 +
1455 +struct pe_status {
1456 + u32 cpu_state;
1457 + u32 activity_counter;
1458 + u32 rx;
1459 + union {
1460 + u32 tx;
1461 + u32 tmu_qstatus;
1462 + };
1463 + u32 drop;
1464 +#if defined(CFG_PE_DEBUG)
1465 + u32 debug_indicator;
1466 + u32 debug[16];
1467 +#endif
1468 +} __aligned(16);
1469 +
1470 +struct pe_sync_mailbox {
1471 + u32 stop;
1472 + u32 stopped;
1473 +};
1474 +
1475 +/* Drop counter definitions */
1476 +
1477 +#define CLASS_NUM_DROP_COUNTERS 13
1478 +#define UTIL_NUM_DROP_COUNTERS 8
1479 +
1480 +/* PE information.
1481 + * Structure containing PE-specific information. It is used to create
1482 + * generic C functions common to all PEs.
1483 + * Before using the library functions, this structure needs to be
1484 + * initialized with the virtual addresses of the different registers
1485 + * (according to the ARM MMU mapping). The default initialization supports a
1486 + * virtual == physical mapping.
1487 + */
1488 +struct pe_info {
1489 + u32 dmem_base_addr; /* PE's dmem base address */
1490 + u32 pmem_base_addr; /* PE's pmem base address */
1491 + u32 pmem_size; /* PE's pmem size */
1492 +
1493 + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
1494 + * address
1495 + */
1496 + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
1497 + * address
1498 + */
1499 + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
1500 + * address
1501 + */
1502 +};
1503 +
1504 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
1505 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
1506 +
1507 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1508 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
1509 +
1510 +u32 pe_pmem_read(int id, u32 addr, u8 size);
1511 +
1512 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
1513 +u32 pe_dmem_read(int id, u32 addr, u8 size);
1514 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
1515 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
1516 +void class_bus_write(u32 val, u32 addr, u8 size);
1517 +u32 class_bus_read(u32 addr, u8 size);
1518 +
1519 +#define class_bus_readl(addr) class_bus_read(addr, 4)
1520 +#define class_bus_readw(addr) class_bus_read(addr, 2)
1521 +#define class_bus_readb(addr) class_bus_read(addr, 1)
1522 +
1523 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
1524 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
1525 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
1526 +
1527 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
1528 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
1529 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
1530 +
1531 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
1532 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
1533 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
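+/*
+ * Accessor usage sketch (illustrative): the size-suffixed wrappers simply
+ * fix the last argument of pe_dmem_read()/pe_dmem_write(). For example,
+ * reading the first word of a class PE's status block (CLASS_DM_PESTATUS
+ * is defined in pfe_ctrl.h):
+ *
+ *	u32 cpu_state = be32_to_cpu(pe_dmem_readl(CLASS0_ID,
+ *						  CLASS_DM_PESTATUS));
+ *
+ * The be32_to_cpu() follows the cpu_to_be32() convention used by the
+ * mailbox code in pfe_ctrl.c and is an assumption here.
+ */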
1534 +
1535 +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
1536 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
1537 + struct device *dev);
1538 +
1539 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
1540 + unsigned int ddr_size);
1541 +void bmu_init(void *base, struct BMU_CFG *cfg);
1542 +void bmu_reset(void *base);
1543 +void bmu_enable(void *base);
1544 +void bmu_disable(void *base);
1545 +void bmu_set_config(void *base, struct BMU_CFG *cfg);
1546 +
1547 +/*
1548 + * An enumerated type for loopback values. This can be one of three values:
1549 + * no loopback (normal operation), local loopback through the MAC's internal
1550 + * loopback module, or PHY loopback through the external PHY.
1551 + */
1552 +#ifndef __MAC_LOOP_ENUM__
1553 +#define __MAC_LOOP_ENUM__
1554 +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
1555 +#endif
1556 +
1557 +void gemac_init(void *base, void *config);
1558 +void gemac_disable_rx_checksum_offload(void *base);
1559 +void gemac_enable_rx_checksum_offload(void *base);
1560 +void gemac_set_mdc_div(void *base, int mdc_div);
1561 +void gemac_set_speed(void *base, enum mac_speed gem_speed);
1562 +void gemac_set_duplex(void *base, int duplex);
1563 +void gemac_set_mode(void *base, int mode);
1564 +void gemac_enable(void *base);
1565 +void gemac_tx_disable(void *base);
1566 +void gemac_tx_enable(void *base);
1567 +void gemac_disable(void *base);
1568 +void gemac_reset(void *base);
1569 +void gemac_set_address(void *base, struct spec_addr *addr);
1570 +struct spec_addr gemac_get_address(void *base);
1571 +void gemac_set_loop(void *base, enum mac_loop gem_loop);
1572 +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
1573 +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
1574 +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
1575 +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
1576 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
1577 + unsigned int entry_index);
1578 +void gemac_clear_laddr1(void *base);
1579 +void gemac_clear_laddr2(void *base);
1580 +void gemac_clear_laddr3(void *base);
1581 +void gemac_clear_laddr4(void *base);
1582 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
1583 +struct pfe_mac_addr gemac_get_hash(void *base);
1584 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
1585 +struct pfe_mac_addr gem_get_laddr1(void *base);
1586 +struct pfe_mac_addr gem_get_laddr2(void *base);
1587 +struct pfe_mac_addr gem_get_laddr3(void *base);
1588 +struct pfe_mac_addr gem_get_laddr4(void *base);
1589 +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
1590 +void gemac_set_config(void *base, struct gemac_cfg *cfg);
1591 +void gemac_allow_broadcast(void *base);
1592 +void gemac_no_broadcast(void *base);
1593 +void gemac_enable_1536_rx(void *base);
1594 +void gemac_disable_1536_rx(void *base);
1595 +void gemac_enable_rx_jmb(void *base);
1596 +void gemac_disable_rx_jmb(void *base);
1597 +void gemac_enable_stacked_vlan(void *base);
1598 +void gemac_disable_stacked_vlan(void *base);
1599 +void gemac_enable_pause_rx(void *base);
1600 +void gemac_disable_pause_rx(void *base);
1601 +void gemac_enable_copy_all(void *base);
1602 +void gemac_disable_copy_all(void *base);
1603 +void gemac_set_bus_width(void *base, int width);
1604 +void gemac_set_wol(void *base, u32 wol_conf);
1605 +
1606 +void gpi_init(void *base, struct gpi_cfg *cfg);
1607 +void gpi_reset(void *base);
1608 +void gpi_enable(void *base);
1609 +void gpi_disable(void *base);
1610 +void gpi_set_config(void *base, struct gpi_cfg *cfg);
1611 +
1612 +void class_init(struct class_cfg *cfg);
1613 +void class_reset(void);
1614 +void class_enable(void);
1615 +void class_disable(void);
1616 +void class_set_config(struct class_cfg *cfg);
1617 +
1618 +void tmu_reset(void);
1619 +void tmu_init(struct tmu_cfg *cfg);
1620 +void tmu_enable(u32 pe_mask);
1621 +void tmu_disable(u32 pe_mask);
1622 +u32 tmu_qstatus(u32 if_id);
1623 +u32 tmu_pkts_processed(u32 if_id);
1624 +
1625 +void util_init(struct util_cfg *cfg);
1626 +void util_reset(void);
1627 +void util_enable(void);
1628 +void util_disable(void);
1629 +
1630 +void hif_init(void);
1631 +void hif_tx_enable(void);
1632 +void hif_tx_disable(void);
1633 +void hif_rx_enable(void);
1634 +void hif_rx_disable(void);
1635 +
1636 +/* Get Chip Revision level
1637 + *
1638 + */
1639 +static inline unsigned int CHIP_REVISION(void)
1640 +{
1641 +	/* For LS1012A, always return 1 */
1642 + return 1;
1643 +}
1644 +
1645 +/* Start HIF rx DMA
1646 + *
1647 + */
1648 +static inline void hif_rx_dma_start(void)
1649 +{
1650 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
1651 +}
1652 +
1653 +/* Start HIF tx DMA
1654 + *
1655 + */
1656 +static inline void hif_tx_dma_start(void)
1657 +{
1658 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
1659 +}
1660 +
1661 +#endif /* _PFE_H_ */
1662 --- /dev/null
1663 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
1664 @@ -0,0 +1,238 @@
1665 +/*
1666 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1667 + * Copyright 2017 NXP
1668 + *
1669 + * This program is free software; you can redistribute it and/or modify
1670 + * it under the terms of the GNU General Public License as published by
1671 + * the Free Software Foundation; either version 2 of the License, or
1672 + * (at your option) any later version.
1673 + *
1674 + * This program is distributed in the hope that it will be useful,
1675 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1676 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1677 + * GNU General Public License for more details.
1678 + *
1679 + * You should have received a copy of the GNU General Public License
1680 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1681 + */
1682 +
1683 +#include <linux/kernel.h>
1684 +#include <linux/sched.h>
1685 +#include <linux/module.h>
1686 +#include <linux/list.h>
1687 +#include <linux/kthread.h>
1688 +
1689 +#include "pfe_mod.h"
1690 +#include "pfe_ctrl.h"
1691 +
1692 +#define TIMEOUT_MS 1000
1693 +
1694 +int relax(unsigned long end)
1695 +{
1696 + if (time_after(jiffies, end)) {
1697 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
1698 + return -1;
1699 +
1700 + if (need_resched())
1701 + schedule();
1702 + }
1703 +
1704 + return 0;
1705 +}
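+/*
+ * Typical polling pattern for relax() (illustrative sketch, mirroring its
+ * use in pe_sync_stop()/pe_reset_all() below): set a deadline a couple of
+ * jiffies ahead, then poll until done or relax() reports timeout:
+ *
+ *	unsigned long end = jiffies + 2;
+ *
+ *	while (!condition_met()) {
+ *		if (relax(end) < 0)
+ *			return -EIO;	// TIMEOUT_MS past the deadline
+ *	}
+ *
+ * condition_met() is a placeholder for the caller's completion check.
+ */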
1706 +
1707 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
1708 +{
1709 + int id;
1710 +
1711 + mutex_lock(&ctrl->mutex);
1712 +
1713 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
1714 + pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
1715 +
1716 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1717 + if (id == TMU2_ID)
1718 + continue;
1719 + pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
1720 + }
1721 +
1722 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1723 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
1724 +#endif
1725 + mutex_unlock(&ctrl->mutex);
1726 +}
1727 +
1728 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
1729 +{
1730 + int pe_mask = CLASS_MASK | TMU_MASK;
1731 +
1732 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1733 + pe_mask |= UTIL_MASK;
1734 +#endif
1735 + mutex_lock(&ctrl->mutex);
1736 + pe_start(&pfe->ctrl, pe_mask);
1737 + mutex_unlock(&ctrl->mutex);
1738 +}
1739 +
1740 +/* PE sync stop.
1741 + * Stops packet processing for a list of PE's (specified using a bitmask).
1742 + * The caller must hold ctrl->mutex.
1743 + *
1744 + * @param ctrl Control context
1745 + * @param pe_mask Mask of PE id's to stop
1746 + *
1747 + */
1748 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
1749 +{
1750 + struct pe_sync_mailbox *mbox;
1751 + int pe_stopped = 0;
1752 + unsigned long end = jiffies + 2;
1753 + int i;
1754 +
1755 +	pe_mask &= 0x2FF;  /* Exclude UTIL + TMU2 */
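+	/*
+	 * With the PE id enum from pfe.h (CLASS0..5 = bits 0-5, TMU0/1 =
+	 * bits 6-7, TMU2 = bit 8, TMU3 = bit 9, UTIL = bit 10), 0x2FF keeps
+	 * the class PEs, TMU0/1 and TMU3 while clearing TMU2 (skipped
+	 * throughout this driver) and UTIL, which do not take part in the
+	 * sync-stop handshake here.
+	 */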
1756 +
1757 + for (i = 0; i < MAX_PE; i++)
1758 + if (pe_mask & (1 << i)) {
1759 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1760 +
1761 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
1762 + long)&mbox->stop, 4);
1763 + }
1764 +
1765 + while (pe_stopped != pe_mask) {
1766 + for (i = 0; i < MAX_PE; i++)
1767 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1768 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1769 +
1770 + if (pe_dmem_read(i, (unsigned
1771 + long)&mbox->stopped, 4) &
1772 + cpu_to_be32(0x1))
1773 + pe_stopped |= (1 << i);
1774 + }
1775 +
1776 + if (relax(end) < 0)
1777 + goto err;
1778 + }
1779 +
1780 + return 0;
1781 +
1782 +err:
1783 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1784 +
1785 + for (i = 0; i < MAX_PE; i++)
1786 + if (pe_mask & (1 << i)) {
1787 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1788 +
1789 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1790 + long)&mbox->stop, 4);
1791 + }
1792 +
1793 + return -EIO;
1794 +}
1795 +
1796 +/* PE start.
1797 + * Starts packet processing for a list of PE's (specified using a bitmask).
1798 + * The caller must hold ctrl->mutex.
1799 + *
1800 + * @param ctrl Control context
1801 + * @param pe_mask Mask of PE id's to start
1802 + *
1803 + */
1804 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
1805 +{
1806 + struct pe_sync_mailbox *mbox;
1807 + int i;
1808 +
1809 + for (i = 0; i < MAX_PE; i++)
1810 + if (pe_mask & (1 << i)) {
1811 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1812 +
1813 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
1814 + long)&mbox->stop, 4);
1815 + }
1816 +}
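+/*
+ * Sketch of the intended stop/reconfigure/start sequence (assumed from the
+ * suspend/resume paths in this file; ctrl->mutex must be held):
+ *
+ *	mutex_lock(&ctrl->mutex);
+ *	if (!pe_sync_stop(ctrl, CLASS_MASK)) {
+ *		reprogram_class_state();	// hypothetical helper
+ *		pe_start(ctrl, CLASS_MASK);
+ *	}
+ *	mutex_unlock(&ctrl->mutex);
+ */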
1817 +
1818 +/* This function ensures all PEs are put into the idle state */
1819 +int pe_reset_all(struct pfe_ctrl *ctrl)
1820 +{
1821 + struct pe_sync_mailbox *mbox;
1822 + int pe_stopped = 0;
1823 + unsigned long end = jiffies + 2;
1824 + int i;
1825 + int pe_mask = CLASS_MASK | TMU_MASK;
1826 +
1827 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1828 + pe_mask |= UTIL_MASK;
1829 +#endif
1830 +
1831 + for (i = 0; i < MAX_PE; i++)
1832 + if (pe_mask & (1 << i)) {
1833 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1834 +
1835 + pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
1836 + long)&mbox->stop, 4);
1837 + }
1838 +
1839 + while (pe_stopped != pe_mask) {
1840 + for (i = 0; i < MAX_PE; i++)
1841 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
1842 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
1843 +
1844 + if (pe_dmem_read(i, (unsigned long)
1845 + &mbox->stopped, 4) &
1846 + cpu_to_be32(0x1))
1847 + pe_stopped |= (1 << i);
1848 + }
1849 +
1850 + if (relax(end) < 0)
1851 + goto err;
1852 + }
1853 +
1854 + return 0;
1855 +
1856 +err:
1857 + pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
1858 + return -EIO;
1859 +}
1860 +
1861 +int pfe_ctrl_init(struct pfe *pfe)
1862 +{
1863 + struct pfe_ctrl *ctrl = &pfe->ctrl;
1864 + int id;
1865 +
1866 + pr_info("%s\n", __func__);
1867 +
1868 + mutex_init(&ctrl->mutex);
1869 + spin_lock_init(&ctrl->lock);
1870 +
1871 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
1872 + ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
1873 + ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
1874 + }
1875 +
1876 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
1877 + if (id == TMU2_ID)
1878 + continue;
1879 + ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
1880 + ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
1881 + }
1882 +
1883 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
1884 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
1885 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
1886 +#endif
1887 +
1888 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
1889 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
1890 + ROUTE_TABLE_BASEADDR;
1891 +
1892 + ctrl->dev = pfe->dev;
1893 +
1894 + pr_info("%s finished\n", __func__);
1895 +
1896 + return 0;
1897 +}
1898 +
1899 +void pfe_ctrl_exit(struct pfe *pfe)
1900 +{
1901 + pr_info("%s\n", __func__);
1902 +}
1903 --- /dev/null
1904 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
1905 @@ -0,0 +1,112 @@
1906 +/*
1907 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
1908 + * Copyright 2017 NXP
1909 + *
1910 + * This program is free software; you can redistribute it and/or modify
1911 + * it under the terms of the GNU General Public License as published by
1912 + * the Free Software Foundation; either version 2 of the License, or
1913 + * (at your option) any later version.
1914 + *
1915 + * This program is distributed in the hope that it will be useful,
1916 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1917 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1918 + * GNU General Public License for more details.
1919 + *
1920 + * You should have received a copy of the GNU General Public License
1921 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
1922 + */
1923 +
1924 +#ifndef _PFE_CTRL_H_
1925 +#define _PFE_CTRL_H_
1926 +
1927 +#include <linux/dmapool.h>
1928 +
1929 +#include "pfe_mod.h"
1930 +#include "pfe/pfe.h"
1931 +
1932 +#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
1933 +#define DMA_BUF_SIZE_256	0x100
1934 +/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
1935 +#define DMA_BUF_SIZE_512	0x200
1936 +/* 512-byte DMA buffers used by the RTP relay feature */
1937 +#define DMA_BUF_MIN_ALIGNMENT	8
1938 +#define DMA_BUF_BOUNDARY	(4 * 1024)
1939 +/* bursts cannot cross a 4k boundary */
1940 +
1941 +#define CMD_TX_ENABLE 0x0501
1942 +#define CMD_TX_DISABLE 0x0502
1943 +
1944 +#define CMD_RX_LRO 0x0011
1945 +#define CMD_PKTCAP_ENABLE 0x0d01
1946 +#define CMD_QM_EXPT_RATE 0x020c
1947 +
1948 +#define CLASS_DM_SH_STATIC (0x800)
1949 +#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
1950 +#define CLASS_DM_SYNC_MBOX (0x808)
1951 +#define CLASS_DM_MSG_MBOX (0x810)
1952 +#define CLASS_DM_DROP_CNTR (0x820)
1953 +#define CLASS_DM_RESUME (0x854)
1954 +#define CLASS_DM_PESTATUS (0x860)
1955 +
1956 +#define TMU_DM_SH_STATIC (0x80)
1957 +#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
1958 +#define TMU_DM_SYNC_MBOX (0x88)
1959 +#define TMU_DM_MSG_MBOX (0x90)
1960 +#define TMU_DM_RESUME (0xA0)
1961 +#define TMU_DM_PESTATUS (0xB0)
1962 +#define TMU_DM_CONTEXT (0x300)
1963 +#define TMU_DM_TX_TRANS (0x480)
1964 +
1965 +#define UTIL_DM_SH_STATIC (0x0)
1966 +#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
1967 +#define UTIL_DM_SYNC_MBOX (0x8)
1968 +#define UTIL_DM_MSG_MBOX (0x10)
1969 +#define UTIL_DM_DROP_CNTR (0x20)
1970 +#define UTIL_DM_RESUME (0x40)
1971 +#define UTIL_DM_PESTATUS (0x50)
1972 +
1973 +struct pfe_ctrl {
1974 + struct mutex mutex; /* to serialize pfe control access */
1975 + spinlock_t lock;
1976 +
1977 + void *dma_pool;
1978 + void *dma_pool_512;
1979 + void *dma_pool_128;
1980 +
1981 + struct device *dev;
1982 +
1983 + void *hash_array_baseaddr; /*
1984 + * Virtual base address of
1985 + * the conntrack hash array
1986 + */
1987 + unsigned long hash_array_phys_baseaddr; /*
1988 + * Physical base address of
1989 + * the conntrack hash array
1990 + */
1991 +
1992 + int (*event_cb)(u16, u16, u16*);
1993 +
1994 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
1995 + * Sync mailbox PFE
1996 + * internal address,
1997 + * initialized
1998 + * when parsing elf images
1999 + */
2000 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
2001 + * Msg mailbox PFE internal
2002 + * address, initialized
2003 + * when parsing elf images
2004 + */
2005 + unsigned int sys_clk; /* AXI clock value, in KHz */
2006 +};
2007 +
2008 +int pfe_ctrl_init(struct pfe *pfe);
2009 +void pfe_ctrl_exit(struct pfe *pfe);
2010 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
2011 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
2012 +int pe_reset_all(struct pfe_ctrl *ctrl);
2013 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
2014 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
2015 +int relax(unsigned long end);
2016 +
2017 +#endif /* _PFE_CTRL_H_ */
2018 --- /dev/null
2019 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
2020 @@ -0,0 +1,111 @@
2021 +/*
2022 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2023 + * Copyright 2017 NXP
2024 + *
2025 + * This program is free software; you can redistribute it and/or modify
2026 + * it under the terms of the GNU General Public License as published by
2027 + * the Free Software Foundation; either version 2 of the License, or
2028 + * (at your option) any later version.
2029 + *
2030 + * This program is distributed in the hope that it will be useful,
2031 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2032 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2033 + * GNU General Public License for more details.
2034 + *
2035 + * You should have received a copy of the GNU General Public License
2036 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2037 + */
2038 +
2039 +#include <linux/module.h>
2040 +#include <linux/debugfs.h>
2041 +#include <linux/platform_device.h>
2042 +
2043 +#include "pfe_mod.h"
2044 +
2045 +static int dmem_show(struct seq_file *s, void *unused)
2046 +{
2047 + u32 dmem_addr, val;
2048 + int id = (long int)s->private;
2049 + int i;
2050 +
2051 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
2052 + seq_printf(s, "%04x:", dmem_addr);
2053 +
2054 + for (i = 0; i < 8; i++) {
2055 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
2056 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
2057 + (val >> 8) & 0xff, (val >> 16) & 0xff,
2058 + (val >> 24) & 0xff);
2059 + }
2060 +
2061 + seq_puts(s, "\n");
2062 + }
2063 +
2064 + return 0;
2065 +}
2066 +
2067 +static int dmem_open(struct inode *inode, struct file *file)
2068 +{
2069 + return single_open(file, dmem_show, inode->i_private);
2070 +}
2071 +
2072 +static const struct file_operations dmem_fops = {
2073 + .open = dmem_open,
2074 + .read = seq_read,
2075 + .llseek = seq_lseek,
2076 + .release = single_release,
2077 +};
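+/*
+ * Each peN_dmem file dumps that class PE's DMEM as a hex listing, 32 bytes
+ * per line. With debugfs mounted at the usual /sys/kernel/debug
+ * (illustrative output only):
+ *
+ *	# cat /sys/kernel/debug/pfe/pe0_dmem
+ *	0000: 00 00 00 00 01 00 00 00 ...
+ *
+ * The "%04x:" prefix is the DMEM offset; values are read 4 bytes at a time
+ * via pe_dmem_read() and printed byte by byte, least-significant first.
+ */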
2078 +
2079 +int pfe_debugfs_init(struct pfe *pfe)
2080 +{
2081 + struct dentry *d;
2082 +
2083 + pr_info("%s\n", __func__);
2084 +
2085 + pfe->dentry = debugfs_create_dir("pfe", NULL);
2086 + if (IS_ERR_OR_NULL(pfe->dentry))
2087 + goto err_dir;
2088 +
2089 + d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
2090 + &dmem_fops);
2091 + if (IS_ERR_OR_NULL(d))
2092 + goto err_pe;
2093 +
2094 + d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
2095 + &dmem_fops);
2096 + if (IS_ERR_OR_NULL(d))
2097 + goto err_pe;
2098 +
2099 + d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
2100 + &dmem_fops);
2101 + if (IS_ERR_OR_NULL(d))
2102 + goto err_pe;
2103 +
2104 + d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
2105 + &dmem_fops);
2106 + if (IS_ERR_OR_NULL(d))
2107 + goto err_pe;
2108 +
2109 + d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
2110 + &dmem_fops);
2111 + if (IS_ERR_OR_NULL(d))
2112 + goto err_pe;
2113 +
2114 + d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
2115 + &dmem_fops);
2116 + if (IS_ERR_OR_NULL(d))
2117 + goto err_pe;
2118 +
2119 + return 0;
2120 +
2121 +err_pe:
2122 + debugfs_remove_recursive(pfe->dentry);
2123 +
2124 +err_dir:
2125 + return -1;
2126 +}
2127 +
2128 +void pfe_debugfs_exit(struct pfe *pfe)
2129 +{
2130 + debugfs_remove_recursive(pfe->dentry);
2131 +}
2132 --- /dev/null
2133 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
2134 @@ -0,0 +1,25 @@
2135 +/*
2136 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2137 + * Copyright 2017 NXP
2138 + *
2139 + * This program is free software; you can redistribute it and/or modify
2140 + * it under the terms of the GNU General Public License as published by
2141 + * the Free Software Foundation; either version 2 of the License, or
2142 + * (at your option) any later version.
2143 + *
2144 + * This program is distributed in the hope that it will be useful,
2145 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2146 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2147 + * GNU General Public License for more details.
2148 + *
2149 + * You should have received a copy of the GNU General Public License
2150 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2151 + */
2152 +
2153 +#ifndef _PFE_DEBUGFS_H_
2154 +#define _PFE_DEBUGFS_H_
2155 +
2156 +int pfe_debugfs_init(struct pfe *pfe);
2157 +void pfe_debugfs_exit(struct pfe *pfe);
2158 +
2159 +#endif /* _PFE_DEBUGFS_H_ */
2160 --- /dev/null
2161 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
2162 @@ -0,0 +1,2491 @@
2163 +/*
2164 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
2165 + * Copyright 2017 NXP
2166 + *
2167 + * This program is free software; you can redistribute it and/or modify
2168 + * it under the terms of the GNU General Public License as published by
2169 + * the Free Software Foundation; either version 2 of the License, or
2170 + * (at your option) any later version.
2171 + *
2172 + * This program is distributed in the hope that it will be useful,
2173 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2174 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2175 + * GNU General Public License for more details.
2176 + *
2177 + * You should have received a copy of the GNU General Public License
2178 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
2179 + */
2180 +
2181 +/* @pfe_eth.c
2182 + * Ethernet driver to handle the exception path for PFE.
2183 + * - uses HIF functions to send/receive packets.
2184 + * - uses ctrl function to start/stop interfaces.
2185 + * - uses direct register accesses to control phy operation.
2186 + */
2187 +#include <linux/version.h>
2188 +#include <linux/kernel.h>
2189 +#include <linux/interrupt.h>
2190 +#include <linux/dma-mapping.h>
2191 +#include <linux/dmapool.h>
2192 +#include <linux/netdevice.h>
2193 +#include <linux/etherdevice.h>
2194 +#include <linux/ethtool.h>
2195 +#include <linux/mii.h>
2196 +#include <linux/phy.h>
2197 +#include <linux/timer.h>
2198 +#include <linux/hrtimer.h>
2199 +#include <linux/platform_device.h>
2200 +
2201 +#include <net/ip.h>
2202 +#include <net/sock.h>
2203 +
2204 +#include <linux/io.h>
2205 +#include <asm/irq.h>
2206 +#include <linux/delay.h>
2207 +#include <linux/regmap.h>
2208 +#include <linux/i2c.h>
2209 +
2210 +#if defined(CONFIG_NF_CONNTRACK_MARK)
2211 +#include <net/netfilter/nf_conntrack.h>
2212 +#endif
2213 +
2214 +#include "pfe_mod.h"
2215 +#include "pfe_eth.h"
2216 +
2217 +static void *cbus_emac_base[3];
2218 +static void *cbus_gpi_base[3];
2219 +
2220 +/* Forward declarations */
2221 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
2222 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
2223 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
2224 + from_tx, int n_desc);
2225 +
2226 +unsigned int gemac_regs[] = {
2227 + 0x0004, /* Interrupt event */
2228 + 0x0008, /* Interrupt mask */
2229 + 0x0024, /* Ethernet control */
2230 + 0x0064, /* MIB Control/Status */
2231 + 0x0084, /* Receive control/status */
2232 + 0x00C4, /* Transmit control */
2233 + 0x00E4, /* Physical address low */
2234 + 0x00E8, /* Physical address high */
2235 +	0x0144, /* Transmit FIFO Watermark and Store and Forward Control */
2236 + 0x0190, /* Receive FIFO Section Full Threshold */
2237 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
2238 + 0x01B0, /* Frame Truncation Length */
2239 +};
2240 +
2241 +/********************************************************************/
2242 +/* SYSFS INTERFACE */
2243 +/********************************************************************/
2244 +
2245 +#ifdef PFE_ETH_NAPI_STATS
2246 +/*
2247 + * pfe_eth_show_napi_stats
2248 + */
2249 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
2250 + struct device_attribute *attr,
2251 + char *buf)
2252 +{
2253 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2254 + ssize_t len = 0;
2255 +
2256 + len += sprintf(buf + len, "sched: %u\n",
2257 + priv->napi_counters[NAPI_SCHED_COUNT]);
2258 + len += sprintf(buf + len, "poll: %u\n",
2259 + priv->napi_counters[NAPI_POLL_COUNT]);
2260 + len += sprintf(buf + len, "packet: %u\n",
2261 + priv->napi_counters[NAPI_PACKET_COUNT]);
2262 + len += sprintf(buf + len, "budget: %u\n",
2263 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
2264 + len += sprintf(buf + len, "desc: %u\n",
2265 + priv->napi_counters[NAPI_DESC_COUNT]);
2266 +
2267 + return len;
2268 +}
2269 +
2270 +/*
2271 + * pfe_eth_set_napi_stats
2272 + */
2273 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
2274 + struct device_attribute *attr,
2275 + const char *buf, size_t count)
2276 +{
2277 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2278 +
2279 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
2280 +
2281 + return count;
2282 +}
2283 +#endif
2284 +#ifdef PFE_ETH_TX_STATS
2285 +/* pfe_eth_show_tx_stats
2286 + *
2287 + */
2288 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
2289 + struct device_attribute *attr,
2290 + char *buf)
2291 +{
2292 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2293 + ssize_t len = 0;
2294 + int i;
2295 +
2296 + len += sprintf(buf + len, "TX queues stats:\n");
2297 +
2298 + for (i = 0; i < emac_txq_cnt; i++) {
2299 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2300 + i);
2301 +
2302 + len += sprintf(buf + len, "\n");
2303 + __netif_tx_lock_bh(tx_queue);
2304 +
2305 + hif_tx_lock(&pfe->hif);
2306 + len += sprintf(buf + len,
2307 + "Queue %2d : credits = %10d\n"
2308 + , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
2309 + len += sprintf(buf + len,
2310 + " tx packets = %10d\n"
2311 + , pfe->tmu_credit.tx_packets[priv->id][i]);
2312 + hif_tx_unlock(&pfe->hif);
2313 +
2314 +		/* Don't output additional stats if the queue was never used */
2315 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
2316 + goto skip;
2317 +
2318 + len += sprintf(buf + len,
2319 + " clean_fail = %10d\n"
2320 + , priv->clean_fail[i]);
2321 + len += sprintf(buf + len,
2322 + " stop_queue = %10d\n"
2323 + , priv->stop_queue_total[i]);
2324 + len += sprintf(buf + len,
2325 + " stop_queue_hif = %10d\n"
2326 + , priv->stop_queue_hif[i]);
2327 + len += sprintf(buf + len,
2328 + " stop_queue_hif_client = %10d\n"
2329 + , priv->stop_queue_hif_client[i]);
2330 + len += sprintf(buf + len,
2331 + " stop_queue_credit = %10d\n"
2332 + , priv->stop_queue_credit[i]);
2333 +skip:
2334 + __netif_tx_unlock_bh(tx_queue);
2335 + }
2336 + return len;
2337 +}
2338 +
2339 +/* pfe_eth_set_tx_stats
2340 + *
2341 + */
2342 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
2343 + struct device_attribute *attr,
2344 + const char *buf, size_t count)
2345 +{
2346 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2347 + int i;
2348 +
2349 + for (i = 0; i < emac_txq_cnt; i++) {
2350 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2351 + i);
2352 +
2353 + __netif_tx_lock_bh(tx_queue);
2354 + priv->clean_fail[i] = 0;
2355 + priv->stop_queue_total[i] = 0;
2356 + priv->stop_queue_hif[i] = 0;
2357 + priv->stop_queue_hif_client[i] = 0;
2358 + priv->stop_queue_credit[i] = 0;
2359 + __netif_tx_unlock_bh(tx_queue);
2360 + }
2361 +
2362 + return count;
2363 +}
2364 +#endif
2365 +/* pfe_eth_show_txavail
2366 + *
2367 + */
2368 +static ssize_t pfe_eth_show_txavail(struct device *dev,
2369 + struct device_attribute *attr,
2370 + char *buf)
2371 +{
2372 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2373 + ssize_t len = 0;
2374 + int i;
2375 +
2376 + for (i = 0; i < emac_txq_cnt; i++) {
2377 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
2378 + i);
2379 +
2380 + __netif_tx_lock_bh(tx_queue);
2381 +
2382 + len += sprintf(buf + len, "%d",
2383 + hif_lib_tx_avail(&priv->client, i));
2384 +
2385 + __netif_tx_unlock_bh(tx_queue);
2386 +
2387 + if (i == (emac_txq_cnt - 1))
2388 + len += sprintf(buf + len, "\n");
2389 + else
2390 + len += sprintf(buf + len, " ");
2391 + }
2392 +
2393 + return len;
2394 +}
2395 +
2396 +/* pfe_eth_show_default_priority
2397 + *
2398 + */
2399 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
2400 + struct device_attribute *attr,
2401 + char *buf)
2402 +{
2403 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2404 + unsigned long flags;
2405 + int rc;
2406 +
2407 + spin_lock_irqsave(&priv->lock, flags);
2408 + rc = sprintf(buf, "%d\n", priv->default_priority);
2409 + spin_unlock_irqrestore(&priv->lock, flags);
2410 +
2411 + return rc;
2412 +}
2413 +
2414 +/* pfe_eth_set_default_priority
2415 + *
2416 + */
2417 +
2418 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
2419 + struct device_attribute *attr,
2420 + const char *buf, size_t count)
2421 +{
2422 +	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
2423 +	unsigned long flags, prio;
2424 +
+	/* kstrtoul() returns 0 or -errno; the parsed value is returned
+	 * through its third argument.
+	 */
+	if (kstrtoul(buf, 0, &prio))
+		return -EINVAL;
+
2425 +	spin_lock_irqsave(&priv->lock, flags);
2426 +	priv->default_priority = prio;
2427 +	spin_unlock_irqrestore(&priv->lock, flags);
2428 +
2429 + return count;
2430 +}
2431 +
2432 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
2433 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
2434 + pfe_eth_set_default_priority);
2435 +
2436 +#ifdef PFE_ETH_NAPI_STATS
2437 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
2438 + pfe_eth_set_napi_stats);
2439 +#endif
2440 +
2441 +#ifdef PFE_ETH_TX_STATS
2442 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
2443 + pfe_eth_set_tx_stats);
2444 +#endif
2445 +
2446 +/*
2447 + * pfe_eth_sysfs_init
2448 + *
2449 + */
2450 +static int pfe_eth_sysfs_init(struct net_device *ndev)
2451 +{
2452 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2453 + int err;
2454 +
2455 + /* Initialize the default values */
2456 +
2457 + /*
2458 +	 * By default, packets without a conntrack entry use this low
2459 +	 * priority queue
2460 + */
2461 + priv->default_priority = 0;
2462 +
2463 + /* Create our sysfs files */
2464 + err = device_create_file(&ndev->dev, &dev_attr_default_priority);
2465 + if (err) {
2466 + netdev_err(ndev,
2467 + "failed to create default_priority sysfs files\n");
2468 + goto err_priority;
2469 + }
2470 +
2471 + err = device_create_file(&ndev->dev, &dev_attr_txavail);
2472 + if (err) {
2473 + netdev_err(ndev,
2474 + "failed to create default_priority sysfs files\n");
2475 + goto err_txavail;
2476 + }
2477 +
2478 +#ifdef PFE_ETH_NAPI_STATS
2479 + err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
2480 + if (err) {
2481 + netdev_err(ndev, "failed to create napi stats sysfs files\n");
2482 + goto err_napi;
2483 + }
2484 +#endif
2485 +
2486 +#ifdef PFE_ETH_TX_STATS
2487 + err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
2488 + if (err) {
2489 + netdev_err(ndev, "failed to create tx stats sysfs files\n");
2490 + goto err_tx;
2491 + }
2492 +#endif
2493 +
2494 + return 0;
2495 +
2496 +#ifdef PFE_ETH_TX_STATS
2497 +err_tx:
2498 +#endif
2499 +#ifdef PFE_ETH_NAPI_STATS
2500 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2501 +
2502 +err_napi:
2503 +#endif
2504 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2505 +
2506 +err_txavail:
2507 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2508 +
2509 +err_priority:
2510 + return -1;
2511 +}
2512 +
2513 +/* pfe_eth_sysfs_exit
2514 + *
2515 + */
2516 +void pfe_eth_sysfs_exit(struct net_device *ndev)
2517 +{
2518 +#ifdef PFE_ETH_TX_STATS
2519 + device_remove_file(&ndev->dev, &dev_attr_tx_stats);
2520 +#endif
2521 +
2522 +#ifdef PFE_ETH_NAPI_STATS
2523 + device_remove_file(&ndev->dev, &dev_attr_napi_stats);
2524 +#endif
2525 + device_remove_file(&ndev->dev, &dev_attr_txavail);
2526 + device_remove_file(&ndev->dev, &dev_attr_default_priority);
2527 +}
2528 +
2529 +/*************************************************************************/
2530 +/* ETHTOOL INTERFACE */
2531 +/*************************************************************************/
2532 +
2533 +/* MTIP GEMAC */
2534 +static const struct fec_stat {
2535 + char name[ETH_GSTRING_LEN];
2536 + u16 offset;
2537 +} fec_stats[] = {
2538 + /* RMON TX */
2539 + { "tx_dropped", RMON_T_DROP },
2540 + { "tx_packets", RMON_T_PACKETS },
2541 + { "tx_broadcast", RMON_T_BC_PKT },
2542 + { "tx_multicast", RMON_T_MC_PKT },
2543 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
2544 + { "tx_undersize", RMON_T_UNDERSIZE },
2545 + { "tx_oversize", RMON_T_OVERSIZE },
2546 + { "tx_fragment", RMON_T_FRAG },
2547 + { "tx_jabber", RMON_T_JAB },
2548 + { "tx_collision", RMON_T_COL },
2549 + { "tx_64byte", RMON_T_P64 },
2550 + { "tx_65to127byte", RMON_T_P65TO127 },
2551 + { "tx_128to255byte", RMON_T_P128TO255 },
2552 + { "tx_256to511byte", RMON_T_P256TO511 },
2553 + { "tx_512to1023byte", RMON_T_P512TO1023 },
2554 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2555 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2556 + { "tx_octets", RMON_T_OCTETS },
2557 +
2558 + /* IEEE TX */
2559 + { "IEEE_tx_drop", IEEE_T_DROP },
2560 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2561 + { "IEEE_tx_1col", IEEE_T_1COL },
2562 + { "IEEE_tx_mcol", IEEE_T_MCOL },
2563 + { "IEEE_tx_def", IEEE_T_DEF },
2564 + { "IEEE_tx_lcol", IEEE_T_LCOL },
2565 + { "IEEE_tx_excol", IEEE_T_EXCOL },
2566 + { "IEEE_tx_macerr", IEEE_T_MACERR },
2567 + { "IEEE_tx_cserr", IEEE_T_CSERR },
2568 + { "IEEE_tx_sqe", IEEE_T_SQE },
2569 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2570 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2571 +
2572 + /* RMON RX */
2573 + { "rx_packets", RMON_R_PACKETS },
2574 + { "rx_broadcast", RMON_R_BC_PKT },
2575 + { "rx_multicast", RMON_R_MC_PKT },
2576 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
2577 + { "rx_undersize", RMON_R_UNDERSIZE },
2578 + { "rx_oversize", RMON_R_OVERSIZE },
2579 + { "rx_fragment", RMON_R_FRAG },
2580 + { "rx_jabber", RMON_R_JAB },
2581 + { "rx_64byte", RMON_R_P64 },
2582 + { "rx_65to127byte", RMON_R_P65TO127 },
2583 + { "rx_128to255byte", RMON_R_P128TO255 },
2584 + { "rx_256to511byte", RMON_R_P256TO511 },
2585 + { "rx_512to1023byte", RMON_R_P512TO1023 },
2586 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2587 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2588 + { "rx_octets", RMON_R_OCTETS },
2589 +
2590 + /* IEEE RX */
2591 + { "IEEE_rx_drop", IEEE_R_DROP },
2592 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2593 + { "IEEE_rx_crc", IEEE_R_CRC },
2594 + { "IEEE_rx_align", IEEE_R_ALIGN },
2595 + { "IEEE_rx_macerr", IEEE_R_MACERR },
2596 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2597 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2598 +};
2599 +
2600 +static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
2601 + *stats, u64 *data)
2602 +{
2603 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2604 + int i;
2605 +
2606 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2607 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
2608 +}
2609 +
2610 +static void pfe_eth_gstrings(struct net_device *netdev,
2611 + u32 stringset, u8 *data)
2612 +{
2613 + int i;
2614 +
2615 + switch (stringset) {
2616 + case ETH_SS_STATS:
2617 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2618 + memcpy(data + i * ETH_GSTRING_LEN,
2619 + fec_stats[i].name, ETH_GSTRING_LEN);
2620 + break;
2621 + }
2622 +}
2623 +
2624 +static int pfe_eth_stats_count(struct net_device *ndev, int sset)
2625 +{
2626 + switch (sset) {
2627 + case ETH_SS_STATS:
2628 + return ARRAY_SIZE(fec_stats);
2629 + default:
2630 + return -EOPNOTSUPP;
2631 + }
2632 +}
2633 +
2634 +/*
2635 + * pfe_eth_gemac_reglen - Return the length of the register structure.
2636 + *
2637 + */
2638 +static int pfe_eth_gemac_reglen(struct net_device *ndev)
2639 +{
2640 + pr_info("%s()\n", __func__);
2641 +	/* ethtool expects a length in bytes, matching what get_regs fills */
+	return sizeof(gemac_regs);
2642 +}
2643 +
2644 +/*
2645 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
2646 + *
2647 + */
2648 +static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
2649 + *regs, void *regbuf)
2650 +{
2651 + int i;
2652 +
2653 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2654 + u32 *buf = (u32 *)regbuf;
2655 +
2656 + pr_info("%s()\n", __func__);
2657 +	for (i = 0; i < ARRAY_SIZE(gemac_regs); i++)
2658 + buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
2659 +}
2660 +
2661 +/*
2662 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
2663 + *
2664 + */
2665 +static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2666 +{
2667 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2668 +
2669 + if (wol->wolopts & ~WAKE_MAGIC)
2670 + return -EOPNOTSUPP;
2671 +
2672 + /* for MTIP we store wol->wolopts */
2673 + priv->wol = wol->wolopts;
2674 +
2675 + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2676 +
2677 + return 0;
2678 +}
2679 +
2680 +/*
2681 + *
2682 + * pfe_eth_get_wol - Get the WoL options.
2683 + *
2684 + */
2685 +static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
2686 + *wol)
2687 +{
2688 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2689 +
2690 + wol->supported = WAKE_MAGIC;
2691 + wol->wolopts = 0;
2692 +
2693 + if (priv->wol & WAKE_MAGIC)
2694 + wol->wolopts = WAKE_MAGIC;
2695 +
2696 + memset(&wol->sopass, 0, sizeof(wol->sopass));
2697 +}
2698 +
2699 +/*
2700 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
2701 + *
2702 + */
2703 +static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
2704 + *drvinfo)
2705 +{
2706 + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2707 + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
2708 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2709 + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
2710 +}
2711 +
2712 +/*
2713 + * pfe_eth_set_settings - Used to send commands to PHY.
2714 + *
2715 + */
2716 +static int pfe_eth_set_settings(struct net_device *ndev,
2717 + const struct ethtool_link_ksettings *cmd)
2718 +{
2719 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2720 + struct phy_device *phydev = priv->phydev;
2721 +
2722 + if (!phydev)
2723 + return -ENODEV;
2724 +
2725 + return phy_ethtool_ksettings_set(phydev, cmd);
2726 +}
2727 +
2728 +/*
2729 + * pfe_eth_getsettings - Return the current settings in the ethtool_cmd
2730 + * structure.
2731 + *
2732 + */
2733 +static int pfe_eth_get_settings(struct net_device *ndev,
2734 + struct ethtool_link_ksettings *cmd)
2735 +{
2736 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2737 + struct phy_device *phydev = priv->phydev;
2738 +
2739 + if (!phydev)
2740 + return -ENODEV;
2741 +
2742 + phy_ethtool_ksettings_get(phydev, cmd);
2743 +
2744 + return 0;
2745 +}
2746 +
2747 +/*
2748 + * pfe_eth_get_msglevel - Gets the debug message mask.
2749 + *
2750 + */
2751 +static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
2752 +{
2753 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2754 +
2755 + return priv->msg_enable;
2756 +}
2757 +
2758 +/*
2759 + * pfe_eth_set_msglevel - Sets the debug message mask.
2760 + *
2761 + */
2762 +static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
2763 +{
2764 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2765 +
2766 + priv->msg_enable = data;
2767 +}
2768 +
2769 +#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
2770 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
2771 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
2772 + HIF_RX_COAL_CLKS_PER_USEC)
2773 +
2774 +/*
2775 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
2776 + *
2777 + */
2778 +static int pfe_eth_set_coalesce(struct net_device *ndev,
2779 + struct ethtool_coalesce *ec)
2780 +{
2781 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
2782 + return -EINVAL;
2783 +
2784 + if (!ec->rx_coalesce_usecs) {
2785 + writel(0, HIF_INT_COAL);
2786 + return 0;
2787 + }
2788 +
2789 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
2790 + HIF_INT_COAL_ENABLE, HIF_INT_COAL);
2791 +
2792 + return 0;
2793 +}
2794 +
2795 +/*
2796 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
2797 + *
2798 + */
2799 +static int pfe_eth_get_coalesce(struct net_device *ndev,
2800 + struct ethtool_coalesce *ec)
2801 +{
2802 + int reg_val = readl(HIF_INT_COAL);
2803 +
2804 + if (reg_val & HIF_INT_COAL_ENABLE)
2805 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
2806 + HIF_RX_COAL_CLKS_PER_USEC;
2807 + else
2808 + ec->rx_coalesce_usecs = 0;
2809 +
2810 + return 0;
2811 +}
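+/*
+ * Units worked example (illustrative): HIF_RX_COAL_CLKS_PER_USEC converts
+ * the AXI clock (ctrl.sys_clk, in kHz) into clocks per microsecond. With a
+ * 250 MHz AXI clock, sys_clk = 250000, so one microsecond is 250 clocks,
+ * and "ethtool -C ethX rx-usecs 100" programs 100 * 250 = 25000 clocks
+ * (plus HIF_INT_COAL_ENABLE) into HIF_INT_COAL. The 250 MHz figure is an
+ * assumption for the example, not a statement about LS1012A.
+ */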
2812 +
2813 +/*
2814 + * pfe_eth_set_pauseparam - Sets pause parameters
2815 + *
2816 + */
2817 +static int pfe_eth_set_pauseparam(struct net_device *ndev,
2818 + struct ethtool_pauseparam *epause)
2819 +{
2820 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2821 +
2822 + if (epause->tx_pause != epause->rx_pause) {
2823 + netdev_info(ndev,
2824 + "hardware only support enable/disable both tx and rx\n");
2825 + return -EINVAL;
2826 + }
2827 +
2828 + priv->pause_flag = 0;
2829 + priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
2830 + priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
2831 +
2832 + if (epause->rx_pause || epause->autoneg) {
2833 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
2834 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
2835 + EGPI_PAUSE_ENABLE),
2836 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2837 + if (priv->phydev) {
2838 + priv->phydev->supported |= ADVERTISED_Pause |
2839 + ADVERTISED_Asym_Pause;
2840 + priv->phydev->advertising |= ADVERTISED_Pause |
2841 + ADVERTISED_Asym_Pause;
2842 + }
2843 + } else {
2844 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
2845 + writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
2846 + ~EGPI_PAUSE_ENABLE),
2847 + priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
2848 + if (priv->phydev) {
2849 + priv->phydev->supported &= ~(ADVERTISED_Pause |
2850 + ADVERTISED_Asym_Pause);
2851 + priv->phydev->advertising &= ~(ADVERTISED_Pause |
2852 + ADVERTISED_Asym_Pause);
2853 + }
2854 + }
2855 +
2856 + return 0;
2857 +}
2858 +
2859 +/*
2860 + * pfe_eth_get_pauseparam - Gets pause parameters
2861 + *
2862 + */
2863 +static void pfe_eth_get_pauseparam(struct net_device *ndev,
2864 + struct ethtool_pauseparam *epause)
2865 +{
2866 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
2867 +
2868 + epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
2869 + epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
2870 + epause->rx_pause = epause->tx_pause;
2871 +}
2872 +
2873 +/*
2874 + * pfe_eth_get_hash
2875 + */
2876 +#define PFE_HASH_BITS 6 /* #bits in hash */
2877 +#define CRC32_POLY 0xEDB88320
2878 +
2879 +static int pfe_eth_get_hash(u8 *addr)
2880 +{
2881 + unsigned int i, bit, data, crc, hash;
2882 +
2883 + /* calculate crc32 value of mac address */
2884 + crc = 0xffffffff;
2885 +
2886 + for (i = 0; i < 6; i++) {
2887 + data = addr[i];
2888 + for (bit = 0; bit < 8; bit++, data >>= 1) {
2889 + crc = (crc >> 1) ^
2890 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
2891 + }
2892 + }
2893 +
2894 + /*
2895 + * only upper 6 bits (PFE_HASH_BITS) are used
2896 + * which point to specific bit in the hash registers
2897 + */
2898 + hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
2899 +
2900 + return hash;
2901 +}
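+/*
+ * Usage sketch (hypothetical caller): the 6-bit result selects one bit in
+ * the GEMAC's 64-bit group-address hash register pair. The variable names
+ * below are illustrative, not taken from this driver:
+ *
+ *	int hash = pfe_eth_get_hash(ha->addr);
+ *
+ *	if (hash >= 32)
+ *		grp_hash_hi |= BIT(hash - 32);
+ *	else
+ *		grp_hash_lo |= BIT(hash);
+ */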
2902 +
2903 +const struct ethtool_ops pfe_ethtool_ops = {
2904 + .get_drvinfo = pfe_eth_get_drvinfo,
2905 + .get_regs_len = pfe_eth_gemac_reglen,
2906 + .get_regs = pfe_eth_gemac_get_regs,
2907 + .get_link = ethtool_op_get_link,
2908 + .get_wol = pfe_eth_get_wol,
2909 + .set_wol = pfe_eth_set_wol,
2910 + .set_pauseparam = pfe_eth_set_pauseparam,
2911 + .get_pauseparam = pfe_eth_get_pauseparam,
2912 + .get_strings = pfe_eth_gstrings,
2913 + .get_sset_count = pfe_eth_stats_count,
2914 + .get_ethtool_stats = pfe_eth_fill_stats,
2915 + .get_msglevel = pfe_eth_get_msglevel,
2916 + .set_msglevel = pfe_eth_set_msglevel,
2917 + .set_coalesce = pfe_eth_set_coalesce,
2918 + .get_coalesce = pfe_eth_get_coalesce,
2919 + .get_link_ksettings = pfe_eth_get_settings,
2920 + .set_link_ksettings = pfe_eth_set_settings,
2921 +};
2922 +
2923 +/* pfe_eth_mdio_reset
2924 + */
2925 +int pfe_eth_mdio_reset(struct mii_bus *bus)
2926 +{
2927 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2928 + u32 phy_speed;
2929 +
2930 + netif_info(priv, hw, priv->ndev, "%s\n", __func__);
2931 +
2932 + mutex_lock(&bus->mdio_lock);
2933 +
2934 + /*
2935 + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2936 + *
2937 + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2938 + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
2939 + */
2940 + phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
2941 + << EMAC_MII_SPEED_SHIFT);
2942 + phy_speed |= EMAC_HOLDTIME(0x5);
2943 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
2944 +
2945 + mutex_unlock(&bus->mdio_lock);
2946 +
2947 + return 0;
2948 +}
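+/*
+ * Divider worked example (sys_clk is in kHz): with an assumed 250 MHz
+ * reference clock, DIV_ROUND_UP(250000 * 1000, 4000000) = 63, so the FEC
+ * formula gives MDC = ref_freq / (MII_SPEED * 2) = 250 MHz / 126, roughly
+ * 1.98 MHz, safely below the 2.5 MHz MDC limit.
+ */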
2949 +
2950 +/* pfe_eth_gemac_phy_timeout
2951 + *
2952 + */
2953 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
2954 +{
2955 + while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
2956 + EMAC_IEVENT_MII)) {
2957 + if (timeout-- <= 0)
2958 + return -1;
2959 + usleep_range(10, 20);
2960 + }
2961 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
2962 + return 0;
2963 +}
2964 +
2965 +static int pfe_eth_mdio_mux(u8 muxval)
2966 +{
2967 + struct i2c_adapter *a;
2968 + struct i2c_msg msg;
2969 + unsigned char buf[2];
2970 + int ret;
2971 +
2972 + a = i2c_get_adapter(0);
2973 + if (!a)
2974 + return -ENODEV;
2975 +
2976 +	/* write the mux value to register 0x54 of the chip at i2c addr 0x66 */
2977 + buf[0] = 0x54; /* reg number */
2978 + buf[1] = (muxval << 6) | 0x3; /* data */
2979 + msg.addr = 0x66;
2980 + msg.buf = buf;
2981 + msg.len = 2;
2982 + msg.flags = 0;
2983 + ret = i2c_transfer(a, &msg, 1);
2984 + i2c_put_adapter(a);
2985 + if (ret != 1)
2986 + return -ENODEV;
2987 + return 0;
2988 +}
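+/*
+ * Data byte worked example: the mux value occupies the top two bits of the
+ * payload, so muxval = 2 yields buf[1] = (2 << 6) | 0x3 = 0x83, written to
+ * register 0x54 of the device at i2c address 0x66.
+ */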
2989 +
2990 +static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
2991 + int dev_addr, int regnum)
2992 +{
2993 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
2994 +
2995 + __raw_writel(EMAC_MII_DATA_PA(mii_id) |
2996 + EMAC_MII_DATA_RA(dev_addr) |
2997 + EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
2998 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
2999 +
3000 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3001 + netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
3002 + __func__);
3003 + return -1;
3004 + }
3005 +
3006 + return 0;
3007 +}
3008 +
3009 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3010 + u16 value)
3011 +{
3012 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3013 +
3014 +	/* To access external PHYs on the QDS board, the mux needs configuring */
3015 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3016 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3017 +
3018 + if (regnum & MII_ADDR_C45) {
3019 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3020 + regnum & 0xffff);
3021 + __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
3022 + EMAC_MII_DATA_PA(mii_id) |
3023 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3024 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3025 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3026 + } else {
3027 + /* start a write op */
3028 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
3029 + EMAC_MII_DATA_PA(mii_id) |
3030 + EMAC_MII_DATA_RA(regnum) |
3031 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
3032 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3033 + }
3034 +
3035 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3036 + netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
3037 + __func__);
3038 + return -1;
3039 + }
3040 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3041 + mii_id, regnum, value);
3042 +
3043 + return 0;
3044 +}
3045 +
3046 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
3047 +{
3048 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
3049 + u16 value = 0;
3050 +
3051 +	/* To access external PHYs on the QDS board, the mux needs configuring */
3052 + if ((mii_id) && (pfe->mdio_muxval[mii_id]))
3053 + pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
3054 +
3055 + if (regnum & MII_ADDR_C45) {
3056 + pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
3057 + regnum & 0xffff);
3058 + __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
3059 + EMAC_MII_DATA_PA(mii_id) |
3060 + EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
3061 + EMAC_MII_DATA_TA,
3062 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
3063 + } else {
3064 + /* start a read op */
3065 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
3066 + EMAC_MII_DATA_PA(mii_id) |
3067 + EMAC_MII_DATA_RA(regnum) |
3068 + EMAC_MII_DATA_TA, priv->PHY_baseaddr +
3069 + EMAC_MII_DATA_REG);
3070 + }
3071 +
3072 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
3073 + netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
3074 + return -1;
3075 + }
3076 +
3077 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
3078 + EMAC_MII_DATA_REG));
3079 + netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
3080 + mii_id, regnum, value);
3081 + return value;
3082 +}
3083 +
3084 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
3085 + struct ls1012a_mdio_platform_data *minfo)
3086 +{
3087 + struct mii_bus *bus;
3088 + int rc, ii;
3089 + struct phy_device *phydev;
3090 +
3091 +	netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3093 +
3094 + bus = mdiobus_alloc();
3095 + if (!bus) {
3096 + netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
3097 + rc = -ENOMEM;
3098 + goto err0;
3099 + }
3100 +
3101 + bus->name = "ls1012a MDIO Bus";
3102 + bus->read = &pfe_eth_mdio_read;
3103 + bus->write = &pfe_eth_mdio_write;
3104 + bus->reset = &pfe_eth_mdio_reset;
3105 + snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
3106 + bus->priv = priv;
3107 +
3108 + bus->phy_mask = minfo->phy_mask;
3109 + priv->mdc_div = minfo->mdc_div;
3110 +
3111 + if (!priv->mdc_div)
3112 + priv->mdc_div = 64;
3113 +
3114 + bus->irq[0] = minfo->irq[0];
3115 +
3116 + bus->parent = priv->pfe->dev;
3117 +
3118 + netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
3119 + __func__, priv->mdc_div, bus->phy_mask);
3120 + rc = mdiobus_register(bus);
3121 + if (rc) {
3122 + netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
3123 + bus->name);
3124 + goto err1;
3125 + }
3126 +
3127 + priv->mii_bus = bus;
3128 +
3129 +	/* For clause 45 we need to call get_phy_device() with its
3130 +	 * third argument set to true and then register the PHY device
3131 +	 * via phy_device_register()
3132 +	 */
3133 +
3134 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) {
3135 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
3136 + phydev = get_phy_device(priv->mii_bus,
3137 + priv->einfo->phy_id + ii, true);
3138 + if (!phydev || IS_ERR(phydev)) {
3139 + rc = -EIO;
3140 + netdev_err(priv->ndev, "fail to get device\n");
3141 + goto err1;
3142 + }
3143 + rc = phy_device_register(phydev);
3144 + if (rc) {
3145 + phy_device_free(phydev);
3146 + netdev_err(priv->ndev,
3147 + "phy_device_register() failed\n");
3148 + goto err1;
3149 + }
3150 + }
3151 + }
3152 +
3153 + pfe_eth_mdio_reset(bus);
3154 +
3155 + return 0;
3156 +
3157 +err1:
3158 + mdiobus_free(bus);
3159 +err0:
3160 + return rc;
3161 +}
3162 +
3163 +/* pfe_eth_mdio_exit
3164 + */
3165 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
3166 +{
3167 + if (!bus)
3168 + return;
3169 +
3170 +	netif_info((struct pfe_eth_priv_s *)bus->priv, drv,
3171 +		   ((struct pfe_eth_priv_s *)bus->priv)->ndev, "%s\n", __func__);
3172 +
3173 + mdiobus_unregister(bus);
3174 + mdiobus_free(bus);
3175 +}
3176 +
3177 +/* pfe_get_phydev_speed
3178 + */
3179 +static int pfe_get_phydev_speed(struct phy_device *phydev)
3180 +{
3181 + switch (phydev->speed) {
3182 + case 10:
3183 + return SPEED_10M;
3184 + case 100:
3185 + return SPEED_100M;
3186 + case 1000:
3187 + default:
3188 + return SPEED_1000M;
3189 + }
3190 +}
3191 +
3192 +/* pfe_set_rgmii_speed
3193 + */
3194 +#define RGMIIPCR 0x434
3195 +/* RGMIIPCR bit definitions */
3196 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
3197 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
3198 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
3199 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
3200 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
3201 +
3202 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
3203 +{
3204 + u32 rgmii_pcr;
3205 +
3206 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
3207 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
3208 +
3209 + switch (phydev->speed) {
3210 + case 10:
3211 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
3212 + break;
3213 + case 1000:
3214 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
3215 + break;
3216 + case 100:
3217 + default:
3218 + /* Default is 100M */
3219 + break;
3220 + }
3221 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
3222 +}
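+
+/*
+ * Worked example: at 1 Gb/s the read-modify-write above clears
+ * SCFG_RGMIIPCR_SETSP_10M and sets SCFG_RGMIIPCR_SETSP_1000M, so a prior
+ * 100M value of SCFG_RGMIIPCR_EN_AUTO | SCFG_RGMIIPCR_SETFD (0x9) becomes
+ * 0xd; bits other than the two speed-select bits are preserved.
+ */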
3223 +
3224 +/* pfe_get_phydev_duplex
3225 + */
3226 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
3227 +{
3228 + /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
3229 + return DUPLEX_FULL;
3230 +}
3231 +
3232 +/* pfe_eth_adjust_link
3233 + */
3234 +static void pfe_eth_adjust_link(struct net_device *ndev)
3235 +{
3236 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3237 + unsigned long flags;
3238 + struct phy_device *phydev = priv->phydev;
3239 + int new_state = 0;
3240 +
3241 + netif_info(priv, drv, ndev, "%s\n", __func__);
3242 +
3243 + spin_lock_irqsave(&priv->lock, flags);
3244 +
3245 + if (phydev->link) {
3246 + /*
3247 + * Now we make sure that we can be in full duplex mode.
3248 + * If not, we operate in half-duplex mode.
3249 + */
3250 + if (phydev->duplex != priv->oldduplex) {
3251 + new_state = 1;
3252 + gemac_set_duplex(priv->EMAC_baseaddr,
3253 + pfe_get_phydev_duplex(phydev));
3254 + priv->oldduplex = phydev->duplex;
3255 + }
3256 +
3257 + if (phydev->speed != priv->oldspeed) {
3258 + new_state = 1;
3259 + gemac_set_speed(priv->EMAC_baseaddr,
3260 + pfe_get_phydev_speed(phydev));
3261 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
3262 + pfe_set_rgmii_speed(phydev);
3263 + priv->oldspeed = phydev->speed;
3264 + }
3265 +
3266 + if (!priv->oldlink) {
3267 + new_state = 1;
3268 + priv->oldlink = 1;
3269 + }
3270 +
3271 + } else if (priv->oldlink) {
3272 + new_state = 1;
3273 + priv->oldlink = 0;
3274 + priv->oldspeed = 0;
3275 + priv->oldduplex = -1;
3276 + }
3277 +
3278 + if (new_state && netif_msg_link(priv))
3279 + phy_print_status(phydev);
3280 +
3281 + spin_unlock_irqrestore(&priv->lock, flags);
3282 +}
3283 +
3284 +/* pfe_phy_exit
3285 + */
3286 +static void pfe_phy_exit(struct net_device *ndev)
3287 +{
3288 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3289 +
3290 + netif_info(priv, drv, ndev, "%s\n", __func__);
3291 +
3292 + phy_disconnect(priv->phydev);
3293 + priv->phydev = NULL;
3294 +}
3295 +
3296 +/* pfe_eth_stop
3297 + */
3298 +static void pfe_eth_stop(struct net_device *ndev, int wake)
3299 +{
3300 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3301 +
3302 + netif_info(priv, drv, ndev, "%s\n", __func__);
3303 +
3304 + if (wake) {
3305 + gemac_tx_disable(priv->EMAC_baseaddr);
3306 + } else {
3307 + gemac_disable(priv->EMAC_baseaddr);
3308 + gpi_disable(priv->GPI_baseaddr);
3309 +
3310 + if (priv->phydev)
3311 + phy_stop(priv->phydev);
3312 + }
3313 +}
3314 +
3315 +/* pfe_eth_start
3316 + */
3317 +static int pfe_eth_start(struct pfe_eth_priv_s *priv)
3318 +{
3319 + netif_info(priv, drv, priv->ndev, "%s\n", __func__);
3320 +
3321 + if (priv->phydev)
3322 + phy_start(priv->phydev);
3323 +
3324 + gpi_enable(priv->GPI_baseaddr);
3325 + gemac_enable(priv->EMAC_baseaddr);
3326 +
3327 + return 0;
3328 +}
3329 +
3330 +/*
3331 + * Configure on chip serdes through mdio
3332 + */
3333 +static void ls1012a_configure_serdes(struct net_device *ndev)
3334 +{
3335 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
3336 + int sgmii_2500 = 0;
3337 + struct mii_bus *bus = priv->mii_bus;
3338 + u16 value = 0;
3339 +
3340 + if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
3341 + sgmii_2500 = 1;
3342 +
3343 + netif_info(priv, drv, ndev, "%s\n", __func__);
3344 + /* PCS configuration done with corresponding GEMAC */
3345 +
3346 + pfe_eth_mdio_read(bus, 0, 0);
3347 + pfe_eth_mdio_read(bus, 0, 1);
3348 +
3349 +	/* These settings were provided by the validation team */
3350 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
3351 + if (sgmii_2500) {
3352 + pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
3353 + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
3354 + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
3355 + pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
3356 +		/* Autonegotiation needs to be disabled for 2.5G SGMII mode */
3357 + value = 0x0140;
3358 + pfe_eth_mdio_write(bus, 0, 0x0, value);
3359 + } else {
3360 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
3361 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
3362 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
3363 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
3364 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
3365 + }
3366 +}
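+
+/*
+ * For reference, assuming the PCS uses the standard IEEE register 0 layout:
+ * 0x1140 = BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX for 1G SGMII,
+ * while 0x0140 drops BMCR_ANENABLE for the fixed-speed 2.5G mode, matching
+ * the comment above. The other register values are opaque vendor settings.
+ */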
3367 +
3368 +/*
3369 + * pfe_phy_init
3370 + *
3371 + */
3372 +static int pfe_phy_init(struct net_device *ndev)
3373 +{
3374 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3375 + struct phy_device *phydev;
3376 + char phy_id[MII_BUS_ID_SIZE + 3];
3377 + char bus_id[MII_BUS_ID_SIZE];
3378 + phy_interface_t interface;
3379 +
3380 + priv->oldlink = 0;
3381 + priv->oldspeed = 0;
3382 + priv->oldduplex = -1;
3383 +
3384 + snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
3385 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
3386 + priv->einfo->phy_id);
3387 +
3388 + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
3389 + interface = priv->einfo->mii_config;
3390 + if ((interface == PHY_INTERFACE_MODE_SGMII) ||
3391 + (interface == PHY_INTERFACE_MODE_2500SGMII)) {
3392 +		/* Configure the SGMII PCS */
3393 +		if (pfe->scfg) {
3394 +			/* Configure MDIO access via the SerDes */
3395 + regmap_write(pfe->scfg, 0x484, 0x00000000);
3396 + }
3397 + ls1012a_configure_serdes(ndev);
3398 + }
3399 +
3400 + if (pfe->scfg) {
3401 +		/* Configure MDIO access via the pad */
3402 + regmap_write(pfe->scfg, 0x484, 0x80000000);
3403 + }
3404 +
3405 + priv->oldlink = 0;
3406 + priv->oldspeed = 0;
3407 + priv->oldduplex = -1;
3408 + pr_info("%s interface %x\n", __func__, interface);
3409 + phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
3410 +
3411 + if (IS_ERR(phydev)) {
3412 + netdev_err(ndev, "phy_connect() failed\n");
3413 + return PTR_ERR(phydev);
3414 + }
3415 +
3416 + priv->phydev = phydev;
3417 + phydev->irq = PHY_POLL;
3418 +
3419 + return 0;
3420 +}
3421 +
3422 +/* pfe_gemac_init
3423 + */
3424 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
3425 +{
3426 + struct gemac_cfg cfg;
3427 +
3428 + netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
3429 +
3430 + cfg.speed = SPEED_1000M;
3431 + cfg.duplex = DUPLEX_FULL;
3432 +
3433 + gemac_set_config(priv->EMAC_baseaddr, &cfg);
3434 + gemac_allow_broadcast(priv->EMAC_baseaddr);
3435 + gemac_enable_1536_rx(priv->EMAC_baseaddr);
3436 + gemac_enable_rx_jmb(priv->EMAC_baseaddr);
3437 + gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
3438 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
3439 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
3440 +
3441 +	/* GEM will perform checksum verification */
3442 + if (priv->ndev->features & NETIF_F_RXCSUM)
3443 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
3444 + else
3445 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
3446 +
3447 + return 0;
3448 +}
3449 +
3450 +/* pfe_eth_event_handler
3451 + */
3452 +static int pfe_eth_event_handler(void *data, int event, int qno)
3453 +{
3454 + struct pfe_eth_priv_s *priv = data;
3455 +
3456 + switch (event) {
3457 + case EVENT_RX_PKT_IND:
3458 +
3459 + if (qno == 0) {
3460 + if (napi_schedule_prep(&priv->high_napi)) {
3461 + netif_info(priv, intr, priv->ndev,
3462 + "%s: schedule high prio poll\n"
3463 + , __func__);
3464 +
3465 +#ifdef PFE_ETH_NAPI_STATS
3466 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3467 +#endif
3468 +
3469 + __napi_schedule(&priv->high_napi);
3470 + }
3471 + } else if (qno == 1) {
3472 + if (napi_schedule_prep(&priv->low_napi)) {
3473 + netif_info(priv, intr, priv->ndev,
3474 + "%s: schedule low prio poll\n"
3475 + , __func__);
3476 +
3477 +#ifdef PFE_ETH_NAPI_STATS
3478 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3479 +#endif
3480 + __napi_schedule(&priv->low_napi);
3481 + }
3482 + } else if (qno == 2) {
3483 + if (napi_schedule_prep(&priv->lro_napi)) {
3484 + netif_info(priv, intr, priv->ndev,
3485 + "%s: schedule lro prio poll\n"
3486 + , __func__);
3487 +
3488 +#ifdef PFE_ETH_NAPI_STATS
3489 + priv->napi_counters[NAPI_SCHED_COUNT]++;
3490 +#endif
3491 + __napi_schedule(&priv->lro_napi);
3492 + }
3493 + }
3494 +
3495 + break;
3496 +
3497 + case EVENT_TXDONE_IND:
3498 + pfe_eth_flush_tx(priv);
3499 + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
3500 + break;
3501 + case EVENT_HIGH_RX_WM:
3502 + default:
3503 + break;
3504 + }
3505 +
3506 + return 0;
3507 +}
3508 +
3509 +/* pfe_eth_open
3510 + */
3511 +static int pfe_eth_open(struct net_device *ndev)
3512 +{
3513 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3514 + struct hif_client_s *client;
3515 + int rc;
3516 +
3517 + netif_info(priv, ifup, ndev, "%s\n", __func__);
3518 +
3519 + /* Register client driver with HIF */
3520 + client = &priv->client;
3521 + memset(client, 0, sizeof(*client));
3522 + client->id = PFE_CL_GEM0 + priv->id;
3523 + client->tx_qn = emac_txq_cnt;
3524 + client->rx_qn = EMAC_RXQ_CNT;
3525 + client->priv = priv;
3526 + client->pfe = priv->pfe;
3527 + client->event_handler = pfe_eth_event_handler;
3528 +
3529 + client->tx_qsize = EMAC_TXQ_DEPTH;
3530 + client->rx_qsize = EMAC_RXQ_DEPTH;
3531 +
3532 + rc = hif_lib_client_register(client);
3533 + if (rc) {
3534 + netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
3535 + __func__, client->id);
3536 + goto err0;
3537 + }
3538 +
3539 + netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
3540 + client);
3541 +
3542 + pfe_gemac_init(priv);
3543 +
3544 + if (!is_valid_ether_addr(ndev->dev_addr)) {
3545 + netdev_err(ndev, "%s: invalid MAC address\n", __func__);
3546 + rc = -EADDRNOTAVAIL;
3547 + goto err1;
3548 + }
3549 +
3550 + gemac_set_laddrN(priv->EMAC_baseaddr,
3551 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
3552 +
3553 + napi_enable(&priv->high_napi);
3554 + napi_enable(&priv->low_napi);
3555 + napi_enable(&priv->lro_napi);
3556 +
3557 + rc = pfe_eth_start(priv);
3558 +
3559 + netif_tx_wake_all_queues(ndev);
3560 +
3561 + return rc;
3562 +
3563 +err1:
3564 + hif_lib_client_unregister(&priv->client);
3565 +
3566 +err0:
3567 + return rc;
3568 +}
3569 +
3570 +/*
3571 + * pfe_eth_shutdown
3572 + */
3573 +int pfe_eth_shutdown(struct net_device *ndev, int wake)
3574 +{
3575 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3576 + int i, qstatus;
3577 + unsigned long next_poll = jiffies + 1, end = jiffies +
3578 + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3579 + int tx_pkts, prv_tx_pkts;
3580 +
3581 + netif_info(priv, ifdown, ndev, "%s\n", __func__);
3582 +
3583 + for (i = 0; i < emac_txq_cnt; i++)
3584 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
3585 +
3586 + netif_tx_stop_all_queues(ndev);
3587 +
3588 + do {
3589 + tx_pkts = 0;
3590 + pfe_eth_flush_tx(priv);
3591 +
3592 + for (i = 0; i < emac_txq_cnt; i++)
3593 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
3594 +
3595 + if (tx_pkts) {
3596 +			/* Don't wait forever; break if we exceed the max timeout */
3597 + if (time_after(jiffies, end)) {
3598 + pr_err(
3599 + "(%s)Tx is not complete after %dmsec\n",
3600 + ndev->name, TX_POLL_TIMEOUT_MS);
3601 + break;
3602 + }
3603 +
3604 + pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
3605 + , __func__, ndev->name, tx_pkts);
3606 + if (need_resched())
3607 + schedule();
3608 + }
3609 +
3610 + } while (tx_pkts);
3611 +
3612 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
3613 +
3614 + prv_tx_pkts = tmu_pkts_processed(priv->id);
3615 +	/*
3616 +	 * Wait until the TMU has transmitted all pending packets:
3617 +	 * poll tmu_qstatus and the TMU processed-packet count every 10 ms.
3618 +	 * The TMU is considered busy if its queue is non-empty or if it
3619 +	 * processed any packets since the last poll.
3620 +	 */
3621 + while (1) {
3622 + if (time_after(jiffies, next_poll)) {
3623 + tx_pkts = tmu_pkts_processed(priv->id);
3624 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
3625 +
3626 + if (!qstatus && (tx_pkts == prv_tx_pkts))
3627 + break;
3628 +			/* Don't wait forever; break if we exceed the max
3629 +			 * timeout (TX_POLL_TIMEOUT_MS)
3630 +			 */
3631 + if (time_after(jiffies, end)) {
3632 + pr_err("TMU%d is busy after %dmsec\n",
3633 + priv->id, TX_POLL_TIMEOUT_MS);
3634 + break;
3635 + }
3636 + prv_tx_pkts = tx_pkts;
3637 + next_poll++;
3638 + }
3639 + if (need_resched())
3640 + schedule();
3641 + }
3642 +	/* Wait a little longer for any in-flight packets to complete */
3643 + next_poll = jiffies + 1;
3644 + while (1) {
3645 + if (time_after(jiffies, next_poll))
3646 + break;
3647 + if (need_resched())
3648 + schedule();
3649 + }
3650 +
3651 + pfe_eth_stop(ndev, wake);
3652 +
3653 + napi_disable(&priv->lro_napi);
3654 + napi_disable(&priv->low_napi);
3655 + napi_disable(&priv->high_napi);
3656 +
3657 + hif_lib_client_unregister(&priv->client);
3658 +
3659 + return 0;
3660 +}
3661 +
3662 +/* pfe_eth_close
3663 + *
3664 + */
3665 +static int pfe_eth_close(struct net_device *ndev)
3666 +{
3667 + pfe_eth_shutdown(ndev, 0);
3668 +
3669 + return 0;
3670 +}
3671 +
3672 +/* pfe_eth_suspend
3673 + *
3674 + * return value : 1 if netdevice is configured to wakeup system
3675 + * 0 otherwise
3676 + */
3677 +int pfe_eth_suspend(struct net_device *ndev)
3678 +{
3679 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3680 + int retval = 0;
3681 +
3682 + if (priv->wol) {
3683 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
3684 + retval = 1;
3685 + }
3686 + pfe_eth_shutdown(ndev, priv->wol);
3687 +
3688 + return retval;
3689 +}
3690 +
3691 +/* pfe_eth_resume
3692 + *
3693 + */
3694 +int pfe_eth_resume(struct net_device *ndev)
3695 +{
3696 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3697 +
3698 + if (priv->wol)
3699 + gemac_set_wol(priv->EMAC_baseaddr, 0);
3700 + gemac_tx_enable(priv->EMAC_baseaddr);
3701 +
3702 + return pfe_eth_open(ndev);
3703 +}
3704 +
3705 +/* pfe_eth_get_queuenum
3706 + */
3707 +static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
3708 + *skb)
3709 +{
3710 + int queuenum = 0;
3711 + unsigned long flags;
3712 +
3713 +	/*
3714 +	 * Get the fast-path queue number: use the conntrack mark (if
3715 +	 * conntrack exists), then the packet mark (if any), then fall
3716 +	 * back to the default priority.
3717 +	 */
3718 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3719 + if (skb->nfct) {
3720 + enum ip_conntrack_info cinfo;
3721 + struct nf_conn *ct;
3722 +
3723 + ct = nf_ct_get(skb, &cinfo);
3724 +
3725 + if (ct) {
3726 + u32 connmark;
3727 +
3728 + connmark = ct->mark;
3729 +
3730 + if ((connmark & 0x80000000) && priv->id != 0)
3731 + connmark >>= 16;
3732 +
3733 + queuenum = connmark & EMAC_QUEUENUM_MASK;
3734 + }
3735 + } else {/* continued after #endif ... */
3736 +#endif
3737 + if (skb->mark) {
3738 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
3739 + } else {
3740 + spin_lock_irqsave(&priv->lock, flags);
3741 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
3742 + spin_unlock_irqrestore(&priv->lock, flags);
3743 + }
3744 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
3745 + }
3746 +#endif
3747 + return queuenum;
3748 +}
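+
+/*
+ * Worked example: assuming emac_txq_cnt is EMAC_TXQ_CNT (16),
+ * EMAC_QUEUENUM_MASK is 0xf, so a conntrack or skb mark of 0x23 selects
+ * queue 3, and an unmarked packet falls back to
+ * priv->default_priority & 0xf.
+ */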
3749 +
3750 +/* pfe_eth_might_stop_tx
3751 + *
3752 + */
3753 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
3754 + struct netdev_queue *tx_queue,
3755 + unsigned int n_desc,
3756 + unsigned int n_segs)
3757 +{
3758 + ktime_t kt;
3759 + int tried = 0;
3760 +
3761 +try_again:
3762 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
3763 + (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
3764 + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
3765 + if (!tried) {
3766 + __hif_lib_update_credit(&priv->client, queuenum);
3767 + tried = 1;
3768 + goto try_again;
3769 + }
3770 +#ifdef PFE_ETH_TX_STATS
3771 + if (__hif_tx_avail(&pfe->hif) < n_desc) {
3772 + priv->stop_queue_hif[queuenum]++;
3773 + } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
3774 + priv->stop_queue_hif_client[queuenum]++;
3775 + } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
3776 + n_segs) {
3777 + priv->stop_queue_credit[queuenum]++;
3778 + }
3779 + priv->stop_queue_total[queuenum]++;
3780 +#endif
3781 + netif_tx_stop_queue(tx_queue);
3782 +
3783 + kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
3784 + NSEC_PER_MSEC);
3785 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
3786 + HRTIMER_MODE_REL);
3787 + return -1;
3788 + } else {
3789 + return 0;
3790 + }
3791 +}
3792 +
3793 +#define SA_MAX_OP 2
3794 +/* pfe_hif_send_packet
3795 + *
3796 + * At this level if TX fails we drop the packet
3797 + */
3798 +static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
3799 + *priv, int queuenum)
3800 +{
3801 + struct skb_shared_info *sh = skb_shinfo(skb);
3802 + unsigned int nr_frags;
3803 + u32 ctrl = 0;
3804 +
3805 + netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
3806 +
3807 + if (skb_is_gso(skb)) {
3808 + priv->stats.tx_dropped++;
3809 + return;
3810 + }
3811 +
3812 + if (skb->ip_summed == CHECKSUM_PARTIAL)
3813 + ctrl = HIF_CTRL_TX_CHECKSUM;
3814 +
3815 + nr_frags = sh->nr_frags;
3816 +
3817 + if (nr_frags) {
3818 + skb_frag_t *f;
3819 + int i;
3820 +
3821 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3822 + skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
3823 + skb);
3824 +
3825 + for (i = 0; i < nr_frags - 1; i++) {
3826 + f = &sh->frags[i];
3827 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3828 + skb_frag_address(f),
3829 + skb_frag_size(f),
3830 + 0x0, 0x0, skb);
3831 + }
3832 +
3833 + f = &sh->frags[i];
3834 +
3835 + __hif_lib_xmit_pkt(&priv->client, queuenum,
3836 + skb_frag_address(f), skb_frag_size(f),
3837 + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
3838 + skb);
3839 +
3840 + netif_info(priv, tx_queued, priv->ndev,
3841 + "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
3842 + __func__, skb, nr_frags, skb->len);
3843 + } else {
3844 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
3845 + skb->len, ctrl, HIF_FIRST_BUFFER |
3846 + HIF_LAST_BUFFER | HIF_DATA_VALID,
3847 + skb);
3848 + netif_info(priv, tx_queued, priv->ndev,
3849 + "%s: pkt sent successfully skb:%p len:%d\n",
3850 + __func__, skb, skb->len);
3851 + }
3852 + hif_tx_dma_start();
3853 + priv->stats.tx_packets++;
3854 + priv->stats.tx_bytes += skb->len;
3855 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
3856 +}
3857 +
3858 +/* pfe_eth_flush_txQ
3859 + */
3860 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
3861 + from_tx, int n_desc)
3862 +{
3863 + struct sk_buff *skb;
3864 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3865 + tx_q_num);
3866 + unsigned int flags;
3867 +
3868 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3869 +
3870 + if (!from_tx)
3871 + __netif_tx_lock_bh(tx_queue);
3872 +
3873 + /* Clean HIF and client queue */
3874 + while ((skb = hif_lib_tx_get_next_complete(&priv->client,
3875 + tx_q_num, &flags,
3876 + HIF_TX_DESC_NT))) {
3877 + if (flags & HIF_DATA_VALID)
3878 + dev_kfree_skb_any(skb);
3879 + }
3880 + if (!from_tx)
3881 + __netif_tx_unlock_bh(tx_queue);
3882 +}
3883 +
3884 +/* pfe_eth_flush_tx
3885 + */
3886 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
3887 +{
3888 + int ii;
3889 +
3890 + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
3891 +
3892 + for (ii = 0; ii < emac_txq_cnt; ii++) {
3893 + pfe_eth_flush_txQ(priv, ii, 0, 0);
3894 + __hif_lib_update_credit(&priv->client, ii);
3895 + }
3896 +}
3897 +
3898 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
3899 + *n_segs)
3900 +{
3901 + struct skb_shared_info *sh = skb_shinfo(skb);
3902 +
3903 + /* Scattered data */
3904 + if (sh->nr_frags) {
3905 + *n_desc = sh->nr_frags + 1;
3906 + *n_segs = 1;
3907 + /* Regular case */
3908 + } else {
3909 + *n_desc = 1;
3910 + *n_segs = 1;
3911 + }
3912 +}
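+
+/*
+ * Example: a linear skb needs a single HIF descriptor, while an skb with
+ * three page fragments needs four (one for the header plus one per
+ * fragment); either way it counts as one segment for credit accounting.
+ */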
3913 +
3914 +/* pfe_eth_send_packet
3915 + */
3916 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
3917 +{
3918 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3919 + int tx_q_num = skb_get_queue_mapping(skb);
3920 + int n_desc, n_segs;
3921 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
3922 + tx_q_num);
3923 +
3924 + netif_info(priv, tx_queued, ndev, "%s\n", __func__);
3925 +
3926 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
3927 + sizeof(unsigned long)))) {
3928 + netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
3929 + __func__);
3930 +
3931 + if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
3932 + long)), 0, GFP_ATOMIC)) {
3933 +			/* No need to retransmit; there is no way to recover */
3934 + kfree_skb(skb);
3935 + priv->stats.tx_dropped++;
3936 + return NETDEV_TX_OK;
3937 + }
3938 + }
3939 +
3940 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
3941 +
3942 + hif_tx_lock(&pfe->hif);
3943 + if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
3944 + n_segs))) {
3945 +#ifdef PFE_ETH_TX_STATS
3946 + if (priv->was_stopped[tx_q_num]) {
3947 + priv->clean_fail[tx_q_num]++;
3948 + priv->was_stopped[tx_q_num] = 0;
3949 + }
3950 +#endif
3951 + hif_tx_unlock(&pfe->hif);
3952 + return NETDEV_TX_BUSY;
3953 + }
3954 +
3955 + pfe_hif_send_packet(skb, priv, tx_q_num);
3956 +
3957 + hif_tx_unlock(&pfe->hif);
3958 +
3959 + tx_queue->trans_start = jiffies;
3960 +
3961 +#ifdef PFE_ETH_TX_STATS
3962 + priv->was_stopped[tx_q_num] = 0;
3963 +#endif
3964 +
3965 + return NETDEV_TX_OK;
3966 +}
3967 +
3968 +/* pfe_eth_select_queue
3969 + *
3970 + */
3971 +static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
3972 + void *accel_priv,
3973 + select_queue_fallback_t fallback)
3974 +{
3975 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3976 +
3977 + return pfe_eth_get_queuenum(priv, skb);
3978 +}
3979 +
3980 +/* pfe_eth_get_stats
3981 + */
3982 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
3983 +{
3984 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3985 +
3986 + netif_info(priv, drv, ndev, "%s\n", __func__);
3987 +
3988 + return &priv->stats;
3989 +}
3990 +
3991 +/* pfe_eth_set_mac_address
3992 + */
3993 +static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
3994 +{
3995 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
3996 + struct sockaddr *sa = addr;
3997 +
3998 + netif_info(priv, drv, ndev, "%s\n", __func__);
3999 +
4000 + if (!is_valid_ether_addr(sa->sa_data))
4001 + return -EADDRNOTAVAIL;
4002 +
4003 + memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
4004 +
4005 + gemac_set_laddrN(priv->EMAC_baseaddr,
4006 + (struct pfe_mac_addr *)ndev->dev_addr, 1);
4007 +
4008 + return 0;
4009 +}
4010 +
4011 +/* pfe_eth_enet_addr_byte_mac
4012 + */
4013 +int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
4014 + struct pfe_mac_addr *enet_addr)
4015 +{
4016 + if (!enet_byte_addr || !enet_addr) {
4017 + return -1;
4018 +
4019 + } else {
4020 + enet_addr->bottom = enet_byte_addr[0] |
4021 + (enet_byte_addr[1] << 8) |
4022 + (enet_byte_addr[2] << 16) |
4023 + (enet_byte_addr[3] << 24);
4024 + enet_addr->top = enet_byte_addr[4] |
4025 + (enet_byte_addr[5] << 8);
4026 + return 0;
4027 + }
4028 +}
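+
+/*
+ * Worked example: the MAC address 00:11:22:33:44:55 is packed
+ * little-endian into the GEMAC address registers as bottom = 0x33221100
+ * and top = 0x00005544.
+ */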
4029 +
4030 +/* pfe_eth_set_multi
4031 + */
4032 +static void pfe_eth_set_multi(struct net_device *ndev)
4033 +{
4034 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4035 + struct pfe_mac_addr hash_addr; /* hash register structure */
4036 + /* specific mac address register structure */
4037 + struct pfe_mac_addr spec_addr;
4038 + int result; /* index into hash register to set.. */
4039 + int uc_count = 0;
4040 + struct netdev_hw_addr *ha;
4041 +
4042 + if (ndev->flags & IFF_PROMISC) {
4043 + netif_info(priv, drv, ndev, "entering promiscuous mode\n");
4044 +
4045 + priv->promisc = 1;
4046 + gemac_enable_copy_all(priv->EMAC_baseaddr);
4047 + } else {
4048 + priv->promisc = 0;
4049 + gemac_disable_copy_all(priv->EMAC_baseaddr);
4050 + }
4051 +
4052 + /* Enable broadcast frame reception if required. */
4053 + if (ndev->flags & IFF_BROADCAST) {
4054 + gemac_allow_broadcast(priv->EMAC_baseaddr);
4055 + } else {
4056 + netif_info(priv, drv, ndev,
4057 + "disabling broadcast frame reception\n");
4058 +
4059 + gemac_no_broadcast(priv->EMAC_baseaddr);
4060 + }
4061 +
4062 + if (ndev->flags & IFF_ALLMULTI) {
4063 + /* Set the hash to rx all multicast frames */
4064 + hash_addr.bottom = 0xFFFFFFFF;
4065 + hash_addr.top = 0xFFFFFFFF;
4066 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4067 + netdev_for_each_uc_addr(ha, ndev) {
4068 + if (uc_count >= MAX_UC_SPEC_ADDR_REG)
4069 + break;
4070 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
4071 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
4072 + uc_count + 2);
4073 + uc_count++;
4074 + }
4075 + } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
4076 + u8 *addr;
4077 +
4078 + hash_addr.bottom = 0;
4079 + hash_addr.top = 0;
4080 +
4081 + netdev_for_each_mc_addr(ha, ndev) {
4082 + addr = ha->addr;
4083 +
4084 + netif_info(priv, drv, ndev,
4085 +				   "adding multicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4086 + addr[0], addr[1], addr[2],
4087 + addr[3], addr[4], addr[5]);
4088 +
4089 + result = pfe_eth_get_hash(addr);
4090 +
4091 + if (result < EMAC_HASH_REG_BITS) {
4092 + if (result < 32)
4093 + hash_addr.bottom |= (1 << result);
4094 + else
4095 + hash_addr.top |= (1 << (result - 32));
4096 + } else {
4097 + break;
4098 + }
4099 + }
4100 +
4101 + uc_count = -1;
4102 + netdev_for_each_uc_addr(ha, ndev) {
4103 + addr = ha->addr;
4104 +
4105 + if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
4106 + netdev_info(ndev,
4107 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
4108 + addr[0], addr[1], addr[2],
4109 + addr[3], addr[4], addr[5]);
4110 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
4111 + gemac_set_laddrN(priv->EMAC_baseaddr,
4112 + &spec_addr, uc_count + 2);
4113 + } else {
4114 + netif_info(priv, drv, ndev,
4115 + "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
4116 + addr[0], addr[1], addr[2],
4117 + addr[3], addr[4], addr[5]);
4118 +
4119 + result = pfe_eth_get_hash(addr);
4120 + if (result >= EMAC_HASH_REG_BITS) {
4121 + break;
4122 +
4123 + } else {
4124 + if (result < 32)
4125 + hash_addr.bottom |= (1 <<
4126 + result);
4127 + else
4128 + hash_addr.top |= (1 <<
4129 + (result - 32));
4130 + }
4131 + }
4132 + }
4133 +
4134 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
4135 + }
4136 +
4137 +	if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
4138 + /*
4139 + * Check if there are any specific address HW registers that
4140 + * need to be flushed
4141 + */
4142 + for (uc_count = netdev_uc_count(ndev); uc_count <
4143 + MAX_UC_SPEC_ADDR_REG; uc_count++)
4144 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
4145 + }
4146 +
4147 + if (ndev->flags & IFF_LOOPBACK)
4148 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
4149 +}
4150 +
4151 +/* pfe_eth_set_features
4152 + */
4153 +static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
4154 + features)
4155 +{
4156 + struct pfe_eth_priv_s *priv = netdev_priv(ndev);
4157 + int rc = 0;
4158 +
4159 + if (features & NETIF_F_RXCSUM)
4160 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
4161 + else
4162 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
4163 + return rc;
4164 +}
4165 +
4166 +/* pfe_eth_fast_tx_timeout
4167 + */
4168 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
4169 +{
4170 + struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
4171 + pfe_eth_fast_timer,
4172 + timer);
4173 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
4174 + struct pfe_eth_priv_s,
4175 + fast_tx_timeout);
4176 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
4177 + fast_tx_timeout->queuenum);
4178 +
4179 + if (netif_tx_queue_stopped(tx_queue)) {
4180 +#ifdef PFE_ETH_TX_STATS
4181 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
4182 +#endif
4183 + netif_tx_wake_queue(tx_queue);
4184 + }
4185 +
4186 + return HRTIMER_NORESTART;
4187 +}
4188 +
4189 +/* pfe_eth_fast_tx_timeout_init
4190 + */
4191 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
4192 +{
4193 + int i;
4194 +
4195 + for (i = 0; i < emac_txq_cnt; i++) {
4196 + priv->fast_tx_timeout[i].queuenum = i;
4197 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
4198 + HRTIMER_MODE_REL);
4199 + priv->fast_tx_timeout[i].timer.function =
4200 + pfe_eth_fast_tx_timeout;
4201 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
4202 + }
4203 +}
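+
+/*
+ * The timers above are the recovery path for pfe_eth_might_stop_tx(): when
+ * a queue is stopped for lack of HIF resources, its one-shot hrtimer
+ * re-wakes the queue after LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS (3 ms) so
+ * the stack can retry instead of waiting for the next TX-done event.
+ */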
4204 +
4205 +static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
4206 + struct pfe_eth_priv_s *priv,
4207 + unsigned int qno)
4208 +{
4209 + void *buf_addr;
4210 + unsigned int rx_ctrl;
4211 + unsigned int desc_ctrl = 0;
4212 + struct hif_ipsec_hdr *ipsec_hdr = NULL;
4213 + struct sk_buff *skb;
4214 + struct sk_buff *skb_frag, *skb_frag_last = NULL;
4215 + int length = 0, offset;
4216 +
4217 + skb = priv->skb_inflight[qno];
4218 +
4219 + if (skb) {
4220 + skb_frag_last = skb_shinfo(skb)->frag_list;
4221 + if (skb_frag_last) {
4222 + while (skb_frag_last->next)
4223 + skb_frag_last = skb_frag_last->next;
4224 + }
4225 + }
4226 +
4227 + while (!(desc_ctrl & CL_DESC_LAST)) {
4228 + buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
4229 + &offset, &rx_ctrl, &desc_ctrl,
4230 + (void **)&ipsec_hdr);
4231 + if (!buf_addr)
4232 + goto incomplete;
4233 +
4234 +#ifdef PFE_ETH_NAPI_STATS
4235 + priv->napi_counters[NAPI_DESC_COUNT]++;
4236 +#endif
4237 +
4238 + /* First frag */
4239 + if (desc_ctrl & CL_DESC_FIRST) {
4240 + skb = build_skb(buf_addr, 0);
4241 + if (unlikely(!skb))
4242 + goto pkt_drop;
4243 +
4244 + skb_reserve(skb, offset);
4245 + skb_put(skb, length);
4246 + skb->dev = ndev;
4247 +
4248 + if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
4249 + HIF_CTRL_RX_CHECKSUMMED))
4250 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4251 + else
4252 + skb_checksum_none_assert(skb);
4253 +
4254 + } else {
4255 + /* Next frags */
4256 + if (unlikely(!skb)) {
4257 + pr_err("%s: NULL skb_inflight\n",
4258 + __func__);
4259 + goto pkt_drop;
4260 + }
4261 +
4262 + skb_frag = build_skb(buf_addr, 0);
4263 +
4264 + if (unlikely(!skb_frag)) {
4265 + kfree(buf_addr);
4266 + goto pkt_drop;
4267 + }
4268 +
4269 + skb_reserve(skb_frag, offset);
4270 + skb_put(skb_frag, length);
4271 +
4272 + skb_frag->dev = ndev;
4273 +
4274 + if (skb_shinfo(skb)->frag_list)
4275 + skb_frag_last->next = skb_frag;
4276 + else
4277 + skb_shinfo(skb)->frag_list = skb_frag;
4278 +
4279 + skb->truesize += skb_frag->truesize;
4280 + skb->data_len += length;
4281 + skb->len += length;
4282 + skb_frag_last = skb_frag;
4283 + }
4284 + }
4285 +
4286 + priv->skb_inflight[qno] = NULL;
4287 + return skb;
4288 +
4289 +incomplete:
4290 + priv->skb_inflight[qno] = skb;
4291 + return NULL;
4292 +
4293 +pkt_drop:
4294 + priv->skb_inflight[qno] = NULL;
4295 +
4296 + if (skb)
4297 + kfree_skb(skb);
4298 + else
4299 + kfree(buf_addr);
4300 +
4301 + priv->stats.rx_errors++;
4302 +
4303 + return NULL;
4304 +}
4305 +
4306 +/* pfe_eth_poll
4307 + */
4308 +static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
4309 + unsigned int qno, int budget)
4310 +{
4311 + struct net_device *ndev = priv->ndev;
4312 + struct sk_buff *skb;
4313 + int work_done = 0;
4314 + unsigned int len;
4315 +
4316 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4317 +
4318 +#ifdef PFE_ETH_NAPI_STATS
4319 + priv->napi_counters[NAPI_POLL_COUNT]++;
4320 +#endif
4321 +
4322 + do {
4323 + skb = pfe_eth_rx_skb(ndev, priv, qno);
4324 +
4325 + if (!skb)
4326 + break;
4327 +
4328 + len = skb->len;
4329 +
4330 + /* Packet will be processed */
4331 + skb->protocol = eth_type_trans(skb, ndev);
4332 +
4333 + netif_receive_skb(skb);
4334 +
4335 + priv->stats.rx_packets++;
4336 + priv->stats.rx_bytes += len;
4337 +
4338 + work_done++;
4339 +
4340 +#ifdef PFE_ETH_NAPI_STATS
4341 + priv->napi_counters[NAPI_PACKET_COUNT]++;
4342 +#endif
4343 +
4344 + } while (work_done < budget);
4345 +
4346 + /*
4347 +	 * If neither Rx nor cleanup work was done, exit polling mode.
4348 +	 * No netif_running(dev) check is required here, as this is
4349 +	 * checked in net/core/dev.c (2.6.33.5 kernel specific).
4350 + */
4351 + if (work_done < budget) {
4352 + napi_complete(napi);
4353 +
4354 + hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
4355 + qno);
4356 + }
4357 +#ifdef PFE_ETH_NAPI_STATS
4358 + else
4359 + priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
4360 +#endif
4361 +
4362 + return work_done;
4363 +}
4364 +
4365 +/*
4366 + * pfe_eth_lro_poll
4367 + */
4368 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
4369 +{
4370 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4371 + lro_napi);
4372 +
4373 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4374 +
4375 + return pfe_eth_poll(priv, napi, 2, budget);
4376 +}
4377 +
4378 +/* pfe_eth_low_poll
4379 + */
4380 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
4381 +{
4382 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4383 + low_napi);
4384 +
4385 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4386 +
4387 + return pfe_eth_poll(priv, napi, 1, budget);
4388 +}
4389 +
4390 +/* pfe_eth_high_poll
4391 + */
4392 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
4393 +{
4394 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
4395 + high_napi);
4396 +
4397 + netif_info(priv, intr, priv->ndev, "%s\n", __func__);
4398 +
4399 + return pfe_eth_poll(priv, napi, 0, budget);
4400 +}
4401 +
4402 +static const struct net_device_ops pfe_netdev_ops = {
4403 + .ndo_open = pfe_eth_open,
4404 + .ndo_stop = pfe_eth_close,
4405 + .ndo_start_xmit = pfe_eth_send_packet,
4406 + .ndo_select_queue = pfe_eth_select_queue,
4407 + .ndo_get_stats = pfe_eth_get_stats,
4408 + .ndo_set_mac_address = pfe_eth_set_mac_address,
4409 + .ndo_set_rx_mode = pfe_eth_set_multi,
4410 + .ndo_set_features = pfe_eth_set_features,
4411 + .ndo_validate_addr = eth_validate_addr,
4412 +};
4413 +
4414 +/* pfe_eth_init_one
4415 + */
4416 +static int pfe_eth_init_one(struct pfe *pfe, int id)
4417 +{
4418 + struct net_device *ndev = NULL;
4419 + struct pfe_eth_priv_s *priv = NULL;
4420 + struct ls1012a_eth_platform_data *einfo;
4421 + struct ls1012a_mdio_platform_data *minfo;
4422 + struct ls1012a_pfe_platform_data *pfe_info;
4423 + int err;
4424 +
4425 +	/* Extract platform data */
4426 + pfe_info = (struct ls1012a_pfe_platform_data *)
4427 + pfe->dev->platform_data;
4428 + if (!pfe_info) {
4429 + pr_err(
4430 + "%s: pfe missing additional platform data\n"
4431 + , __func__);
4432 + err = -ENODEV;
4433 + goto err0;
4434 + }
4435 +
4436 + einfo = (struct ls1012a_eth_platform_data *)
4437 + pfe_info->ls1012a_eth_pdata;
4438 +
4439 +	/* einfo should never be NULL, but there is no harm in checking */
4440 + if (!einfo) {
4441 + pr_err(
4442 + "%s: pfe missing additional gemacs platform data\n"
4443 + , __func__);
4444 + err = -ENODEV;
4445 + goto err0;
4446 + }
4447 +
4448 + minfo = (struct ls1012a_mdio_platform_data *)
4449 + pfe_info->ls1012a_mdio_pdata;
4450 +
4451 +	/* minfo should never be NULL, but there is no harm in checking */
4452 + if (!minfo) {
4453 + pr_err(
4454 + "%s: pfe missing additional mdios platform data\n",
4455 + __func__);
4456 + err = -ENODEV;
4457 + goto err0;
4458 + }
4459 +
4460 + if (us)
4461 + emac_txq_cnt = EMAC_TXQ_CNT;
4462 + /* Create an ethernet device instance */
4463 + ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
4464 +
4465 + if (!ndev) {
4466 + pr_err("%s: gemac %d device allocation failed\n",
4467 + __func__, einfo[id].gem_id);
4468 + err = -ENOMEM;
4469 + goto err0;
4470 + }
4471 +
4472 + priv = netdev_priv(ndev);
4473 + priv->ndev = ndev;
4474 + priv->id = einfo[id].gem_id;
4475 + priv->pfe = pfe;
4476 +
4477 + SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
4478 +
4479 + pfe->eth.eth_priv[id] = priv;
4480 +
4481 + /* Set the info in the priv to the current info */
4482 + priv->einfo = &einfo[id];
4483 + priv->EMAC_baseaddr = cbus_emac_base[id];
4484 + priv->PHY_baseaddr = cbus_emac_base[0];
4485 + priv->GPI_baseaddr = cbus_gpi_base[id];
4486 +
4487 +#define HIF_GEMAC_TMUQ_BASE 6
4488 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
4489 + priv->high_tmu_q = priv->low_tmu_q + 1;
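+	/*
+	 * e.g. the mapping above gives GEMAC0 TMU queues 6 (low) and
+	 * 7 (high), and GEMAC1 queues 8 and 9.
+	 */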
4490 +
4491 + spin_lock_init(&priv->lock);
4492 +
4493 + pfe_eth_fast_tx_timeout_init(priv);
4494 +
4495 +	/* Copy the station address into the dev structure */
4496 + memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
4497 +
4498 + /* Initialize mdio */
4499 + if (minfo[id].enabled) {
4500 + err = pfe_eth_mdio_init(priv, &minfo[id]);
4501 + if (err) {
4502 + netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
4503 + __func__);
4504 + goto err2;
4505 + }
4506 + }
4507 +
4508 + if (us)
4509 + goto phy_init;
4510 +
4511 + ndev->mtu = 1500;
4512 +
4513 + /* Set MTU limits */
4514 + ndev->min_mtu = ETH_MIN_MTU;
4515 + ndev->max_mtu = JUMBO_FRAME_SIZE;
4516 +
4517 +	/* supported features */
4521 +	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
4522 +			NETIF_F_IPV6_CSUM | NETIF_F_SG;
4523 +
4524 + /* enabled by default */
4525 + ndev->features = ndev->hw_features;
4526 +
4527 + priv->usr_features = ndev->features;
4528 +
4529 + ndev->netdev_ops = &pfe_netdev_ops;
4530 +
4531 + ndev->ethtool_ops = &pfe_ethtool_ops;
4532 +
4533 + /* Enable basic messages by default */
4534 + priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
4535 + NETIF_MSG_PROBE;
4536 +
4537 + netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
4538 + HIF_RX_POLL_WEIGHT - 16);
4539 + netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
4540 + HIF_RX_POLL_WEIGHT - 16);
4541 + netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
4542 + HIF_RX_POLL_WEIGHT - 16);
4543 +
4544 + err = register_netdev(ndev);
4545 +
4546 + if (err) {
4547 + netdev_err(ndev, "register_netdev() failed\n");
4548 + goto err3;
4549 + }
4550 +
4551 +phy_init:
4552 + device_init_wakeup(&ndev->dev, WAKE_MAGIC);
4553 +
4554 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
4555 + err = pfe_phy_init(ndev);
4556 + if (err) {
4557 + netdev_err(ndev, "%s: pfe_phy_init() failed\n",
4558 + __func__);
4559 + goto err4;
4560 + }
4561 + }
4562 +
4563 + if (us) {
4564 + if (priv->phydev)
4565 + phy_start(priv->phydev);
4566 + return 0;
4567 + }
4568 +
4569 + netif_carrier_on(ndev);
4570 +
4571 + /* Create all the sysfs files */
4572 + if (pfe_eth_sysfs_init(ndev))
4573 + goto err4;
4574 +
4575 + netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
4576 + __func__, priv->EMAC_baseaddr);
4577 +
4578 + return 0;
4579 +err4:
4580 + if (us)
4581 + goto err3;
4582 + unregister_netdev(ndev);
4583 +err3:
4584 + pfe_eth_mdio_exit(priv->mii_bus);
4585 +err2:
4586 + free_netdev(priv->ndev);
4587 +err0:
4588 + return err;
4589 +}
4590 +
4591 +/* pfe_eth_init
4592 + */
4593 +int pfe_eth_init(struct pfe *pfe)
4594 +{
4595 + int ii = 0;
4596 + int err;
4597 +
4598 + pr_info("%s\n", __func__);
4599 +
4600 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
4601 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
4602 +
4603 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
4604 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
4605 +
4606 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
4607 + err = pfe_eth_init_one(pfe, ii);
4608 + if (err)
4609 + goto err0;
4610 + }
4611 +
4612 + return 0;
4613 +
4614 +err0:
+	/* unwind the interfaces that were already created */
4615 +	while (ii--)
4616 +		pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4617 +
4619 +	return err;
4620 +}
4621 +
4622 +/* pfe_eth_exit_one
4623 + */
4624 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
4625 +{
4626 + netif_info(priv, probe, priv->ndev, "%s\n", __func__);
4627 +
4628 + if (!us)
4629 + pfe_eth_sysfs_exit(priv->ndev);
4630 +
4631 + if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
4632 + pfe_phy_exit(priv->ndev);
4633 +
4634 + if (!us)
4635 + unregister_netdev(priv->ndev);
4636 +
4637 + if (priv->mii_bus)
4638 + pfe_eth_mdio_exit(priv->mii_bus);
4639 +
4640 + free_netdev(priv->ndev);
4641 +}
4642 +
4643 +/* pfe_eth_exit
4644 + */
4645 +void pfe_eth_exit(struct pfe *pfe)
4646 +{
4647 + int ii;
4648 +
4649 + pr_info("%s\n", __func__);
4650 +
4651 + for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
4652 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
4653 +}
4654 --- /dev/null
4655 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
4656 @@ -0,0 +1,184 @@
4657 +/*
4658 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4659 + * Copyright 2017 NXP
4660 + *
4661 + * This program is free software; you can redistribute it and/or modify
4662 + * it under the terms of the GNU General Public License as published by
4663 + * the Free Software Foundation; either version 2 of the License, or
4664 + * (at your option) any later version.
4665 + *
4666 + * This program is distributed in the hope that it will be useful,
4667 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4668 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4669 + * GNU General Public License for more details.
4670 + *
4671 + * You should have received a copy of the GNU General Public License
4672 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4673 + */
4674 +
4675 +#ifndef _PFE_ETH_H_
4676 +#define _PFE_ETH_H_
4677 +#include <linux/kernel.h>
4678 +#include <linux/netdevice.h>
4679 +#include <linux/etherdevice.h>
4680 +#include <linux/ethtool.h>
4681 +#include <linux/mii.h>
4682 +#include <linux/phy.h>
4683 +#include <linux/clk.h>
4684 +#include <linux/interrupt.h>
4685 +#include <linux/time.h>
4686 +
4687 +#define PFE_ETH_NAPI_STATS
4688 +#define PFE_ETH_TX_STATS
4689 +
4690 +#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
4691 +#define LRO_LEN_COUNT_MAX 32
4692 +#define LRO_NB_COUNT_MAX 32
4693 +
4694 +#define PFE_PAUSE_FLAG_ENABLE 1
4695 +#define PFE_PAUSE_FLAG_AUTONEG 2
4696 +
4697 +/* GEMAC configured by SW */
4698 +/* GEMAC configured by phy lines (not for MII/GMII) */
4699 +
4700 +#define GEMAC_SW_FULL_DUPLEX BIT(9)
4701 +#define GEMAC_SW_SPEED_10M (0 << 12)
4702 +#define GEMAC_SW_SPEED_100M BIT(12)
4703 +#define GEMAC_SW_SPEED_1G (2 << 12)
4704 +
4705 +#define GEMAC_NO_PHY BIT(0)
4706 +
4707 +struct ls1012a_eth_platform_data {
4708 + /* device specific information */
4709 + u32 device_flags;
4710 + char name[16];
4711 +
4712 + /* board specific information */
4713 + u32 mii_config;
4714 + u32 phy_flags;
4715 + u32 gem_id;
4716 + u32 bus_id;
4717 + u32 phy_id;
4718 + u32 mdio_muxval;
4719 + u8 mac_addr[ETH_ALEN];
4720 +};
4721 +
4722 +struct ls1012a_mdio_platform_data {
4723 + int enabled;
4724 + int irq[32];
4725 + u32 phy_mask;
4726 + int mdc_div;
4727 +};
4728 +
4729 +struct ls1012a_pfe_platform_data {
4730 + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
4731 + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
4732 +};
4733 +
4734 +#define NUM_GEMAC_SUPPORT 2
4735 +#define DRV_NAME "pfe-eth"
4736 +#define DRV_VERSION "1.0"
4737 +
4738 +#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
4739 +#define TX_POLL_TIMEOUT_MS 1000
4740 +
4741 +#define EMAC_TXQ_CNT 16
4742 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
4743 +
4744 +#define JUMBO_FRAME_SIZE 10258
4745 +/*
4746 + * Client Tx queue threshold, for txQ flush condition.
4747 + * It must be smaller than the queue size (in case we ever change it in the
4748 + * future).
4749 + */
4750 +#define HIF_CL_TX_FLUSH_MARK 32
4751 +
4752 +/*
4753 + * Max number of TX resources (HIF descriptors or skbs) that will be released
4754 + * in a single go during batch recycling.
4755 + * Should be lower than the flush mark so the SW can provide the HW with a
4756 + * continuous stream of packets instead of bursts.
4757 + */
4758 +#define TX_FREE_MAX_COUNT 16
4759 +#define EMAC_RXQ_CNT 3
4760 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
4761 +/* make sure clients can receive a full burst of packets */
4762 +#define EMAC_RMON_TXBYTES_POS 0x00
4763 +#define EMAC_RMON_RXBYTES_POS 0x14
4764 +
4765 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
4766 +#define EMAC_MDIO_TIMEOUT 1000
4767 +#define MAX_UC_SPEC_ADDR_REG 31
4768 +
4769 +struct pfe_eth_fast_timer {
4770 + int queuenum;
4771 + struct hrtimer timer;
4772 + void *base;
4773 +};
4774 +
4775 +struct pfe_eth_priv_s {
4776 + struct pfe *pfe;
4777 + struct hif_client_s client;
4778 + struct napi_struct lro_napi;
4779 + struct napi_struct low_napi;
4780 + struct napi_struct high_napi;
4781 + int low_tmu_q;
4782 + int high_tmu_q;
4783 + struct net_device_stats stats;
4784 + struct net_device *ndev;
4785 + int id;
4786 + int promisc;
4787 + unsigned int msg_enable;
4788 + unsigned int usr_features;
4789 +
4790 + spinlock_t lock; /* protect member variables */
4791 + unsigned int event_status;
4792 + int irq;
4793 + void *EMAC_baseaddr;
4794 +	/* This points to the EMAC base from which we access the PHY */
4795 + void *PHY_baseaddr;
4796 + void *GPI_baseaddr;
4797 + /* PHY stuff */
4798 + struct phy_device *phydev;
4799 + int oldspeed;
4800 + int oldduplex;
4801 + int oldlink;
4802 + /* mdio info */
4803 + int mdc_div;
4804 + struct mii_bus *mii_bus;
4805 + struct clk *gemtx_clk;
4806 + int wol;
4807 + int pause_flag;
4808 +
4809 + int default_priority;
4810 + struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
4811 +
4812 + struct ls1012a_eth_platform_data *einfo;
4813 + struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
4814 +
4815 +#ifdef PFE_ETH_TX_STATS
4816 + unsigned int stop_queue_total[EMAC_TXQ_CNT];
4817 + unsigned int stop_queue_hif[EMAC_TXQ_CNT];
4818 + unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
4819 + unsigned int stop_queue_credit[EMAC_TXQ_CNT];
4820 + unsigned int clean_fail[EMAC_TXQ_CNT];
4821 + unsigned int was_stopped[EMAC_TXQ_CNT];
4822 +#endif
4823 +
4824 +#ifdef PFE_ETH_NAPI_STATS
4825 + unsigned int napi_counters[NAPI_MAX_COUNT];
4826 +#endif
4827 + unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
4828 +};
4829 +
4830 +struct pfe_eth {
4831 + struct pfe_eth_priv_s *eth_priv[3];
4832 +};
4833 +
4834 +int pfe_eth_init(struct pfe *pfe);
4835 +void pfe_eth_exit(struct pfe *pfe);
4836 +int pfe_eth_suspend(struct net_device *dev);
4837 +int pfe_eth_resume(struct net_device *dev);
4838 +int pfe_eth_mdio_reset(struct mii_bus *bus);
4839 +
4840 +#endif /* _PFE_ETH_H_ */
4841 --- /dev/null
4842 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
4843 @@ -0,0 +1,314 @@
4844 +/*
4845 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
4846 + * Copyright 2017 NXP
4847 + *
4848 + * This program is free software; you can redistribute it and/or modify
4849 + * it under the terms of the GNU General Public License as published by
4850 + * the Free Software Foundation; either version 2 of the License, or
4851 + * (at your option) any later version.
4852 + *
4853 + * This program is distributed in the hope that it will be useful,
4854 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4855 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4856 + * GNU General Public License for more details.
4857 + *
4858 + * You should have received a copy of the GNU General Public License
4859 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
4860 + */
4861 +
4862 +/*
4863 + * @file
4864 + * Contains all the functions to handle parsing and loading of PE firmware
4865 + * files.
4866 + */
4867 +#include <linux/firmware.h>
4868 +
4869 +#include "pfe_mod.h"
4870 +#include "pfe_firmware.h"
4871 +#include "pfe/pfe.h"
4872 +
4873 +static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
4874 + const char *section)
4875 +{
4876 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4877 + struct elf32_shdr *shdr;
4878 + struct elf32_shdr *shdr_shstr;
4879 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
4880 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
4881 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
4882 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
4883 + Elf32_Off shstr_offset;
4884 + Elf32_Word sh_name;
4885 + const char *name;
4886 + int i;
4887 +
4888 + /* Section header strings */
4889 + shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
4890 + e_shentsize);
4891 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
4892 +
4893 + for (i = 0; i < e_shnum; i++) {
4894 + shdr = (struct elf32_shdr *)(fw->data + e_shoff
4895 + + i * e_shentsize);
4896 +
4897 + sh_name = be32_to_cpu(shdr->sh_name);
4898 +
4899 + name = (const char *)(fw->data + shstr_offset + sh_name);
4900 +
4901 + if (!strcmp(name, section))
4902 + return shdr;
4903 + }
4904 +
4905 + pr_err("%s: didn't find section %s\n", __func__, section);
4906 +
4907 + return NULL;
4908 +}
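+
+/*
+ * All multi-byte ELF fields are read with be16/be32_to_cpu() because the PE
+ * firmware images are big-endian (see the ELFDATA2MSB check below),
+ * regardless of host endianness; the rest of the loader assumes the same
+ * convention.
+ */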
4909 +
4910 +#if defined(CFG_DIAGS)
4911 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
4912 + *diags_info)
4913 +{
4914 + struct elf32_shdr *shdr;
4915 + unsigned long offset, size;
4916 +
4917 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
4918 + if (shdr) {
4919 + offset = be32_to_cpu(shdr->sh_offset);
4920 + size = be32_to_cpu(shdr->sh_size);
4921 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
4922 + diags_info->diags_str_size = size;
4923 +		diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+		if (!diags_info->diags_str_array)
+			return -ENOMEM;
4924 +		memcpy(diags_info->diags_str_array, fw->data + offset, size);
4925 +
4926 + return 0;
4927 + } else {
4928 + return -1;
4929 + }
4930 +}
4931 +#endif
4932 +
4933 +static void pfe_check_version_info(const struct firmware *fw)
4934 +{
4936 +	static char *version;
4937 +
4938 + struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
4939 +
4940 + if (shdr) {
4941 + if (!version) {
4942 + /*
4943 + * this is the first fw we load, use its version
4944 + * string as reference (whatever it is)
4945 + */
4946 + version = (char *)(fw->data +
4947 + be32_to_cpu(shdr->sh_offset));
4948 +
4949 + pr_info("PFE binary version: %s\n", version);
4950 + } else {
4951 + /*
4952 + * already have loaded at least one firmware, check
4953 + * sequence can start now
4954 + */
4955 + if (strcmp(version, (char *)(fw->data +
4956 + be32_to_cpu(shdr->sh_offset)))) {
4957 +				pr_info(
4958 +					"WARNING: PFE firmware binaries are from incompatible versions\n");
4959 + }
4960 + }
4961 + } else {
4962 + /*
4963 + * version cannot be verified, a potential issue that should
4964 + * be reported
4965 + */
4966 +		pr_info(
4967 +			"WARNING: PFE firmware version could not be verified\n");
4968 + }
4969 +}
4970 +
4971 +/* PFE elf firmware loader.
4972 + * Loads an elf firmware image into a list of PE's (specified using a bitmask)
4973 + *
4974 + * @param pe_mask Mask of PE id's to load firmware to
4975 + * @param fw Pointer to the firmware image
4976 + *
4977 + * @return 0 on success, a negative value on error
4978 + *
4979 + */
4980 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
4981 +{
4982 + struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
4983 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
4984 + struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
4985 + be32_to_cpu(elf_hdr->e_shoff));
4986 + int id, section;
4987 + int rc;
4988 +
4989 + pr_info("%s\n", __func__);
4990 +
4991 + /* Some sanity checks */
4992 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
4993 + pr_err("%s: incorrect elf magic number\n", __func__);
4994 + return -EINVAL;
4995 + }
4996 +
4997 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
4998 + pr_err("%s: incorrect elf class(%x)\n", __func__,
4999 + elf_hdr->e_ident[EI_CLASS]);
5000 + return -EINVAL;
5001 + }
5002 +
5003 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
5004 + pr_err("%s: incorrect elf data(%x)\n", __func__,
5005 + elf_hdr->e_ident[EI_DATA]);
5006 + return -EINVAL;
5007 + }
5008 +
5009 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
5010 + pr_err("%s: incorrect elf file type(%x)\n", __func__,
5011 + be16_to_cpu(elf_hdr->e_type));
5012 + return -EINVAL;
5013 + }
5014 +
5015 + for (section = 0; section < sections; section++, shdr++) {
5016 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
5017 + SHF_EXECINSTR)))
5018 + continue;
5019 +
5020 + for (id = 0; id < MAX_PE; id++)
5021 + if (pe_mask & (1 << id)) {
5022 + rc = pe_load_elf_section(id, fw->data, shdr,
5023 + pfe->dev);
5024 + if (rc < 0)
5025 + goto err;
5026 + }
5027 + }
5028 +
5029 + pfe_check_version_info(fw);
5030 +
5031 + return 0;
5032 +
5033 +err:
5034 + return rc;
5035 +}
5036 +
5037 +/* PFE firmware initialization.
5038 + * Loads different firmware files from filesystem.
5039 + * Initializes PE IMEM/DMEM and UTIL-PE DDR
5040 + * Initializes control path symbol addresses (by looking them up in the elf
5041 + * firmware files)
5042 + * Takes PE's out of reset
5043 + *
5044 + * @return 0 on success, a negative value on error
5045 + *
5046 + */
5047 +int pfe_firmware_init(struct pfe *pfe)
5048 +{
5049 + const struct firmware *class_fw, *tmu_fw;
5050 + int rc = 0;
5051 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5052 + const char *util_fw_name;
5053 + const struct firmware *util_fw;
5054 +#endif
5055 +
5056 + pr_info("%s\n", __func__);
5057 +
5058 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
5059 + pr_err("%s: request firmware %s failed\n", __func__,
5060 + CLASS_FIRMWARE_FILENAME);
5061 + rc = -ETIMEDOUT;
5062 + goto err0;
5063 + }
5064 +
5065 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
5066 + pr_err("%s: request firmware %s failed\n", __func__,
5067 + TMU_FIRMWARE_FILENAME);
5068 + rc = -ETIMEDOUT;
5069 + goto err1;
5070 + }
5071 +
5072 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5073 + util_fw_name = UTIL_FIRMWARE_FILENAME;
5074 +
5075 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
5076 + pr_err("%s: request firmware %s failed\n", __func__,
5077 + util_fw_name);
5078 + rc = -ETIMEDOUT;
5079 + goto err2;
5080 + }
5081 +#endif
5082 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
5083 + if (rc < 0) {
5084 + pr_err("%s: class firmware load failed\n", __func__);
5085 + goto err3;
5086 + }
5087 +
5088 +#if defined(CFG_DIAGS)
5089 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
5090 + if (rc < 0) {
5091 + pr_warn(
5092 + "PFE diags won't be available for class PEs\n");
5093 + rc = 0;
5094 + }
5095 +#endif
5096 +
5097 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
5098 + if (rc < 0) {
5099 + pr_err("%s: tmu firmware load failed\n", __func__);
5100 + goto err3;
5101 + }
5102 +
5103 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5104 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
5105 + if (rc < 0) {
5106 + pr_err("%s: util firmware load failed\n", __func__);
5107 + goto err3;
5108 + }
5109 +
5110 +#if defined(CFG_DIAGS)
5111 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
5112 + if (rc < 0) {
5113 + pr_warn(
5114 + "PFE diags won't be available for util PE\n");
5115 + rc = 0;
5116 + }
5117 +#endif
5118 +
5119 + util_enable();
5120 +#endif
5121 +
5122 + tmu_enable(0xf);
5123 + class_enable();
5124 +
5125 +err3:
5126 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5127 + release_firmware(util_fw);
5128 +
5129 +err2:
5130 +#endif
5131 + release_firmware(tmu_fw);
5132 +
5133 +err1:
5134 + release_firmware(class_fw);
5135 +
5136 +err0:
5137 + return rc;
5138 +}
5139 +
5140 +/* PFE firmware cleanup
5141 + * Puts PE's in reset
5142 + *
5143 + */
5145 +void pfe_firmware_exit(struct pfe *pfe)
5146 +{
5147 + pr_info("%s\n", __func__);
5148 +
5149 + if (pe_reset_all(&pfe->ctrl) != 0)
5150 + pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
5151 +
5152 + class_disable();
5153 + tmu_disable(0xf);
5154 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5155 + util_disable();
5156 +#endif
5157 +}
5158 --- /dev/null
5159 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
5160 @@ -0,0 +1,32 @@
5161 +/*
5162 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5163 + * Copyright 2017 NXP
5164 + *
5165 + * This program is free software; you can redistribute it and/or modify
5166 + * it under the terms of the GNU General Public License as published by
5167 + * the Free Software Foundation; either version 2 of the License, or
5168 + * (at your option) any later version.
5169 + *
5170 + * This program is distributed in the hope that it will be useful,
5171 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5172 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5173 + * GNU General Public License for more details.
5174 + *
5175 + * You should have received a copy of the GNU General Public License
5176 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5177 + */
5178 +
5179 +#ifndef _PFE_FIRMWARE_H_
5180 +#define _PFE_FIRMWARE_H_
5181 +
5182 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
5183 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
5184 +
5185 +#define PFE_FW_CHECK_PASS 0
5186 +#define PFE_FW_CHECK_FAIL 1
5187 +#define NUM_PFE_FW 3
5188 +
5189 +int pfe_firmware_init(struct pfe *pfe);
5190 +void pfe_firmware_exit(struct pfe *pfe);
5191 +
5192 +#endif /* _PFE_FIRMWARE_H_ */
5193 --- /dev/null
5194 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
5195 @@ -0,0 +1,1516 @@
5196 +/*
5197 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
5198 + * Copyright 2017 NXP
5199 + *
5200 + * This program is free software; you can redistribute it and/or modify
5201 + * it under the terms of the GNU General Public License as published by
5202 + * the Free Software Foundation; either version 2 of the License, or
5203 + * (at your option) any later version.
5204 + *
5205 + * This program is distributed in the hope that it will be useful,
5206 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5207 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5208 + * GNU General Public License for more details.
5209 + *
5210 + * You should have received a copy of the GNU General Public License
5211 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
5212 + */
5213 +
5214 +#include "pfe_mod.h"
5215 +#include "pfe/pfe.h"
5216 +
5217 +void *cbus_base_addr;
5218 +void *ddr_base_addr;
5219 +unsigned long ddr_phys_base_addr;
5220 +unsigned int ddr_size;
5221 +
5222 +static struct pe_info pe[MAX_PE];
5223 +
5224 +/* Initializes the PFE library.
5225 + * Must be called before using any of the library functions.
5226 + *
5227 + * @param[in] cbus_base CBUS virtual base address (as mapped in
5228 + * the host CPU address space)
5229 + * @param[in] ddr_base PFE DDR range virtual base address (as
5230 + * mapped in the host CPU address space)
5231 + * @param[in] ddr_phys_base PFE DDR range physical base address (as
5232 + * mapped in platform)
5233 + * @param[in] size PFE DDR range size (as defined by the host
5234 + * software)
5235 + */
5236 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
5237 + unsigned int size)
5238 +{
5239 + cbus_base_addr = cbus_base;
5240 + ddr_base_addr = ddr_base;
5241 + ddr_phys_base_addr = ddr_phys_base;
5242 + ddr_size = size;
5243 +
5244 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
5245 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
5246 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
5247 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5248 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5249 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5250 +
5251 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
5252 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
5253 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
5254 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5255 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5256 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5257 +
5258 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
5259 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
5260 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
5261 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5262 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5263 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5264 +
5265 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
5266 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
5267 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
5268 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5269 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5270 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5271 +
5272 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
5273 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
5274 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
5275 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5276 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5277 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5278 +
5279 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
5280 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
5281 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
5282 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
5283 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
5284 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
5285 +
5286 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
5287 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
5288 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
5289 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5290 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5291 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5292 +
5293 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
5294 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
5295 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
5296 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5297 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5298 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5299 +
5300 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
5301 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
5302 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
5303 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
5304 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
5305 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
5306 +
5307 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5308 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
5309 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
5310 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
5311 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
5312 +#endif
5313 +}
5314 +
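A minimal usage sketch for pfe_lib_init(); the helper name and the CBUS mapping length are placeholders for what the platform probe code elsewhere in this patch actually does:

/* Hypothetical probe-time setup: map CBUS and the PFE DDR window, then hand
 * both to the HAL before any other pfe_hal call. The CBUS length here is a
 * placeholder value.
 */
static int pfe_hal_setup(phys_addr_t cbus_phys, phys_addr_t ddr_phys,
			 unsigned int ddr_len)
{
	void __iomem *cbus_vaddr = ioremap(cbus_phys, 16 * 1024 * 1024);
	void __iomem *ddr_vaddr = ioremap(ddr_phys, ddr_len);

	if (!cbus_vaddr || !ddr_vaddr)
		return -ENOMEM;

	pfe_lib_init((void *)cbus_vaddr, (void *)ddr_vaddr,
		     (unsigned long)ddr_phys, ddr_len);
	return 0;
}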
5315 +/* Writes a buffer to PE internal memory from the host
5316 + * through indirect access registers.
5317 + *
5318 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5319 + * ..., UTIL_ID)
5320 + * @param[in] mem_access_addr Internal memory destination address
5321 + * (must be 32bit aligned)
5322 + * @param[in] src Buffer source address
5323 + * @param[in] len Number of bytes to copy
5324 + */
5325 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
5326 + unsigned int len)
5327 +{
5328 + u32 offset = 0, val, addr;
5329 + unsigned int len32 = len >> 2;
5330 + int i;
5331 +
5332 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
5333 + PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
5334 +
5335 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
5336 + val = *(u32 *)src;
5337 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5338 + writel(addr + offset, pe[id].mem_access_addr);
5339 + }
5340 +
5341 + len = (len & 0x3);
5342 + if (len) {
5343 + val = 0;
5344 +
5345 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
5346 + PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
5347 +
5348 + for (i = 0; i < len; i++, src++)
5349 + val |= (*(u8 *)src) << (8 * i);
5350 +
5351 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
5352 + writel(addr, pe[id].mem_access_addr);
5353 + }
5354 +}
5355 +
5356 +/* Writes a buffer to PE internal data memory (DMEM) from the host
5357 + * through indirect access registers.
5358 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5359 + * ..., UTIL_ID)
5360 + * @param[in] src Buffer source address
5361 + * @param[in] dst DMEM destination address (must be 32bit
5362 + * aligned)
5363 + * @param[in] len Number of bytes to copy
5364 + */
5365 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5366 +{
5367 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
5368 + PE_MEM_ACCESS_DMEM, src, len);
5369 +}
5370 +
5371 +/* Writes a buffer to PE internal program memory (PMEM) from the host
5372 + * through indirect access registers.
5373 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5374 + * ..., TMU3_ID)
5375 + * @param[in] src Buffer source address
5376 + * @param[in] dst PMEM destination address (must be 32bit
5377 + * aligned)
5378 + * @param[in] len Number of bytes to copy
5379 + */
5380 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
5381 +{
5382 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
5383 + - 1)) | PE_MEM_ACCESS_IMEM, src, len);
5384 +}
5385 +
5386 +/* Reads PE internal program memory (IMEM) from the host
5387 + * through indirect access registers.
5388 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5389 + * ..., TMU3_ID)
5390 + * @param[in] addr PMEM read address (must be aligned on size)
5391 + * @param[in] size Number of bytes to read (maximum 4, must not
5392 + * cross 32bit boundaries)
5393 + * @return the data read (in PE endianness, i.e. BE).
5394 + */
5395 +u32 pe_pmem_read(int id, u32 addr, u8 size)
5396 +{
5397 + u32 offset = addr & 0x3;
5398 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5399 + u32 val;
5400 +
5401 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
5402 + | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5403 +
5404 + writel(addr, pe[id].mem_access_addr);
5405 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5406 +
5407 + return (val >> (offset << 3)) & mask;
5408 +}
5409 +
5410 +/* Writes PE internal data memory (DMEM) from the host
5411 + * through indirect access registers.
5412 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5413 + * ..., UTIL_ID)
5414 + * @param[in] addr DMEM write address (must be aligned on size)
5415 + * @param[in] val Value to write (in PE endianness, i.e. BE)
5416 + * @param[in] size Number of bytes to write (maximum 4, must not
5417 + * cross 32bit boundaries)
5418 + */
5419 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
5420 +{
5421 + u32 offset = addr & 0x3;
5422 +
5423 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
5424 + PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5425 +
5426 + /* Indirect access interface is byte swapping data being written */
5427 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
5428 + writel(addr, pe[id].mem_access_addr);
5429 +}
5430 +
5431 +/* Reads PE internal data memory (DMEM) from the host
5432 + * through indirect access registers.
5433 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5434 + * ..., UTIL_ID)
5435 + * @param[in] addr DMEM read address (must be aligned on size)
5436 + * @param[in] size Number of bytes to read (maximum 4, must not
5437 + * cross 32bit boundaries)
5438 + * @return the data read (in PE endianness, i.e. BE).
5439 + */
5440 +u32 pe_dmem_read(int id, u32 addr, u8 size)
5441 +{
5442 + u32 offset = addr & 0x3;
5443 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5444 + u32 val;
5445 +
5446 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
5447 + PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
5448 +
5449 + writel(addr, pe[id].mem_access_addr);
5450 +
5451 + /* Indirect access interface is byte swapping data being read */
5452 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
5453 +
5454 + return (val >> (offset << 3)) & mask;
5455 +}
5456 +
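As a worked example of the byte-enable logic in the helpers above, a hedged sketch of a single-byte read-modify-write; the DMEM offset 0x21 and the PE choice are purely illustrative:

/* Illustrative: set the top bit of the byte at DMEM offset 0x21 of CLASS
 * PE 0. Both helpers shift and mask internally, so plain byte values are
 * exchanged here.
 */
u32 b = pe_dmem_read(CLASS0_ID, 0x21, 1);

pe_dmem_write(CLASS0_ID, b | 0x80, 0x21, 1);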
5457 +/* Writes to CLASS internal bus peripherals (ccu, pe-lmem) from the host
5459 + * through indirect access registers.
5460 + * @param[in] val value to write
5461 + * @param[in] addr Address to write to (must be aligned on size)
5462 + * @param[in] size Number of bytes to write (1, 2 or 4)
5463 + *
5464 + */
5465 +void class_bus_write(u32 val, u32 addr, u8 size)
5466 +{
5467 + u32 offset = addr & 0x3;
5468 +
5469 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5470 +
5471 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
5472 + (size << 24);
5473 +
5474 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
5475 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5476 +}
5477 +
5478 +/* Reads from CLASS internal bus peripherals (ccu, pe-lmem) from the host
5479 + * through indirect access registers.
5480 + * @param[in] addr Address to read from (must be aligned on size)
5481 + * @param[in] size Number of bytes to read (1, 2 or 4)
5482 + * @return the read data
5483 + *
5484 + */
5485 +u32 class_bus_read(u32 addr, u8 size)
5486 +{
5487 + u32 offset = addr & 0x3;
5488 + u32 mask = 0xffffffff >> ((4 - size) << 3);
5489 + u32 val;
5490 +
5491 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
5492 +
5493 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
5494 +
5495 + writel(addr, CLASS_BUS_ACCESS_ADDR);
5496 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
5497 +
5498 + return (val >> (offset << 3)) & mask;
5499 +}
5500 +
5501 +/* Writes data to the cluster memory (PE_LMEM)
5502 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5503 + * @param[in] src Buffer source address
5504 + * @param[in] len Number of bytes to copy
5505 + */
5506 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
5507 +{
5508 + u32 len32 = len >> 2;
5509 + int i;
5510 +
5511 + for (i = 0; i < len32; i++, src += 4, dst += 4)
5512 + class_bus_write(*(u32 *)src, dst, 4);
5513 +
5514 + if (len & 0x2) {
5515 + class_bus_write(*(u16 *)src, dst, 2);
5516 + src += 2;
5517 + dst += 2;
5518 + }
5519 +
5520 + if (len & 0x1) {
5521 + class_bus_write(*(u8 *)src, dst, 1);
5522 + src++;
5523 + dst++;
5524 + }
5525 +}
5526 +
5527 +/* Writes value to the cluster memory (PE_LMEM)
5528 + * @param[in] dst PE LMEM destination address (must be 32bit aligned)
5529 + * @param[in] val Value to write
5530 + * @param[in] len Number of bytes to write
5531 + */
5532 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
5533 +{
5534 + u32 len32 = len >> 2;
5535 + int i;
5536 +
5537 + val = val | (val << 8) | (val << 16) | (val << 24);
5538 +
5539 + for (i = 0; i < len32; i++, dst += 4)
5540 + class_bus_write(val, dst, 4);
5541 +
5542 + if (len & 0x2) {
5543 + class_bus_write(val, dst, 2);
5544 + dst += 2;
5545 + }
5546 +
5547 + if (len & 0x1) {
5548 + class_bus_write(val, dst, 1);
5549 + dst++;
5550 + }
5551 +}
5552 +
5553 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5554 +
5555 +/* Writes UTIL program memory (DDR) from the host.
5556 + *
5557 + * @param[in] addr Address to write (virtual, must be aligned on size)
5558 + * @param[in] val Value to write (in PE endianness, i.e. BE)
5559 + * @param[in] size Number of bytes to write (2 or 4)
5560 + */
5561 +static void util_pmem_write(u32 val, void *addr, u8 size)
5562 +{
5563 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
5564 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
5565 +
5566 + /*
5567 + * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
5568 + * location
5569 + */
5570 + if (size == 4)
5571 + writel(be32_to_cpu(val), addr64 + off);
5572 + else
5573 + writew(be16_to_cpu((u16)val), addr64 + off);
5574 +}
5575 +
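To make the 64-bit swap concrete, a hedged illustration of where util_pmem_write() places a 32-bit word, derived from the off computation above and assuming a little-endian host:

/* For a 32-bit write to virtual address A:
 *   (A & 7) == 0  ->  off = 8 - 0 - 4 = 4, word lands in bytes 4..7
 *   (A & 7) == 4  ->  off = 8 - 4 - 4 = 0, word lands in bytes 0..3
 * so the two words of each 64-bit group are exchanged, while be32_to_cpu()
 * plus writel() byte-swaps each word, matching the "64bit swapped" comment.
 */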
5576 +/* Writes a buffer to UTIL program memory (DDR) from the host.
5577 + *
5578 + * @param[in] dst Address to write (virtual, must be at least 16bit
5579 + * aligned)
5580 + * @param[in] src Buffer to write (in PE endianness, i.e. BE, must have
5581 + * same alignment as dst)
5582 + * @param[in] len Number of bytes to write (must be at least 16bit
5583 + * aligned)
5584 + */
5585 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
5586 +{
5587 + unsigned int len32;
5588 + int i;
5589 +
5590 + if ((unsigned long)src & 0x2) {
5591 + util_pmem_write(*(u16 *)src, dst, 2);
5592 + src += 2;
5593 + dst += 2;
5594 + len -= 2;
5595 + }
5596 +
5597 + len32 = len >> 2;
5598 +
5599 + for (i = 0; i < len32; i++, dst += 4, src += 4)
5600 + util_pmem_write(*(u32 *)src, dst, 4);
5601 +
5602 + if (len & 0x2)
5603 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
5604 +}
5605 +#endif
5606 +
5607 +/* Loads an elf section into pmem
5608 + * Code needs to be at least 16bit aligned and only PROGBITS sections are
5609 + * supported
5610 + *
5611 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
5612 + * TMU3_ID)
5613 + * @param[in] data pointer to the elf firmware
5614 + * @param[in] shdr pointer to the elf section header
5615 + *
5616 + */
5617 +static int pe_load_pmem_section(int id, const void *data,
5618 + struct elf32_shdr *shdr)
5619 +{
5620 + u32 offset = be32_to_cpu(shdr->sh_offset);
5621 + u32 addr = be32_to_cpu(shdr->sh_addr);
5622 + u32 size = be32_to_cpu(shdr->sh_size);
5623 + u32 type = be32_to_cpu(shdr->sh_type);
5624 +
5625 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5626 + if (id == UTIL_ID) {
5627 + pr_err("%s: unsupported pmem section for UTIL\n",
5628 + __func__);
5629 + return -EINVAL;
5630 + }
5631 +#endif
5632 +
5633 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5634 + pr_err(
5635 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5636 + , __func__, addr, (unsigned long)data + offset);
5637 +
5638 + return -EINVAL;
5639 + }
5640 +
5641 + if (addr & 0x1) {
5642 + pr_err("%s: load address(%x) is not 16bit aligned\n",
5643 + __func__, addr);
5644 + return -EINVAL;
5645 + }
5646 +
5647 + if (size & 0x1) {
5648 + pr_err("%s: load size(%x) is not 16bit aligned\n",
5649 + __func__, size);
5650 + return -EINVAL;
5651 + }
5652 +
5653 + switch (type) {
5654 + case SHT_PROGBITS:
5655 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
5656 +
5657 + break;
5658 +
5659 + default:
5660 + pr_err("%s: unsupported section type(%x)\n", __func__,
5661 + type);
5662 + return -EINVAL;
5663 + }
5664 +
5665 + return 0;
5666 +}
5667 +
5668 +/* Loads an elf section into dmem
5669 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5670 + * initialized to 0
5671 + *
5672 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5673 + * ..., UTIL_ID)
5674 + * @param[in] data pointer to the elf firmware
5675 + * @param[in] shdr pointer to the elf section header
5676 + *
5677 + */
5678 +static int pe_load_dmem_section(int id, const void *data,
5679 + struct elf32_shdr *shdr)
5680 +{
5681 + u32 offset = be32_to_cpu(shdr->sh_offset);
5682 + u32 addr = be32_to_cpu(shdr->sh_addr);
5683 + u32 size = be32_to_cpu(shdr->sh_size);
5684 + u32 type = be32_to_cpu(shdr->sh_type);
5685 + u32 size32 = size >> 2;
5686 + int i;
5687 +
5688 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5689 + pr_err(
5690 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5691 + __func__, addr, (unsigned long)data + offset);
5692 +
5693 + return -EINVAL;
5694 + }
5695 +
5696 + if (addr & 0x3) {
5697 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5698 + __func__, addr);
5699 + return -EINVAL;
5700 + }
5701 +
5702 + switch (type) {
5703 + case SHT_PROGBITS:
5704 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
5705 + break;
5706 +
5707 + case SHT_NOBITS:
5708 + for (i = 0; i < size32; i++, addr += 4)
5709 + pe_dmem_write(id, 0, addr, 4);
5710 +
5711 + if (size & 0x3)
5712 + pe_dmem_write(id, 0, addr, size & 0x3);
5713 +
5714 + break;
5715 +
5716 + default:
5717 + pr_err("%s: unsupported section type(%x)\n", __func__,
5718 + type);
5719 + return -EINVAL;
5720 + }
5721 +
5722 + return 0;
5723 +}
5724 +
5725 +/* Loads an elf section into DDR
5726 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5727 + * initialized to 0
5728 + *
5729 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5730 + * ..., UTIL_ID)
5731 + * @param[in] data pointer to the elf firmware
5732 + * @param[in] shdr pointer to the elf section header
5733 + *
5734 + */
5735 +static int pe_load_ddr_section(int id, const void *data,
5736 + struct elf32_shdr *shdr,
5737 + struct device *dev) {
5738 + u32 offset = be32_to_cpu(shdr->sh_offset);
5739 + u32 addr = be32_to_cpu(shdr->sh_addr);
5740 + u32 size = be32_to_cpu(shdr->sh_size);
5741 + u32 type = be32_to_cpu(shdr->sh_type);
5742 + u32 flags = be32_to_cpu(shdr->sh_flags);
5743 +
5744 + switch (type) {
5745 + case SHT_PROGBITS:
5746 + if (flags & SHF_EXECINSTR) {
5747 + if (id <= CLASS_MAX_ID) {
5748 + /* Do the loading into DDR only once */
5749 + if (id == CLASS0_ID) {
5750 + pr_info(
5751 + "%s: load address(%x) and elf file address(%lx) received\n",
5752 + __func__, addr,
5753 + (unsigned long)data + offset);
5754 + if (((unsigned long)(data + offset)
5755 + & 0x3) != (addr & 0x3)) {
5756 + pr_err(
5757 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5758 + , __func__, addr,
5759 + (unsigned long)data + offset);
5760 +
5761 + return -EINVAL;
5762 + }
5763 +
5764 + if (addr & 0x1) {
5765 + pr_err(
5766 + "%s: load address(%x) is not 16bit aligned\n"
5767 + , __func__, addr);
5768 + return -EINVAL;
5769 + }
5770 +
5771 + if (size & 0x1) {
5772 + pr_err(
5773 + "%s: load length(%x) is not 16bit aligned\n"
5774 + , __func__, size);
5775 + return -EINVAL;
5776 + }
5777 + memcpy(DDR_PHYS_TO_VIRT(
5778 + DDR_PFE_TO_PHYS(addr)),
5779 + data + offset, size);
5780 + }
5781 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
5782 + } else if (id == UTIL_ID) {
5783 + if (((unsigned long)(data + offset) & 0x3)
5784 + != (addr & 0x3)) {
5785 + pr_err(
5786 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
5787 + , __func__, addr,
5788 + (unsigned long)data + offset);
5789 +
5790 + return -EINVAL;
5791 + }
5792 +
5793 + if (addr & 0x1) {
5794 + pr_err(
5795 + "%s: load address(%x) is not 16bit aligned\n"
5796 + , __func__, addr);
5797 + return -EINVAL;
5798 + }
5799 +
5800 + if (size & 0x1) {
5801 + pr_err(
5802 + "%s: load length(%x) is not 16bit aligned\n"
5803 + , __func__, size);
5804 + return -EINVAL;
5805 + }
5806 +
5807 + util_pmem_memcpy(DDR_PHYS_TO_VIRT(
5808 + DDR_PFE_TO_PHYS(addr)),
5809 + data + offset, size);
5810 + }
5811 +#endif
5812 + } else {
5813 + pr_err(
5814 + "%s: unsupported ddr section type(%x) for PE(%d)\n"
5815 + , __func__, type, id);
5816 + return -EINVAL;
5817 + }
5818 +
5819 + } else {
5820 + memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
5821 + + offset, size);
5822 + }
5823 +
5824 + break;
5825 +
5826 + case SHT_NOBITS:
5827 + memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
5828 +
5829 + break;
5830 +
5831 + default:
5832 + pr_err("%s: unsupported section type(%x)\n", __func__,
5833 + type);
5834 + return -EINVAL;
5835 + }
5836 +
5837 + return 0;
5838 +}
5839 +
5840 +/* Loads an elf section into pe lmem
5841 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly
5842 + * initialized to 0
5843 + *
5844 + * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
5845 + * @param[in] data pointer to the elf firmware
5846 + * @param[in] shdr pointer to the elf section header
5847 + *
5848 + */
5849 +static int pe_load_pe_lmem_section(int id, const void *data,
5850 + struct elf32_shdr *shdr)
5851 +{
5852 + u32 offset = be32_to_cpu(shdr->sh_offset);
5853 + u32 addr = be32_to_cpu(shdr->sh_addr);
5854 + u32 size = be32_to_cpu(shdr->sh_size);
5855 + u32 type = be32_to_cpu(shdr->sh_type);
5856 +
5857 + if (id > CLASS_MAX_ID) {
5858 + pr_err(
5859 + "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
5860 + __func__, type, id);
5861 + return -EINVAL;
5862 + }
5863 +
5864 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
5865 + pr_err(
5866 + "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
5867 + __func__, addr, (unsigned long)data + offset);
5868 +
5869 + return -EINVAL;
5870 + }
5871 +
5872 + if (addr & 0x3) {
5873 + pr_err("%s: load address(%x) is not 32bit aligned\n",
5874 + __func__, addr);
5875 + return -EINVAL;
5876 + }
5877 +
5878 + switch (type) {
5879 + case SHT_PROGBITS:
5880 + class_pe_lmem_memcpy_to32(addr, data + offset, size);
5881 + break;
5882 +
5883 + case SHT_NOBITS:
5884 + class_pe_lmem_memset(addr, 0, size);
5885 + break;
5886 +
5887 + default:
5888 + pr_err("%s: unsupported section type(%x)\n", __func__,
5889 + type);
5890 + return -EINVAL;
5891 + }
5892 +
5893 + return 0;
5894 +}
5895 +
5896 +/* Loads an elf section into a PE
5897 + * For now only supports loading a section to dmem (all PE's), pmem (class and
5898 + * tmu PE's),
5899 + * DDR (util PE code)
5900 + *
5901 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
5902 + * ..., UTIL_ID)
5903 + * @param[in] data pointer to the elf firmware
5904 + * @param[in] shdr pointer to the elf section header
5905 + *
5906 + */
5907 +int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
5908 + struct device *dev) {
5909 + u32 addr = be32_to_cpu(shdr->sh_addr);
5910 + u32 size = be32_to_cpu(shdr->sh_size);
5911 +
5912 + if (IS_DMEM(addr, size))
5913 + return pe_load_dmem_section(id, data, shdr);
5914 + else if (IS_PMEM(addr, size))
5915 + return pe_load_pmem_section(id, data, shdr);
5916 + else if (IS_PFE_LMEM(addr, size))
5917 + return 0;
5918 + else if (IS_PHYS_DDR(addr, size))
5919 + return pe_load_ddr_section(id, data, shdr, dev);
5920 + else if (IS_PE_LMEM(addr, size))
5921 + return pe_load_pe_lmem_section(id, data, shdr);
5922 +
5923 + pr_err("%s: unsupported memory range(%x)\n", __func__,
5924 + addr);
5925 + return 0;
5926 +}
5927 +
5928 +/**************************** BMU ***************************/
5929 +
5930 +/* Initializes a BMU block.
5931 + * @param[in] base BMU block base address
5932 + * @param[in] cfg BMU configuration
5933 + */
5934 +void bmu_init(void *base, struct BMU_CFG *cfg)
5935 +{
5936 + bmu_disable(base);
5937 +
5938 + bmu_set_config(base, cfg);
5939 +
5940 + bmu_reset(base);
5941 +}
5942 +
5943 +/* Resets a BMU block.
5944 + * @param[in] base BMU block base address
5945 + */
5946 +void bmu_reset(void *base)
5947 +{
5948 + writel(CORE_SW_RESET, base + BMU_CTRL);
5949 +
5950 + /* Wait for self clear */
5951 + while (readl(base + BMU_CTRL) & CORE_SW_RESET)
5952 + ;
5953 +}
5954 +
5955 +/* Enables a BMU block.
5956 + * @param[in] base BMU block base address
5957 + */
5958 +void bmu_enable(void *base)
5959 +{
5960 + writel(CORE_ENABLE, base + BMU_CTRL);
5961 +}
5962 +
5963 +/* Disables a BMU block.
5964 + * @param[in] base BMU block base address
5965 + */
5966 +void bmu_disable(void *base)
5967 +{
5968 + writel(CORE_DISABLE, base + BMU_CTRL);
5969 +}
5970 +
5971 +/* Sets the configuration of a BMU block.
5972 + * @param[in] base BMU block base address
5973 + * @param[in] cfg BMU configuration
5974 + */
5975 +void bmu_set_config(void *base, struct BMU_CFG *cfg)
5976 +{
5977 + writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
5978 + writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
5979 + writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
5980 +
5981 + /* Interrupts are never used */
5982 + writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
5983 + writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
5984 + writel(0x0, base + BMU_INT_ENABLE);
5985 +}
5986 +
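A hedged bring-up sketch for bmu_init(); every numeric value is a placeholder, since the driver's real BMU parameters are set from pfe_hw.c later in this patch:

/* Illustrative only: initialize BMU1 with placeholder pool parameters. */
struct BMU_CFG cfg = {
	.baseaddr	= 0,	/* hypothetical pool base (PFE address space) */
	.count		= 256,	/* placeholder buffer count */
	.size		= 128,	/* placeholder buffer size parameter */
	.low_watermark	= 10,
	.high_watermark	= 15,
};

bmu_init(BMU1_BASE_ADDR, &cfg);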
5987 +/**************************** MTIP GEMAC ***************************/
5988 +
5989 +/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
5990 + * TCP or UDP checksums are discarded
5991 + *
5992 + * @param[in] base GEMAC base address.
5993 + */
5994 +void gemac_enable_rx_checksum_offload(void *base)
5995 +{
5996 + /* No configuration found to do this */
5997 +}
5998 +
5999 +/* Disable Rx Checksum Engine.
6000 + *
6001 + * @param[in] base GEMAC base address.
6002 + */
6003 +void gemac_disable_rx_checksum_offload(void *base)
6004 +{
6005 + /* No configuration found to do this */
6006 +}
6007 +
6008 +/* GEMAC set speed.
6009 + * @param[in] base GEMAC base address
6010 + * @param[in] gem_speed GEMAC speed (10, 100 or 1000 Mbps)
6011 + */
6012 +void gemac_set_speed(void *base, enum mac_speed gem_speed)
6013 +{
6014 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
6015 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
6016 +
6017 + switch (gem_speed) {
6018 + case SPEED_10M:
6019 + rcr |= EMAC_RCNTRL_RMII_10T;
6020 + break;
6021 +
6022 + case SPEED_1000M:
6023 + ecr |= EMAC_ECNTRL_SPEED;
6024 + break;
6025 +
6026 + case SPEED_100M:
6027 + default:
6028 + /* Remain in 100M mode */
6029 + break;
6030 + }
6031 + writel(ecr, (base + EMAC_ECNTRL_REG));
6032 + writel(rcr, (base + EMAC_RCNTRL_REG));
6033 +}
6034 +
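A hedged sketch of the glue a caller might add between a phylib speed integer and gemac_set_speed(); the wrapper name is hypothetical:

/* Illustrative: translate phydev->speed style integers to enum mac_speed. */
static void set_gemac_speed(void *base, int phy_speed)
{
	enum mac_speed s;

	switch (phy_speed) {
	case 10:
		s = SPEED_10M;
		break;
	case 1000:
		s = SPEED_1000M;
		break;
	default:
		s = SPEED_100M;	/* 100M is also the fallback above */
	}

	gemac_set_speed(base, s);
}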
6035 +/* GEMAC set duplex.
6036 + * @param[in] base GEMAC base address
6037 + * @param[in] duplex GEMAC duplex mode (Full, Half)
6038 + */
6039 +void gemac_set_duplex(void *base, int duplex)
6040 +{
6041 + if (duplex == DUPLEX_HALF) {
6042 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
6043 + + EMAC_TCNTRL_REG);
6044 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
6045 + + EMAC_RCNTRL_REG));
6046 + } else{
6047 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
6048 + + EMAC_TCNTRL_REG);
6049 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
6050 + + EMAC_RCNTRL_REG));
6051 + }
6052 +}
6053 +
6054 +/* GEMAC set mode.
6055 + * @param[in] base GEMAC base address
6056 + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
6057 + */
6058 +void gemac_set_mode(void *base, int mode)
6059 +{
6060 + u32 val = readl(base + EMAC_RCNTRL_REG);
6061 +
6062 + /* Remove loopback */
6063 + val &= ~EMAC_RCNTRL_LOOP;
6064 +
6065 + /*Enable flow control and MII mode*/
6066 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
6067 +
6068 + writel(val, base + EMAC_RCNTRL_REG);
6069 +}
6070 +
6071 +/* GEMAC enable function.
6072 + * @param[in] base GEMAC base address
6073 + */
6074 +void gemac_enable(void *base)
6075 +{
6076 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
6077 + EMAC_ECNTRL_REG);
6078 +}
6079 +
6080 +/* GEMAC disable function.
6081 + * @param[in] base GEMAC base address
6082 + */
6083 +void gemac_disable(void *base)
6084 +{
6085 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
6086 + EMAC_ECNTRL_REG);
6087 +}
6088 +
6089 +/* GEMAC TX disable function.
6090 + * @param[in] base GEMAC base address
6091 + */
6092 +void gemac_tx_disable(void *base)
6093 +{
6094 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
6095 + EMAC_TCNTRL_REG);
6096 +}
6097 +
6098 +void gemac_tx_enable(void *base)
6099 +{
6100 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
6101 + EMAC_TCNTRL_REG);
6102 +}
6103 +
6104 +/* Sets the hash register of the MAC.
6105 + * This register is used for matching unicast and multicast frames.
6106 + *
6107 + * @param[in] base GEMAC base address.
6108 + * @param[in] hash 64-bit hash to be configured.
6109 + */
6110 +void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
6111 +{
6112 + writel(hash->bottom, base + EMAC_GALR);
6113 + writel(hash->top, base + EMAC_GAUR);
6114 +}
6115 +
6116 +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
6117 + unsigned int entry_index)
6118 +{
6119 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6120 + return;
6121 +
6122 + entry_index = entry_index - 1;
6123 + if (entry_index < 1) {
6124 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
6125 + writel((htonl(address->top) | 0x8808), base +
6126 + EMAC_PHY_ADDR_HIGH);
6127 + } else {
6128 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
6129 + + EMAC_SMAC_0_0);
6130 + writel((htonl(address->top) | 0x8808), base + ((entry_index -
6131 + 1) * 8) + EMAC_SMAC_0_1);
6132 + }
6133 +}
6134 +
6135 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
6136 +{
6137 + if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
6138 + return;
6139 +
6140 + entry_index = entry_index - 1;
6141 + if (entry_index < 1) {
6142 + writel(0, base + EMAC_PHY_ADDR_LOW);
6143 + writel(0, base + EMAC_PHY_ADDR_HIGH);
6144 + } else {
6145 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
6146 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
6147 + }
6148 +}
6149 +
6150 +/* Set the loopback mode of the MAC. This can be either no loopback for
6151 + * normal operation, local loopback through MAC internal loopback module or PHY
6152 + * loopback for external loopback through a PHY. This asserts the external
6153 + * loop pin.
6154 + *
6155 + * @param[in] base GEMAC base address.
6156 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
6157 + * Loopback,
6158 + * LB_EXT - PHY Loopback.
6159 + */
6160 +void gemac_set_loop(void *base, enum mac_loop gem_loop)
6161 +{
6162 + pr_info("%s()\n", __func__);
6163 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
6164 + EMAC_RCNTRL_REG));
6165 +}
6166 +
6167 +/* GEMAC enable promiscuous mode (copy all incoming frames)
6168 + * @param[in] base GEMAC base address
6169 + */
6170 +void gemac_enable_copy_all(void *base)
6171 +{
6172 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
6173 + EMAC_RCNTRL_REG));
6174 +}
6175 +
6176 +/* GEMAC disable promiscuous mode
6177 + * @param[in] base GEMAC base address
6178 + */
6179 +void gemac_disable_copy_all(void *base)
6180 +{
6181 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
6182 + EMAC_RCNTRL_REG));
6183 +}
6184 +
6185 +/* GEMAC allow broadcast function.
6186 + * @param[in] base GEMAC base address
6187 + */
6188 +void gemac_allow_broadcast(void *base)
6189 +{
6190 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
6191 + EMAC_RCNTRL_REG);
6192 +}
6193 +
6194 +/* GEMAC no broadcast function.
6195 + * @param[in] base GEMAC base address
6196 + */
6197 +void gemac_no_broadcast(void *base)
6198 +{
6199 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
6200 + EMAC_RCNTRL_REG);
6201 +}
6202 +
6203 +/* GEMAC enable 1536 rx function.
6204 + * @param[in] base GEMAC base address
6205 + */
6206 +void gemac_enable_1536_rx(void *base)
6207 +{
6208 + /* Set 1536 as Maximum frame length */
6209 + writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
6210 + EMAC_RCNTRL_REG);
6211 +}
6212 +
6213 +/* GEMAC enable jumbo function.
6214 + * @param[in] base GEMAC base address
6215 + */
6216 +void gemac_enable_rx_jmb(void *base)
6217 +{
6218 + writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
6219 + + EMAC_RCNTRL_REG);
6220 +}
6221 +
6222 +/* GEMAC enable stacked vlan function.
6223 + * @param[in] base GEMAC base address
6224 + */
6225 +void gemac_enable_stacked_vlan(void *base)
6226 +{
6227 + /* MTIP doesn't support stacked vlan */
6228 +}
6229 +
6230 +/* GEMAC enable pause rx function.
6231 + * @param[in] base GEMAC base address
6232 + */
6233 +void gemac_enable_pause_rx(void *base)
6234 +{
6235 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
6236 + base + EMAC_RCNTRL_REG);
6237 +}
6238 +
6239 +/* GEMAC disable pause rx function.
6240 + * @param[in] base GEMAC base address
6241 + */
6242 +void gemac_disable_pause_rx(void *base)
6243 +{
6244 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
6245 + base + EMAC_RCNTRL_REG);
6246 +}
6247 +
6248 +/* GEMAC enable pause tx function.
6249 + * @param[in] base GEMAC base address
6250 + */
6251 +void gemac_enable_pause_tx(void *base)
6252 +{
6253 + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
6254 +}
6255 +
6256 +/* GEMAC disable pause tx function.
6257 + * @param[in] base GEMAC base address
6258 + */
6259 +void gemac_disable_pause_tx(void *base)
6260 +{
6261 + writel(0x0, base + EMAC_RX_SECTION_EMPTY);
6262 +}
6263 +
6264 +/* GEMAC wol configuration
6265 + * @param[in] base GEMAC base address
6266 + * @param[in] wol_conf WoL register configuration
6267 + */
6268 +void gemac_set_wol(void *base, u32 wol_conf)
6269 +{
6270 + u32 val = readl(base + EMAC_ECNTRL_REG);
6271 +
6272 + if (wol_conf)
6273 + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6274 + else
6275 + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
6276 + writel(val, base + EMAC_ECNTRL_REG);
6277 +}
6278 +
6279 +/* Sets the GEMAC bus width.
6280 + * @param[in] base GEMAC base address
6281 + * @param[in] width GEMAC bus width to set; possible values are 32/64/128
6282 + */
6283 +void gemac_set_bus_width(void *base, int width)
6284 +{
6285 +}
6286 +
6287 +/* Sets Gemac configuration.
6288 + * @param[in] base GEMAC base address
6289 + * @param[in] cfg GEMAC configuration
6290 + */
6291 +void gemac_set_config(void *base, struct gemac_cfg *cfg)
6292 +{
6293 + /*GEMAC config taken from VLSI */
6294 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
6295 + writel(0x00000005, base + EMAC_RX_SECTION_FULL);
6296 + writel(0x00003fff, base + EMAC_TRUNC_FL);
6297 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
6298 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
6299 +
6300 + gemac_set_mode(base, cfg->mode);
6301 +
6302 + gemac_set_speed(base, cfg->speed);
6303 +
6304 + gemac_set_duplex(base, cfg->duplex);
6305 +}
6306 +
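A usage sketch for gemac_set_config(); the field values are placeholders rather than the settings the platform code actually chooses, and EMAC1_BASE_ADDR is assumed from cbus.h:

/* Illustrative only: configure the first GEMAC for gigabit full duplex. */
struct gemac_cfg cfg = {
	.mode	= 0,		/* note: gemac_set_mode() above ignores this */
	.speed	= SPEED_1000M,
	.duplex	= DUPLEX_FULL,
};

gemac_set_config(EMAC1_BASE_ADDR, &cfg);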
6307 +/**************************** GPI ***************************/
6308 +
6309 +/* Initializes a GPI block.
6310 + * @param[in] base GPI base address
6311 + * @param[in] cfg GPI configuration
6312 + */
6313 +void gpi_init(void *base, struct gpi_cfg *cfg)
6314 +{
6315 + gpi_reset(base);
6316 +
6317 + gpi_disable(base);
6318 +
6319 + gpi_set_config(base, cfg);
6320 +}
6321 +
6322 +/* Resets a GPI block.
6323 + * @param[in] base GPI base address
6324 + */
6325 +void gpi_reset(void *base)
6326 +{
6327 + writel(CORE_SW_RESET, base + GPI_CTRL);
6328 +}
6329 +
6330 +/* Enables a GPI block.
6331 + * @param[in] base GPI base address
6332 + */
6333 +void gpi_enable(void *base)
6334 +{
6335 + writel(CORE_ENABLE, base + GPI_CTRL);
6336 +}
6337 +
6338 +/* Disables a GPI block.
6339 + * @param[in] base GPI base address
6340 + */
6341 +void gpi_disable(void *base)
6342 +{
6343 + writel(CORE_DISABLE, base + GPI_CTRL);
6344 +}
6345 +
6346 +/* Sets the configuration of a GPI block.
6347 + * @param[in] base GPI base address
6348 + * @param[in] cfg GPI configuration
6349 + */
6350 +void gpi_set_config(void *base, struct gpi_cfg *cfg)
6351 +{
6352 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
6353 + + GPI_LMEM_ALLOC_ADDR);
6354 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
6355 + + GPI_LMEM_FREE_ADDR);
6356 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
6357 + + GPI_DDR_ALLOC_ADDR);
6358 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
6359 + + GPI_DDR_FREE_ADDR);
6360 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
6361 + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
6362 + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
6363 + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
6364 + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
6365 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
6366 + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
6367 +
6368 + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
6369 + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
6370 + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
6371 + writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
6372 + writel(1, base + GPI_TOE_CHKSUM_EN);
6373 +
6374 + if (cfg->mtip_pause_reg) {
6375 + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
6376 + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
6377 + }
6378 +}
6379 +
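A hedged configuration sketch for gpi_init(); the thresholds are placeholders (the real per-GPI values come from pfe_hw.c later in this patch):

/* Illustrative only: bring up EGPI1 with placeholder thresholds. */
struct gpi_cfg cfg = {
	.lmem_rtry_cnt	= 16,
	.tmlf_txthres	= 0xbc,
	.aseq_len	= 0x40,
	.mtip_pause_reg	= 0,	/* zero skips the MTIP pause wiring above */
};

gpi_init(EGPI1_BASE_ADDR, &cfg);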
6380 +/**************************** CLASSIFIER ***************************/
6381 +
6382 +/* Initializes CLASSIFIER block.
6383 + * @param[in] cfg CLASSIFIER configuration
6384 + */
6385 +void class_init(struct class_cfg *cfg)
6386 +{
6387 + class_reset();
6388 +
6389 + class_disable();
6390 +
6391 + class_set_config(cfg);
6392 +}
6393 +
6394 +/* Resets CLASSIFIER block.
6395 + *
6396 + */
6397 +void class_reset(void)
6398 +{
6399 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
6400 +}
6401 +
6402 +/* Enables all CLASS-PE's cores.
6403 + *
6404 + */
6405 +void class_enable(void)
6406 +{
6407 + writel(CORE_ENABLE, CLASS_TX_CTRL);
6408 +}
6409 +
6410 +/* Disables all CLASS-PE's cores.
6411 + *
6412 + */
6413 +void class_disable(void)
6414 +{
6415 + writel(CORE_DISABLE, CLASS_TX_CTRL);
6416 +}
6417 +
6418 +/*
6419 + * Sets the configuration of the CLASSIFIER block.
6420 + * @param[in] cfg CLASSIFIER configuration
6421 + */
6422 +void class_set_config(struct class_cfg *cfg)
6423 +{
6424 + u32 val;
6425 +
6426 + /* Initialize route table */
6427 + if (!cfg->resume)
6428 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
6429 + cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
6430 +
6431 +#if !defined(LS1012A_PFE_RESET_WA)
6432 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
6433 +#endif
6434 +
6435 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
6436 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
6437 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
6438 + CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
6439 + CLASS_ROUTE_HASH_ENTRY_SIZE);
6440 + writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
6441 + CLASS_HIF_PARSE);
6442 +
6443 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
6444 +
6445 +#if defined(CONFIG_IP_ALIGNED)
6446 + val |= IP_ALIGNED;
6447 +#endif
6448 +
6449 + /*
6450 + * Class PE packet steering will only work if TOE mode, bridge fetch or
6451 + * route fetch are enabled (see class/qb_fet.v). Route fetch would
6452 + * trigger additional memory copies (likely from DDR because of hash
6453 + * table size, which cannot be reduced because PE software still
6454 + * relies on hash value computed in HW), so when not in TOE mode we
6455 + * simply enable HW bridge fetch even though we don't use it.
6456 + */
6457 + if (cfg->toe_mode)
6458 + val |= CLASS_TOE;
6459 + else
6460 + val |= HW_BRIDGE_FETCH;
6461 +
6462 + writel(val, CLASS_ROUTE_MULTI);
6463 +
6464 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
6465 + CLASS_ROUTE_TABLE_BASE);
6466 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
6467 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
6468 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
6469 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
6470 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
6471 +
6472 + writel(23, CLASS_AFULL_THRES);
6473 + writel(23, CLASS_TSQ_FIFO_THRES);
6474 +
6475 + writel(24, CLASS_MAX_BUF_CNT);
6476 + writel(24, CLASS_TSQ_MAX_CNT);
6477 +}
6478 +
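A usage sketch for class_init(); the address, hash width and clock ratio below are hypothetical stand-ins for values the platform code computes:

/* Illustrative only: a cold-start classifier bring-up. */
struct class_cfg cfg = {
	.resume			= 0,		/* zero the route table */
	.route_table_baseaddr	= 0x83000000,	/* hypothetical DDR address */
	.route_table_hash_bits	= 15,		/* placeholder hash width */
	.pe_sys_clk_ratio	= 1,		/* placeholder ratio code */
	.toe_mode		= 0,		/* use the HW bridge fetch path */
};

class_init(&cfg);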
6479 +/**************************** TMU ***************************/
6480 +
6481 +void tmu_reset(void)
6482 +{
6483 + writel(SW_RESET, TMU_CTRL);
6484 +}
6485 +
6486 +/* Initializes TMU block.
6487 + * @param[in] cfg TMU configuration
6488 + */
6489 +void tmu_init(struct tmu_cfg *cfg)
6490 +{
6491 + int q, phyno;
6492 +
6493 + tmu_disable(0xF);
6494 + mdelay(10);
6495 +
6496 +#if !defined(LS1012A_PFE_RESET_WA)
6497 + /* keep in soft reset */
6498 + writel(SW_RESET, TMU_CTRL);
6499 +#endif
6500 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
6501 + writel(750, TMU_INQ_WATERMARK);
6502 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
6503 + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
6504 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
6505 + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
6506 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
6507 + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
6508 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
6509 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
6510 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
6511 + TMU_BMU_INQ_ADDR);
6512 +
6513 + /* enable all 10 schedulers [9:0] of each TDQ */
6514 + writel(0x3FF, TMU_TDQ0_SCH_CTRL);
6517 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
6518 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
6519 +
6520 +#if !defined(LS1012A_PFE_RESET_WA)
6521 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
6522 +#endif
6523 +
6524 +#if !defined(LS1012A_PFE_RESET_WA)
6525 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
6526 + /* Extra packet pointers will be stored from this address onwards */
6527 +
6528 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
6529 + writel(5, TMU_TDQ_IIFG_CFG);
6530 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
6531 +
6532 + writel(0x0, TMU_CTRL);
6533 +
6534 + /* MEM init */
6535 + pr_info("%s: mem init\n", __func__);
6536 + writel(MEM_INIT, TMU_CTRL);
6537 +
6538 + while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
6539 + ;
6540 +
6541 + /* LLM init */
6542 + pr_info("%s: lmem init\n", __func__);
6543 + writel(LLM_INIT, TMU_CTRL);
6544 +
6545 + while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
6546 + ;
6547 +#endif
6548 + /* set up each queue for tail drop */
6549 + for (phyno = 0; phyno < 4; phyno++) {
6550 + if (phyno == 2)
6551 + continue;
6552 + for (q = 0; q < 16; q++) {
6553 + u32 qdepth;
6554 +
6555 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
6556 + writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
6557 +
6558 + if (phyno == 3)
6559 + qdepth = DEFAULT_TMU3_QDEPTH;
6560 + else
6561 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
6562 + DEFAULT_MAX_QDEPTH;
6563 +
6564 + /* LOG: 68855 */
6565 + /*
6566 + * The following is a workaround for the reordered
6567 + * packet and BMU2 buffer leakage issue.
6568 + */
6569 + if (CHIP_REVISION() == 0)
6570 + qdepth = 31;
6571 +
6572 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
6573 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
6574 + }
6575 + }
6576 +
6577 +#ifdef CFG_LRO
6578 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
6579 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
6580 + writel(0, TMU_TEQ_QCFG);
6581 +#endif
6582 +
6583 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
6584 +
6585 + writel(0x0, TMU_CTRL);
6586 +}
6587 +
6588 +/* Enables TMU-PE cores.
6589 + * @param[in] pe_mask TMU PE mask
6590 + */
6591 +void tmu_enable(u32 pe_mask)
6592 +{
6593 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
6594 +}
6595 +
6596 +/* Disables TMU cores.
6597 + * @param[in] pe_mask TMU PE mask
6598 + */
6599 +void tmu_disable(u32 pe_mask)
6600 +{
6601 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
6602 +}
6603 +
6604 +/* Returns the TMU queue status.
6605 + * @param[in] if_id gem interface id or TMU index
6606 + * @return bit mask of busy queues; zero means all
6607 + * queues are empty
6608 + */
6609 +u32 tmu_qstatus(u32 if_id)
6610 +{
6611 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6612 + offsetof(struct pe_status, tmu_qstatus), 4));
6613 +}
6614 +
6615 +u32 tmu_pkts_processed(u32 if_id)
6616 +{
6617 + return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
6618 + offsetof(struct pe_status, rx), 4));
6619 +}
6620 +
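A hedged helper sketch built on tmu_qstatus(); the drain-wait pattern and the 0xffff queue mask are assumptions for illustration, not something defined at this point in the patch:

/* Illustrative only: busy-wait until every queue behind one GEM port is
 * empty. Sleeps, so it must not be called from atomic context.
 */
static void tmu_wait_queues_empty(u32 if_id)
{
	while (tmu_qstatus(if_id) & 0xffff)
		usleep_range(100, 200);
}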
6621 +/**************************** UTIL ***************************/
6622 +
6623 +/* Resets UTIL block.
6624 + */
6625 +void util_reset(void)
6626 +{
6627 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
6628 +}
6629 +
6630 +/* Initializes UTIL block.
6631 + * @param[in] cfg UTIL configuration
6632 + */
6633 +void util_init(struct util_cfg *cfg)
6634 +{
6635 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
6636 +}
6637 +
6638 +/* Enables UTIL-PE core.
6639 + *
6640 + */
6641 +void util_enable(void)
6642 +{
6643 + writel(CORE_ENABLE, UTIL_TX_CTRL);
6644 +}
6645 +
6646 +/* Disables UTIL-PE core.
6647 + *
6648 + */
6649 +void util_disable(void)
6650 +{
6651 + writel(CORE_DISABLE, UTIL_TX_CTRL);
6652 +}
6653 +
6654 +/**************************** HIF ***************************/
6655 +/* Initializes HIF copy block.
6656 + *
6657 + */
6658 +void hif_init(void)
6659 +{
6660 + /*Initialize HIF registers*/
6661 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
6662 + HIF_POLL_CTRL);
6663 +}
6664 +
6665 +/* Enable hif tx DMA and interrupt
6666 + *
6667 + */
6668 +void hif_tx_enable(void)
6669 +{
6670 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
6671 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
6672 + HIF_INT_ENABLE);
6673 +}
6674 +
6675 +/* Disable hif tx DMA and interrupt
6676 + *
6677 + */
6678 +void hif_tx_disable(void)
6679 +{
6680 + u32 hif_int;
6681 +
6682 + writel(0, HIF_TX_CTRL);
6683 +
6684 + hif_int = readl(HIF_INT_ENABLE);
6685 + hif_int &= ~HIF_TXPKT_INT_EN;
6686 + writel(hif_int, HIF_INT_ENABLE);
6687 +}
6688 +
6689 +/* Enable hif rx DMA and interrupt
6690 + *
6691 + */
6692 +void hif_rx_enable(void)
6693 +{
6694 + hif_rx_dma_start();
6695 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
6696 + HIF_INT_ENABLE);
6697 +}
6698 +
6699 +/* Disable hif rx DMA and interrupt
6700 + *
6701 + */
6702 +void hif_rx_disable(void)
6703 +{
6704 + u32 hif_int;
6705 +
6706 + writel(0, HIF_RX_CTRL);
6707 +
6708 + hif_int = readl(HIF_INT_ENABLE);
6709 + hif_int &= ~HIF_RXPKT_INT_EN;
6710 + writel(hif_int, HIF_INT_ENABLE);
6711 +}
6712 --- /dev/null
6713 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
6714 @@ -0,0 +1,1072 @@
6715 +/*
6716 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
6717 + * Copyright 2017 NXP
6718 + *
6719 + * This program is free software; you can redistribute it and/or modify
6720 + * it under the terms of the GNU General Public License as published by
6721 + * the Free Software Foundation; either version 2 of the License, or
6722 + * (at your option) any later version.
6723 + *
6724 + * This program is distributed in the hope that it will be useful,
6725 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
6726 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6727 + * GNU General Public License for more details.
6728 + *
6729 + * You should have received a copy of the GNU General Public License
6730 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
6731 + */
6732 +
6733 +#include <linux/kernel.h>
6734 +#include <linux/interrupt.h>
6735 +#include <linux/dma-mapping.h>
6736 +#include <linux/dmapool.h>
6737 +#include <linux/sched.h>
6738 +#include <linux/module.h>
6739 +#include <linux/list.h>
6740 +#include <linux/kthread.h>
6741 +#include <linux/slab.h>
6742 +
6743 +#include <linux/io.h>
6744 +#include <asm/irq.h>
6745 +
6746 +#include "pfe_mod.h"
6747 +
6748 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
6749 +
6750 +unsigned char napi_first_batch;
6751 +
6752 +static void pfe_tx_do_cleanup(unsigned long data);
6753 +
6754 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
6755 +{
6756 + void *addr;
6757 + dma_addr_t dma_addr;
6758 + int err = 0;
6759 +
6760 + pr_info("%s\n", __func__);
6761 + addr = dma_alloc_coherent(pfe->dev,
6762 + HIF_RX_DESC_NT * sizeof(struct hif_desc) +
6763 + HIF_TX_DESC_NT * sizeof(struct hif_desc),
6764 + &dma_addr, GFP_KERNEL);
6765 +
6766 + if (!addr) {
6767 + pr_err("%s: Could not allocate buffer descriptors!\n"
6768 + , __func__);
6769 + err = -ENOMEM;
6770 + goto err0;
6771 + }
6772 +
6773 + hif->descr_baseaddr_p = dma_addr;
6774 + hif->descr_baseaddr_v = addr;
6775 + hif->rx_ring_size = HIF_RX_DESC_NT;
6776 + hif->tx_ring_size = HIF_TX_DESC_NT;
6777 +
6778 + return 0;
6779 +
6780 +err0:
6781 + return err;
6782 +}
6783 +
6784 +#if defined(LS1012A_PFE_RESET_WA)
6785 +static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
6786 +{
6787 + int ii;
6788 + struct hif_desc *desc = hif->rx_base;
6789 +
6790 + /*Mark all descriptors as LAST_BD */
6791 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6792 + desc->ctrl |= BD_CTRL_LAST_BD;
6793 + desc++;
6794 + }
6795 +}
6796 +
6797 +struct class_rx_hdr_t {
6798 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
6799 + u16 length; /* total packet length */
6800 + u16 phyno; /* input physical port number */
6801 + u32 status; /* gemac status bits */
6802 + u32 status2; /* reserved for software usage */
6803 +};
6804 +
6805 +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
6806 + * except overflow
6807 + */
6808 +#define STATUS_BAD_FRAME_ERR BIT(16)
6809 +#define STATUS_LENGTH_ERR BIT(17)
6810 +#define STATUS_CRC_ERR BIT(18)
6811 +#define STATUS_TOO_SHORT_ERR BIT(19)
6812 +#define STATUS_TOO_LONG_ERR BIT(20)
6813 +#define STATUS_CODE_ERR BIT(21)
6814 +#define STATUS_MC_HASH_MATCH BIT(22)
6815 +#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
6816 +#define STATUS_UNICAST_HASH_MATCH BIT(24)
6817 +#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
6818 +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
6819 +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
6820 +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
6821 +#define MIN_PKT_SIZE 64
6822 +
6823 +static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
6824 +{
6825 + int i;
6826 +
6827 + for (i = 0; i < len; i += sizeof(u32)) {
6828 + *dst = htonl(*src);
6829 + dst++; src++;
6830 + }
6831 +}
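
The per-word htonl() above byte-swaps each 32-bit word while copying, which suggests the class PE local memory expects big-endian words while the host CPU is little-endian. A minimal user-space sketch of the same loop shape (the buffer contents are illustrative):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0xaabbccdd };
	uint32_t dst[2];
	int i;

	/* Same loop shape as copy_to_lmem(): one htonl() per 32-bit word */
	for (i = 0; i < 2; i++)
		dst[i] = htonl(src[i]);

	/* On a little-endian host this prints 44332211 ddccbbaa */
	printf("%08x %08x\n", dst[0], dst[1]);
	return 0;
}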
6832 +
6833 +static void send_dummy_pkt_to_hif(void)
6834 +{
6835 + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
6836 + u32 physaddr;
6837 + struct class_rx_hdr_t local_hdr;
6838 + static u32 dummy_pkt[] = {
6839 + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
6840 + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
6841 + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
6842 + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
6843 +
6844 + ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
6845 + if (!ddr_ptr)
6846 + return;
6847 +
6848 + lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
6849 + if (!lmem_ptr)
6850 + return;
6851 +
6852 + pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
6853 + physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
6854 +
6855 + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
6856 +
6857 + local_hdr.phyno = htons(0); /* RX_PHY_0 */
6858 + local_hdr.length = htons(MIN_PKT_SIZE);
6859 +
6860 + local_hdr.next_ptr = htonl((u32)physaddr);
6861 +	/*Mark checksums as correct */
6862 + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
6863 + STATUS_UDP_CHECKSUM_CORRECT |
6864 + STATUS_TCP_CHECKSUM_CORRECT |
6865 + STATUS_UNICAST_HASH_MATCH |
6866 + STATUS_CUMULATIVE_ARC_HIT));
6867 + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
6868 + sizeof(local_hdr));
6869 +
6870 + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
6871 + 0x40);
6872 +
6873 + writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
6874 +}
6875 +
6876 +void pfe_hif_rx_idle(struct pfe_hif *hif)
6877 +{
6878 + int hif_stop_loop = 10;
6879 + u32 rx_status;
6880 +
6881 + pfe_hif_disable_rx_desc(hif);
6882 + pr_info("Bringing hif to idle state...");
6883 + writel(0, HIF_INT_ENABLE);
6884 + /*If HIF Rx BDP is busy send a dummy packet */
6885 + do {
6886 + rx_status = readl(HIF_RX_STATUS);
6887 + if (rx_status & BDP_CSR_RX_DMA_ACTV)
6888 + send_dummy_pkt_to_hif();
6889 +
6890 + usleep_range(100, 150);
6891 + } while (--hif_stop_loop);
6892 +
6893 + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
6894 + pr_info("Failed\n");
6895 + else
6896 + pr_info("Done\n");
6897 +}
6898 +#endif
6899 +
6900 +static void pfe_hif_free_descr(struct pfe_hif *hif)
6901 +{
6902 + pr_info("%s\n", __func__);
6903 +
6904 + dma_free_coherent(pfe->dev,
6905 + hif->rx_ring_size * sizeof(struct hif_desc) +
6906 + hif->tx_ring_size * sizeof(struct hif_desc),
6907 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
6908 +}
6909 +
6910 +void pfe_hif_desc_dump(struct pfe_hif *hif)
6911 +{
6912 + struct hif_desc *desc;
6913 + unsigned long desc_p;
6914 + int ii = 0;
6915 +
6916 + pr_info("%s\n", __func__);
6917 +
6918 + desc = hif->rx_base;
6919 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
6920 + hif->descr_baseaddr_p);
6921 +
6922 + pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
6923 + for (ii = 0; ii < hif->rx_ring_size; ii++) {
6924 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6925 + readl(&desc->status), readl(&desc->ctrl),
6926 + readl(&desc->data), readl(&desc->next));
6927 + desc++;
6928 + }
6929 +
6930 + desc = hif->tx_base;
6931 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
6932 + hif->descr_baseaddr_p);
6933 +
6934 + pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
6935 + for (ii = 0; ii < hif->tx_ring_size; ii++) {
6936 + pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
6937 + readl(&desc->status), readl(&desc->ctrl),
6938 + readl(&desc->data), readl(&desc->next));
6939 + desc++;
6940 + }
6941 +}
6942 +
6943 +/* pfe_hif_release_buffers */
6944 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
6945 +{
6946 + struct hif_desc *desc;
6947 + int i = 0;
6948 +
6949 + hif->rx_base = hif->descr_baseaddr_v;
6950 +
6951 + pr_info("%s\n", __func__);
6952 +
6953 + /*Free Rx buffers */
6954 + desc = hif->rx_base;
6955 + for (i = 0; i < hif->rx_ring_size; i++) {
6956 + if (readl(&desc->data)) {
6957 + if ((i < hif->shm->rx_buf_pool_cnt) &&
6958 + (!hif->shm->rx_buf_pool[i])) {
6959 + /*
6960 + * dma_unmap_single(hif->dev, desc->data,
6961 + * hif->rx_buf_len[i], DMA_FROM_DEVICE);
6962 + */
6963 + dma_unmap_single(hif->dev,
6964 + DDR_PFE_TO_PHYS(
6965 + readl(&desc->data)),
6966 + hif->rx_buf_len[i],
6967 + DMA_FROM_DEVICE);
6968 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
6969 + } else {
6970 + pr_err("%s: buffer pool already full\n"
6971 + , __func__);
6972 + }
6973 + }
6974 +
6975 + writel(0, &desc->data);
6976 + writel(0, &desc->status);
6977 + writel(0, &desc->ctrl);
6978 + desc++;
6979 + }
6980 +}
6981 +
6982 +/*
6983 + * pfe_hif_init_buffers
6984 + * This function initializes the HIF Rx/Tx ring descriptors and
6985 + * initializes the Rx queue with buffers.
6986 + */
6987 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
6988 +{
6989 + struct hif_desc *desc, *first_desc_p;
6990 + u32 data;
6991 + int i = 0;
6992 +
6993 + pr_info("%s\n", __func__);
6994 +
6995 +	/* Check that enough Rx buffers are available in the shared memory */
6996 + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
6997 + return -ENOMEM;
6998 +
6999 + hif->rx_base = hif->descr_baseaddr_v;
7000 + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
7001 +
7002 + /*Initialize Rx descriptors */
7003 + desc = hif->rx_base;
7004 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
7005 +
7006 + for (i = 0; i < hif->rx_ring_size; i++) {
7007 + /* Initialize Rx buffers from the shared memory */
7008 +
7009 + data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
7010 + pfe_pkt_size, DMA_FROM_DEVICE);
7011 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
7012 + hif->rx_buf_len[i] = pfe_pkt_size;
7013 + hif->shm->rx_buf_pool[i] = NULL;
7014 +
7015 + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
7016 + writel(DDR_PHYS_TO_PFE(data), &desc->data);
7017 + } else {
7018 + pr_err("%s : low on mem\n", __func__);
7019 +
7020 + goto err;
7021 + }
7022 +
7023 + writel(0, &desc->status);
7024 +
7025 + /*
7026 + * Ensure everything else is written to DDR before
7027 + * writing bd->ctrl
7028 + */
7029 + wmb();
7030 +
7031 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
7032 + | BD_CTRL_DIR | BD_CTRL_DESC_EN
7033 + | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
7034 +
7035 + /* Chain descriptors */
7036 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7037 + desc++;
7038 + }
7039 +
7040 + /* Overwrite last descriptor to chain it to first one*/
7041 + desc--;
7042 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7043 +
7044 + hif->rxtoclean_index = 0;
7045 +
7046 + /*Initialize Rx buffer descriptor ring base address */
7047 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
7048 +
7049 + hif->tx_base = hif->rx_base + hif->rx_ring_size;
7050 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
7051 + hif->rx_ring_size;
7052 + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
7053 +
7054 + /*Initialize tx descriptors */
7055 + desc = hif->tx_base;
7056 +
7057 + for (i = 0; i < hif->tx_ring_size; i++) {
7058 + /* Chain descriptors */
7059 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
7060 + writel(0, &desc->ctrl);
7061 + desc++;
7062 + }
7063 +
7064 + /* Overwrite last descriptor to chain it to first one */
7065 + desc--;
7066 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
7067 + hif->txavail = hif->tx_ring_size;
7068 + hif->txtosend = 0;
7069 + hif->txtoclean = 0;
7070 + hif->txtoflush = 0;
7071 +
7072 + /*Initialize Tx buffer descriptor ring base address */
7073 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
7074 +
7075 + return 0;
7076 +
7077 +err:
7078 + pfe_hif_release_buffers(hif);
7079 + return -ENOMEM;
7080 +}
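
Throughout this driver, ring and queue indices wrap with `idx & (size - 1)` instead of a modulo, which is only correct when the size is a power of two (HIF_RX_DESC_NT = 256 and HIF_TX_DESC_NT = 2048 both are). A small self-contained sketch of that wrap arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int size = 256;	/* power of two, like HIF_RX_DESC_NT */
	unsigned int idx = 255;

	/* Same wrap arithmetic as the rtc/txtosend updates in pfe_hif.c */
	idx = (idx + 1) & (size - 1);
	printf("wrapped to %u\n", idx);	/* prints 0 */

	/* With a non-power-of-two size the mask fails: (99 + 1) & 99 == 96 */
	return 0;
}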
7081 +
7082 +/*
7083 + * pfe_hif_client_register
7084 + *
7085 + * This function is used to register a client driver with the HIF driver.
7086 + *
7087 + * Return value:
7088 + * 0 - on successful registration
7089 + */
7090 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
7091 + struct hif_client_shm *client_shm)
7092 +{
7093 + struct hif_client *client = &hif->client[client_id];
7094 + u32 i, cnt;
7095 + struct rx_queue_desc *rx_qbase;
7096 + struct tx_queue_desc *tx_qbase;
7097 + struct hif_rx_queue *rx_queue;
7098 + struct hif_tx_queue *tx_queue;
7099 + int err = 0;
7100 +
7101 + pr_info("%s\n", __func__);
7102 +
7103 + spin_lock_bh(&hif->tx_lock);
7104 +
7105 + if (test_bit(client_id, &hif->shm->g_client_status[0])) {
7106 + pr_err("%s: client %d already registered\n",
7107 + __func__, client_id);
7108 + err = -1;
7109 + goto unlock;
7110 + }
7111 +
7112 + memset(client, 0, sizeof(struct hif_client));
7113 +
7114 + /* Initialize client Rx queues baseaddr, size */
7115 +
7116 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
7117 +	/* Check if the client is requesting more queues than supported */
7118 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7119 + cnt = HIF_CLIENT_QUEUES_MAX;
7120 +
7121 + client->rx_qn = cnt;
7122 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
7123 + for (i = 0; i < cnt; i++) {
7124 + rx_queue = &client->rx_q[i];
7125 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
7126 + rx_queue->size = client_shm->rx_qsize;
7127 + rx_queue->write_idx = 0;
7128 + }
7129 +
7130 + /* Initialize client Tx queues baseaddr, size */
7131 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
7132 +
7133 +	/* Check if the client is requesting more queues than supported */
7134 + if (cnt > HIF_CLIENT_QUEUES_MAX)
7135 + cnt = HIF_CLIENT_QUEUES_MAX;
7136 +
7137 + client->tx_qn = cnt;
7138 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
7139 + for (i = 0; i < cnt; i++) {
7140 + tx_queue = &client->tx_q[i];
7141 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
7142 + tx_queue->size = client_shm->tx_qsize;
7143 + tx_queue->ack_idx = 0;
7144 + }
7145 +
7146 + set_bit(client_id, &hif->shm->g_client_status[0]);
7147 +
7148 +unlock:
7149 + spin_unlock_bh(&hif->tx_lock);
7150 +
7151 + return err;
7152 +}
7153 +
7154 +/*
7155 + * pfe_hif_client_unregister
7156 + *
7157 + * This function is used to unregister a client from the HIF driver.
7158 + *
7159 + */
7160 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
7161 +{
7162 + pr_info("%s\n", __func__);
7163 +
7164 + /*
7165 + * Mark client as no longer available (which prevents further packet
7166 +	 * reception for this client)
7167 + */
7168 + spin_lock_bh(&hif->tx_lock);
7169 +
7170 + if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
7171 + pr_err("%s: client %d not registered\n", __func__,
7172 + client_id);
7173 +
7174 + spin_unlock_bh(&hif->tx_lock);
7175 + return;
7176 + }
7177 +
7178 + clear_bit(client_id, &hif->shm->g_client_status[0]);
7179 +
7180 + spin_unlock_bh(&hif->tx_lock);
7181 +}
7182 +
7183 +/*
7184 + * client_put_rxpacket-
7185 + * This function puts the Rx pkt in the given client Rx queue.
7186 + * It actually swaps the Rx pkt into the client Rx descriptor buffer
7187 + * and returns the free buffer taken from it.
7188 + *
7189 + * If the function returns NULL, the client Rx queue is full and
7190 + * the packet could not be sent to the client queue.
7191 + */
7192 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
7193 + u32 flags, u32 client_ctrl, u32 *rem_len)
7194 +{
7195 + void *free_pkt = NULL;
7196 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
7197 +
7198 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7199 + if (page_mode) {
7200 + int rem_page_size = PAGE_SIZE -
7201 + PRESENT_OFST_IN_PAGE(pkt);
7202 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
7203 + pfe_pkt_headroom);
7204 + *rem_len = (rem_page_size - cur_pkt_size);
7205 + if (*rem_len) {
7206 + free_pkt = pkt + cur_pkt_size;
7207 + get_page(virt_to_page(free_pkt));
7208 + } else {
7209 + free_pkt = (void
7210 + *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
7211 + *rem_len = pfe_pkt_size;
7212 + }
7213 + } else {
7214 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
7215 + GFP_DMA_PFE);
7216 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
7217 + }
7218 +
7219 + if (free_pkt) {
7220 + desc->data = pkt;
7221 + desc->client_ctrl = client_ctrl;
7222 + /*
7223 + * Ensure everything else is written to DDR before
7224 + * writing bd->ctrl
7225 + */
7226 + smp_wmb();
7227 + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
7228 + queue->write_idx = (queue->write_idx + 1)
7229 + & (queue->size - 1);
7230 +
7231 + free_pkt += pfe_pkt_headroom;
7232 + }
7233 + }
7234 +
7235 + return free_pkt;
7236 +}
7237 +
7238 +/*
7239 + * pfe_hif_rx_process-
7240 + * This function processes the PFE HIF Rx queue. It dequeues packets
7241 + * from the Rx ring and sends them to the corresponding client queues.
7242 + */
7243 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
7244 +{
7245 + struct hif_desc *desc;
7246 + struct hif_hdr *pkt_hdr;
7247 + struct __hif_hdr hif_hdr;
7248 + void *free_buf;
7249 + int rtc, len, rx_processed = 0;
7250 + struct __hif_desc local_desc;
7251 + int flags;
7252 + unsigned int desc_p;
7253 + unsigned int buf_size = 0;
7254 +
7255 + spin_lock_bh(&hif->lock);
7256 +
7257 + rtc = hif->rxtoclean_index;
7258 +
7259 + while (rx_processed < budget) {
7260 + desc = hif->rx_base + rtc;
7261 +
7262 + __memcpy12(&local_desc, desc);
7263 +
7264 + /* ACK pending Rx interrupt */
7265 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
7266 + writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
7267 +
7268 + if (rx_processed == 0) {
7269 + if (napi_first_batch == 1) {
7270 + desc_p = hif->descr_baseaddr_p +
7271 + ((unsigned long int)(desc) -
7272 + (unsigned long
7273 + int)hif->descr_baseaddr_v);
7274 + napi_first_batch = 0;
7275 + }
7276 + }
7277 +
7278 + __memcpy12(&local_desc, desc);
7279 +
7280 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
7281 + break;
7282 + }
7283 +
7284 + napi_first_batch = 0;
7285 +
7286 +#ifdef HIF_NAPI_STATS
7287 + hif->napi_counters[NAPI_DESC_COUNT]++;
7288 +#endif
7289 + len = BD_BUF_LEN(local_desc.ctrl);
7290 + /*
7291 + * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7292 + * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7293 + */
7294 + dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
7295 + hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
7296 +
7297 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
7298 +
7299 +		/* First buffer of a packet: parse the HIF header */
7300 + if (!hif->started) {
7301 + hif->started = 1;
7302 +
7303 + __memcpy8(&hif_hdr, pkt_hdr);
7304 +
7305 + hif->qno = hif_hdr.hdr.q_num;
7306 + hif->client_id = hif_hdr.hdr.client_id;
7307 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
7308 + hif_hdr.hdr.client_ctrl;
7309 + flags = CL_DESC_FIRST;
7310 +
7311 + } else {
7312 + flags = 0;
7313 + }
7314 +
7315 + if (local_desc.ctrl & BD_CTRL_LIFM)
7316 + flags |= CL_DESC_LAST;
7317 +
7318 +		/* Check that the client id is valid and still registered */
7319 + if ((hif->client_id >= HIF_CLIENTS_MAX) ||
7320 + !(test_bit(hif->client_id,
7321 + &hif->shm->g_client_status[0]))) {
7322 + printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
7323 + __func__,
7324 + hif->client_id,
7325 + hif->qno);
7326 +
7327 + free_buf = pkt_hdr;
7328 +
7329 + goto pkt_drop;
7330 + }
7331 +
7332 +		/* Check for a valid queue number */
7333 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
7334 + pr_info("%s: packet with invalid queue: %d\n"
7335 + , __func__, hif->qno);
7336 + hif->qno = 0;
7337 + }
7338 +
7339 + free_buf =
7340 + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
7341 + (void *)pkt_hdr, len, flags,
7342 + hif->client_ctrl, &buf_size);
7343 +
7344 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
7345 + hif->qno);
7346 +
7347 + if (unlikely(!free_buf)) {
7348 +#ifdef HIF_NAPI_STATS
7349 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
7350 +#endif
7351 + /*
7352 + * If we want to keep in polling mode to retry later,
7353 + * we need to tell napi that we consumed
7354 + * the full budget or we will hit a livelock scenario.
7355 + * The core code keeps this napi instance
7356 + * at the head of the list and none of the other
7357 + * instances get to run
7358 + */
7359 + rx_processed = budget;
7360 +
7361 + if (flags & CL_DESC_FIRST)
7362 + hif->started = 0;
7363 +
7364 + break;
7365 + }
7366 +
7367 +pkt_drop:
7368 + /*Fill free buffer in the descriptor */
7369 + hif->rx_buf_addr[rtc] = free_buf;
7370 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
7371 + writel((DDR_PHYS_TO_PFE
7372 + ((u32)dma_map_single(hif->dev,
7373 + free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
7374 + &desc->data);
7375 + /*
7376 + * Ensure everything else is written to DDR before
7377 + * writing bd->ctrl
7378 + */
7379 + wmb();
7380 + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
7381 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
7382 + &desc->ctrl);
7383 +
7384 + rtc = (rtc + 1) & (hif->rx_ring_size - 1);
7385 +
7386 + if (local_desc.ctrl & BD_CTRL_LIFM) {
7387 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
7388 + rx_processed++;
7389 +
7390 +#ifdef HIF_NAPI_STATS
7391 + hif->napi_counters[NAPI_PACKET_COUNT]++;
7392 +#endif
7393 + }
7394 + hif->started = 0;
7395 + }
7396 + }
7397 +
7398 + hif->rxtoclean_index = rtc;
7399 + spin_unlock_bh(&hif->lock);
7400 +
7401 + /* we made some progress, re-start rx dma in case it stopped */
7402 + hif_rx_dma_start();
7403 +
7404 + return rx_processed;
7405 +}
7406 +
7407 +/*
7408 + * client_ack_txpacket-
7409 + * This function acks the Tx packet in the given client Tx queue by resetting
7410 + * the ownership bit in the descriptor.
7411 + */
7412 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
7413 + unsigned int q_no)
7414 +{
7415 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
7416 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
7417 +
7418 + if (readl(&desc->ctrl) & CL_DESC_OWN) {
7419 + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
7420 + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
7421 +
7422 + return 0;
7423 +
7424 + } else {
7425 + /*This should not happen */
7426 + pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
7427 + hif->txtosend, hif->txtoclean, hif->txavail,
7428 + client_id, q_no, queue, queue->ack_idx);
7429 + WARN(1, "%s: doesn't own this descriptor", __func__);
7430 + return 1;
7431 + }
7432 +}
7433 +
7434 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
7435 +{
7436 + struct hif_desc *desc;
7437 + struct hif_desc_sw *desc_sw;
7438 + int ttc, tx_avl;
7439 + int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
7440 +
7441 + ttc = hif->txtoclean;
7442 + tx_avl = hif->txavail;
7443 +
7444 + while ((tx_avl < hif->tx_ring_size) && count--) {
7445 + desc = hif->tx_base + ttc;
7446 +
7447 + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
7448 + break;
7449 +
7450 + desc_sw = &hif->tx_sw_queue[ttc];
7451 +
7452 + if (desc_sw->data) {
7453 + /*
7454 + * dmap_unmap_single(hif->dev, desc_sw->data,
7455 + * desc_sw->len, DMA_TO_DEVICE);
7456 + */
7457 + dma_unmap_single(hif->dev, desc_sw->data,
7458 + desc_sw->len, DMA_TO_DEVICE);
7459 + }
7460 +
7461 +		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
7462 + pr_err("Invalid cl id %d\n", desc_sw->client_id);
7463 +
7464 + pkts_done[desc_sw->client_id]++;
7465 +
7466 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
7467 +
7468 + ttc = (ttc + 1) & (hif->tx_ring_size - 1);
7469 + tx_avl++;
7470 + }
7471 +
7472 + if (pkts_done[0])
7473 + hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
7474 + if (pkts_done[1])
7475 + hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
7476 +
7477 + hif->txtoclean = ttc;
7478 + hif->txavail = tx_avl;
7479 +
7480 + if (!count) {
7481 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7482 + } else {
7483 + /*Enable Tx done interrupt */
7484 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
7485 + HIF_INT_ENABLE);
7486 + }
7487 +}
7488 +
7489 +static void pfe_tx_do_cleanup(unsigned long data)
7490 +{
7491 + struct pfe_hif *hif = (struct pfe_hif *)data;
7492 +
7493 + writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
7494 +
7495 + hif_tx_done_process(hif, 64);
7496 +}
7497 +
7498 +/*
7499 + * __hif_xmit_pkt -
7500 + * This function puts one packet in the HIF Tx queue
7501 + */
7502 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7503 + q_no, void *data, u32 len, unsigned int flags)
7504 +{
7505 + struct hif_desc *desc;
7506 + struct hif_desc_sw *desc_sw;
7507 +
7508 + desc = hif->tx_base + hif->txtosend;
7509 + desc_sw = &hif->tx_sw_queue[hif->txtosend];
7510 +
7511 + desc_sw->len = len;
7512 + desc_sw->client_id = client_id;
7513 + desc_sw->q_no = q_no;
7514 + desc_sw->flags = flags;
7515 +
7516 + if (flags & HIF_DONT_DMA_MAP) {
7517 + desc_sw->data = 0;
7518 + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
7519 + } else {
7520 + desc_sw->data = dma_map_single(hif->dev, data, len,
7521 + DMA_TO_DEVICE);
7522 + writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
7523 + }
7524 +
7525 + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
7526 + hif->txavail--;
7527 +
7528 + if ((!((flags & HIF_DATA_VALID) && (flags &
7529 + HIF_LAST_BUFFER))))
7530 + goto skip_tx;
7531 +
7532 + /*
7533 + * Ensure everything else is written to DDR before
7534 + * writing bd->ctrl
7535 + */
7536 + wmb();
7537 +
7538 + do {
7539 + desc_sw = &hif->tx_sw_queue[hif->txtoflush];
7540 + desc = hif->tx_base + hif->txtoflush;
7541 +
7542 + if (desc_sw->flags & HIF_LAST_BUFFER) {
7543 + writel((BD_CTRL_LIFM |
7544 + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
7545 + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
7546 + BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
7547 + &desc->ctrl);
7548 + } else {
7549 + writel((BD_CTRL_DESC_EN |
7550 + BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
7551 + }
7552 + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
7553 +	} while (hif->txtoflush != hif->txtosend);
7556 +
7557 +skip_tx:
7558 + return;
7559 +}
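
The flush loop above enables descriptors only once the final fragment of a packet is queued (both HIF_DATA_VALID and HIF_LAST_BUFFER set), so the hardware never observes a partially built descriptor chain. A self-contained sketch of that deferred-enable pattern, with simplified flags standing in for the real HIF_*/BD_CTRL_* bits:

#include <stdio.h>

#define RING_SZ		8	/* power of two, stands in for HIF_TX_DESC_NT */
#define F_LAST		0x1	/* stands in for HIF_LAST_BUFFER */
#define F_ENABLED	0x2	/* stands in for BD_CTRL_DESC_EN */

static unsigned int ctrl[RING_SZ];
static unsigned int tosend, toflush;

static void xmit_frag(unsigned int flags)
{
	ctrl[tosend] = flags;			/* queue, but don't enable yet */
	tosend = (tosend + 1) & (RING_SZ - 1);

	if (!(flags & F_LAST))
		return;				/* wait for the last fragment */

	do {					/* enable the whole chain in order */
		ctrl[toflush] |= F_ENABLED;
		toflush = (toflush + 1) & (RING_SZ - 1);
	} while (toflush != tosend);
}

int main(void)
{
	xmit_frag(0);		/* first fragment: stays disabled */
	xmit_frag(0);		/* middle fragment: stays disabled */
	xmit_frag(F_LAST);	/* last fragment: all three get enabled */
	printf("%x %x %x\n", ctrl[0], ctrl[1], ctrl[2]);	/* 2 2 3 */
	return 0;
}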
7560 +
7561 +static irqreturn_t wol_isr(int irq, void *dev_id)
7562 +{
7563 + pr_info("WoL\n");
7564 + gemac_set_wol(EMAC1_BASE_ADDR, 0);
7565 + gemac_set_wol(EMAC2_BASE_ADDR, 0);
7566 + return IRQ_HANDLED;
7567 +}
7568 +
7569 +/*
7570 + * hif_isr-
7571 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
7572 + */
7573 +static irqreturn_t hif_isr(int irq, void *dev_id)
7574 +{
7575 + struct pfe_hif *hif = (struct pfe_hif *)dev_id;
7576 + int int_status;
7577 + int int_enable_mask;
7578 +
7579 + /*Read hif interrupt source register */
7580 + int_status = readl_relaxed(HIF_INT_SRC);
7581 + int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
7582 +
7583 + if ((int_status & HIF_INT) == 0)
7584 + return IRQ_NONE;
7585 +
7586 + int_status &= ~(HIF_INT);
7587 +
7588 + if (int_status & HIF_RXPKT_INT) {
7589 + int_status &= ~(HIF_RXPKT_INT);
7590 + int_enable_mask &= ~(HIF_RXPKT_INT);
7591 +
7592 + napi_first_batch = 1;
7593 +
7594 + if (napi_schedule_prep(&hif->napi)) {
7595 +#ifdef HIF_NAPI_STATS
7596 + hif->napi_counters[NAPI_SCHED_COUNT]++;
7597 +#endif
7598 + __napi_schedule(&hif->napi);
7599 + }
7600 + }
7601 +
7602 + if (int_status & HIF_TXPKT_INT) {
7603 + int_status &= ~(HIF_TXPKT_INT);
7604 + int_enable_mask &= ~(HIF_TXPKT_INT);
7605 +		/*Schedule tx cleanup tasklet */
7606 + tasklet_schedule(&hif->tx_cleanup_tasklet);
7607 + }
7608 +
7609 + /*Disable interrupts, they will be enabled after they are serviced */
7610 + writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
7611 +
7612 + if (int_status) {
7613 + pr_info("%s : Invalid interrupt : %d\n", __func__,
7614 + int_status);
7615 + writel(int_status, HIF_INT_SRC);
7616 + }
7617 +
7618 + return IRQ_HANDLED;
7619 +}
7620 +
7621 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
7622 +{
7623 + unsigned int client_id = data1;
7624 +
7625 + if (client_id >= HIF_CLIENTS_MAX) {
7626 + pr_err("%s: client id %d out of bounds\n", __func__,
7627 + client_id);
7628 + return;
7629 + }
7630 +
7631 + switch (req) {
7632 + case REQUEST_CL_REGISTER:
7633 + /* Request for register a client */
7634 + pr_info("%s: register client_id %d\n",
7635 + __func__, client_id);
7636 + pfe_hif_client_register(hif, client_id, (struct
7637 + hif_client_shm *)&hif->shm->client[client_id]);
7638 + break;
7639 +
7640 + case REQUEST_CL_UNREGISTER:
7641 + pr_info("%s: unregister client_id %d\n",
7642 + __func__, client_id);
7643 +
7644 + /* Request for unregister a client */
7645 + pfe_hif_client_unregister(hif, client_id);
7646 +
7647 + break;
7648 +
7649 + default:
7650 + pr_err("%s: unsupported request %d\n",
7651 + __func__, req);
7652 + break;
7653 + }
7654 +
7655 + /*
7656 + * Process client Tx queues
7657 +	 * Currently we don't check for pending Tx
7658 + */
7659 +}
7660 +
7661 +/*
7662 + * pfe_hif_rx_poll
7663 + * This is the NAPI poll function that processes the HIF Rx queue.
7664 + */
7665 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
7666 +{
7667 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
7668 + int work_done;
7669 +
7670 +#ifdef HIF_NAPI_STATS
7671 + hif->napi_counters[NAPI_POLL_COUNT]++;
7672 +#endif
7673 +
7674 + work_done = pfe_hif_rx_process(hif, budget);
7675 +
7676 + if (work_done < budget) {
7677 + napi_complete(napi);
7678 + writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
7679 + HIF_INT_ENABLE);
7680 + }
7681 +#ifdef HIF_NAPI_STATS
7682 + else
7683 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
7684 +#endif
7685 +
7686 + return work_done;
7687 +}
7688 +
7689 +/*
7690 + * pfe_hif_init
7691 + * This function initializes the base addresses, IRQ, etc.
7692 + */
7693 +int pfe_hif_init(struct pfe *pfe)
7694 +{
7695 + struct pfe_hif *hif = &pfe->hif;
7696 + int err;
7697 +
7698 + pr_info("%s\n", __func__);
7699 +
7700 + hif->dev = pfe->dev;
7701 + hif->irq = pfe->hif_irq;
7702 +
7703 + err = pfe_hif_alloc_descr(hif);
7704 + if (err)
7705 + goto err0;
7706 +
7707 + if (pfe_hif_init_buffers(hif)) {
7708 + pr_err("%s: Could not initialize buffer descriptors\n"
7709 + , __func__);
7710 + err = -ENOMEM;
7711 + goto err1;
7712 + }
7713 +
7714 + /* Initialize NAPI for Rx processing */
7715 + init_dummy_netdev(&hif->dummy_dev);
7716 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
7717 + HIF_RX_POLL_WEIGHT);
7718 + napi_enable(&hif->napi);
7719 +
7720 + spin_lock_init(&hif->tx_lock);
7721 + spin_lock_init(&hif->lock);
7722 +
7723 + hif_init();
7724 + hif_rx_enable();
7725 + hif_tx_enable();
7726 +
7727 +	/* Enable HIF interrupts */
7728 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
7729 +
7730 + gpi_enable(HGPI_BASE_ADDR);
7731 +
7732 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
7733 + if (err) {
7734 + pr_err("%s: failed to get the hif IRQ = %d\n",
7735 + __func__, hif->irq);
7736 + goto err1;
7737 + }
7738 +
7739 + err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
7740 + if (err) {
7741 + pr_err("%s: failed to get the wol IRQ = %d\n",
7742 + __func__, pfe->wol_irq);
7743 + goto err1;
7744 + }
7745 +
7746 + tasklet_init(&hif->tx_cleanup_tasklet,
7747 + (void(*)(unsigned long))pfe_tx_do_cleanup,
7748 + (unsigned long)hif);
7749 +
7750 + return 0;
7751 +err1:
7752 + pfe_hif_free_descr(hif);
7753 +err0:
7754 + return err;
7755 +}
7756 +
7757 +/* pfe_hif_exit- */
7758 +void pfe_hif_exit(struct pfe *pfe)
7759 +{
7760 + struct pfe_hif *hif = &pfe->hif;
7761 +
7762 + pr_info("%s\n", __func__);
7763 +
7764 + tasklet_kill(&hif->tx_cleanup_tasklet);
7765 +
7766 + spin_lock_bh(&hif->lock);
7767 + hif->shm->g_client_status[0] = 0;
7768 + /* Make sure all clients are disabled*/
7769 + hif->shm->g_client_status[1] = 0;
7770 +
7771 + spin_unlock_bh(&hif->lock);
7772 +
7773 + /*Disable Rx/Tx */
7774 + gpi_disable(HGPI_BASE_ADDR);
7775 + hif_rx_disable();
7776 + hif_tx_disable();
7777 +
7778 + napi_disable(&hif->napi);
7779 + netif_napi_del(&hif->napi);
7780 +
7781 + free_irq(pfe->wol_irq, pfe);
7782 + free_irq(hif->irq, hif);
7783 +
7784 + pfe_hif_release_buffers(hif);
7785 + pfe_hif_free_descr(hif);
7786 +}
7787 --- /dev/null
7788 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
7789 @@ -0,0 +1,211 @@
7790 +/*
7791 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
7792 + * Copyright 2017 NXP
7793 + *
7794 + * This program is free software; you can redistribute it and/or modify
7795 + * it under the terms of the GNU General Public License as published by
7796 + * the Free Software Foundation; either version 2 of the License, or
7797 + * (at your option) any later version.
7798 + *
7799 + * This program is distributed in the hope that it will be useful,
7800 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7801 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7802 + * GNU General Public License for more details.
7803 + *
7804 + * You should have received a copy of the GNU General Public License
7805 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
7806 + */
7807 +
7808 +#ifndef _PFE_HIF_H_
7809 +#define _PFE_HIF_H_
7810 +
7811 +#include <linux/netdevice.h>
7812 +
7813 +#define HIF_NAPI_STATS
7814 +
7815 +#define HIF_CLIENT_QUEUES_MAX 16
7816 +#define HIF_RX_POLL_WEIGHT 64
7817 +
7818 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
7819 +#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
7820 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
7821 + & HIF_RX_PKT_MIN_SIZE_MASK)
7822 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
7823 + - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
7824 +
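These macros carve each page into fixed 2 KiB slots: ROUND_MIN_RX_SIZE() rounds a length up to the next HIF_RX_PKT_MIN_SIZE boundary, and PRESENT_OFST_IN_PAGE() truncates a buffer's in-page offset down to its 2 KiB slot. A worked user-space sketch of the arithmetic (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SZ			4096UL
#define MIN_RX_SZ		0x800UL		/* 2KB, as HIF_RX_PKT_MIN_SIZE */
#define MIN_RX_SZ_MASK		(~(MIN_RX_SZ - 1))
#define ROUND_MIN_RX(_sz)	(((_sz) + (MIN_RX_SZ - 1)) & MIN_RX_SZ_MASK)
#define OFST_IN_PAGE(_off)	(((_off) & (PAGE_SZ - 1)) & MIN_RX_SZ_MASK)

int main(void)
{
	/* A 1600-byte packet still consumes a full 2048-byte slot */
	printf("%lu\n", ROUND_MIN_RX(1600UL));		/* 2048 */

	/* A buffer at page offset 2048 + 128 sits in the second slot */
	printf("%lu\n", OFST_IN_PAGE(2048UL + 128UL));	/* 2048 */
	return 0;
}
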
7825 +enum {
7826 + NAPI_SCHED_COUNT = 0,
7827 + NAPI_POLL_COUNT,
7828 + NAPI_PACKET_COUNT,
7829 + NAPI_DESC_COUNT,
7830 + NAPI_FULL_BUDGET_COUNT,
7831 + NAPI_CLIENT_FULL_COUNT,
7832 + NAPI_MAX_COUNT
7833 +};
7834 +
7835 +/*
7836 + * The HIF_TX_DESC_NT value should always be greater than 4;
7837 + * otherwise HIF_TX_POLL_MARK will become zero.
7838 + */
7839 +#define HIF_RX_DESC_NT 256
7840 +#define HIF_TX_DESC_NT 2048
7841 +
7842 +#define HIF_FIRST_BUFFER BIT(0)
7843 +#define HIF_LAST_BUFFER BIT(1)
7844 +#define HIF_DONT_DMA_MAP BIT(2)
7845 +#define HIF_DATA_VALID BIT(3)
7846 +#define HIF_TSO BIT(4)
7847 +
7848 +enum {
7849 + PFE_CL_GEM0 = 0,
7850 + PFE_CL_GEM1,
7851 + HIF_CLIENTS_MAX
7852 +};
7853 +
7854 +/*structure to store client queue info */
7855 +struct hif_rx_queue {
7856 + struct rx_queue_desc *base;
7857 + u32 size;
7858 + u32 write_idx;
7859 +};
7860 +
7861 +struct hif_tx_queue {
7862 + struct tx_queue_desc *base;
7863 + u32 size;
7864 + u32 ack_idx;
7865 +};
7866 +
7867 +/*Structure to store the client info */
7868 +struct hif_client {
7869 + int rx_qn;
7870 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
7871 + int tx_qn;
7872 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
7873 +};
7874 +
7875 +/*HIF hardware buffer descriptor */
7876 +struct hif_desc {
7877 + u32 ctrl;
7878 + u32 status;
7879 + u32 data;
7880 + u32 next;
7881 +};
7882 +
7883 +struct __hif_desc {
7884 + u32 ctrl;
7885 + u32 status;
7886 + u32 data;
7887 +};
7888 +
7889 +struct hif_desc_sw {
7890 + dma_addr_t data;
7891 + u16 len;
7892 + u8 client_id;
7893 + u8 q_no;
7894 + u16 flags;
7895 +};
7896 +
7897 +struct hif_hdr {
7898 + u8 client_id;
7899 + u8 q_num;
7900 + u16 client_ctrl;
7901 + u16 client_ctrl1;
7902 +};
7903 +
7904 +struct __hif_hdr {
7905 + union {
7906 + struct hif_hdr hdr;
7907 + u32 word[2];
7908 + };
7909 +};
7910 +
7911 +struct hif_ipsec_hdr {
7912 + u16 sa_handle[2];
7913 +} __packed;
7914 +
7915 +/* HIF_CTRL_TX... defines */
7916 +#define HIF_CTRL_TX_CHECKSUM BIT(2)
7917 +
7918 +/* HIF_CTRL_RX... defines */
7919 +#define HIF_CTRL_RX_OFFSET_OFST (24)
7920 +#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
7921 +#define HIF_CTRL_RX_CONTINUED BIT(1)
7922 +
7923 +struct pfe_hif {
7924 + /* To store registered clients in hif layer */
7925 + struct hif_client client[HIF_CLIENTS_MAX];
7926 + struct hif_shm *shm;
7927 + int irq;
7928 +
7929 + void *descr_baseaddr_v;
7930 + unsigned long descr_baseaddr_p;
7931 +
7932 + struct hif_desc *rx_base;
7933 + u32 rx_ring_size;
7934 + u32 rxtoclean_index;
7935 + void *rx_buf_addr[HIF_RX_DESC_NT];
7936 + int rx_buf_len[HIF_RX_DESC_NT];
7937 + unsigned int qno;
7938 + unsigned int client_id;
7939 + unsigned int client_ctrl;
7940 + unsigned int started;
7941 +
7942 + struct hif_desc *tx_base;
7943 + u32 tx_ring_size;
7944 + u32 txtosend;
7945 + u32 txtoclean;
7946 + u32 txavail;
7947 + u32 txtoflush;
7948 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
7949 +
7950 +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
7951 + spinlock_t tx_lock;
7952 +/* lock synchronizes hif rx queue processing */
7953 + spinlock_t lock;
7954 + struct net_device dummy_dev;
7955 + struct napi_struct napi;
7956 + struct device *dev;
7957 +
7958 +#ifdef HIF_NAPI_STATS
7959 + unsigned int napi_counters[NAPI_MAX_COUNT];
7960 +#endif
7961 + struct tasklet_struct tx_cleanup_tasklet;
7962 +};
7963 +
7964 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
7965 + q_no, void *data, u32 len, unsigned int flags);
7966 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
7967 + void *data, unsigned int len);
7968 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
7969 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
7970 + data2);
7971 +int pfe_hif_init(struct pfe *pfe);
7972 +void pfe_hif_exit(struct pfe *pfe);
7973 +void pfe_hif_rx_idle(struct pfe_hif *hif);
7974 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
7975 +{
7976 + spin_lock_bh(&hif->tx_lock);
7977 + __hif_tx_done_process(hif, count);
7978 + spin_unlock_bh(&hif->tx_lock);
7979 +}
7980 +
7981 +static inline void hif_tx_lock(struct pfe_hif *hif)
7982 +{
7983 + spin_lock_bh(&hif->tx_lock);
7984 +}
7985 +
7986 +static inline void hif_tx_unlock(struct pfe_hif *hif)
7987 +{
7988 + spin_unlock_bh(&hif->tx_lock);
7989 +}
7990 +
7991 +static inline int __hif_tx_avail(struct pfe_hif *hif)
7992 +{
7993 + return hif->txavail;
7994 +}
7995 +
7996 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
7997 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
7998 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
7999 +
8000 +#endif /* _PFE_HIF_H_ */
8001 --- /dev/null
8002 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
8003 @@ -0,0 +1,640 @@
8004 +/*
8005 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8006 + * Copyright 2017 NXP
8007 + *
8008 + * This program is free software; you can redistribute it and/or modify
8009 + * it under the terms of the GNU General Public License as published by
8010 + * the Free Software Foundation; either version 2 of the License, or
8011 + * (at your option) any later version.
8012 + *
8013 + * This program is distributed in the hope that it will be useful,
8014 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8015 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8016 + * GNU General Public License for more details.
8017 + *
8018 + * You should have received a copy of the GNU General Public License
8019 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8020 + */
8021 +
8022 +#include <linux/version.h>
8023 +#include <linux/kernel.h>
8024 +#include <linux/slab.h>
8025 +#include <linux/interrupt.h>
8026 +#include <linux/workqueue.h>
8027 +#include <linux/dma-mapping.h>
8028 +#include <linux/dmapool.h>
8029 +#include <linux/sched.h>
8030 +#include <linux/skbuff.h>
8031 +#include <linux/moduleparam.h>
8032 +#include <linux/cpu.h>
8033 +
8034 +#include "pfe_mod.h"
8035 +#include "pfe_hif.h"
8036 +#include "pfe_hif_lib.h"
8037 +
8038 +unsigned int lro_mode;
8039 +unsigned int page_mode;
8040 +unsigned int tx_qos = 1;
8041 +module_param(tx_qos, uint, 0444);
8042 +MODULE_PARM_DESC(tx_qos, "0: disable,\n"
8043 + "1: enable (default), guarantee no packet drop at TMU level\n");
8044 +unsigned int pfe_pkt_size;
8045 +unsigned int pfe_pkt_headroom;
8046 +unsigned int emac_txq_cnt;
8047 +
8048 +/*
8049 + * @file pfe_hif_lib.c
8050 + * Common functions used by HIF client drivers
8051 + */
8052 +
8053 +/*HIF shared memory Global variable */
8054 +struct hif_shm ghif_shm;
8055 +
8056 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
8057 + * This function should be called after pfe_hif_exit
8058 + *
8059 + * @param[in] hif_shm Shared memory address location in DDR
8060 + */
8061 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
8062 +{
8063 + int i;
8064 + void *pkt;
8065 +
8066 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8067 + pkt = hif_shm->rx_buf_pool[i];
8068 + if (pkt) {
8069 + hif_shm->rx_buf_pool[i] = NULL;
8070 + pkt -= pfe_pkt_headroom;
8071 +
8072 + if (page_mode)
8073 + put_page(virt_to_page(pkt));
8074 + else
8075 + kfree(pkt);
8076 + }
8077 + }
8078 +}
8079 +
8080 +/* Initialize shared memory used between HIF driver and clients,
8081 + * allocate rx_buffer_pool required for HIF Rx descriptors.
8082 + * This function should be called before initializing HIF driver.
8083 + *
8084 + * @param[in] hif_shm Shared memory address location in DDR
8085 + * @return 0 - on success, <0 - on failure to initialize
8086 + */
8087 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
8088 +{
8089 + int i;
8090 + void *pkt;
8091 +
8092 + memset(hif_shm, 0, sizeof(struct hif_shm));
8093 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
8094 +
8095 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
8096 + if (page_mode) {
8097 + pkt = (void *)__get_free_page(GFP_KERNEL |
8098 + GFP_DMA_PFE);
8099 + } else {
8100 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
8101 + }
8102 +
8103 + if (pkt)
8104 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
8105 + else
8106 + goto err0;
8107 + }
8108 +
8109 + return 0;
8110 +
8111 +err0:
8112 + pr_err("%s Low memory\n", __func__);
8113 + pfe_hif_shm_clean(hif_shm);
8114 + return -ENOMEM;
8115 +}
8116 +
8117 +/*This function sends an indication to the HIF driver
8118 + *
8119 + * @param[in] hif hif context
8120 + */
8121 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
8122 + data2)
8123 +{
8124 + hif_process_client_req(hif, req, data1, data2);
8125 +}
8126 +
8127 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
8128 +{
8129 + struct hif_client_s *client = pfe->hif_client[client_id];
8130 +
8131 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
8132 + HIF_CLIENT_QUEUES_MAX))
8133 + return;
8134 +
8135 + if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
8136 + client->event_handler(client->priv, event_type, qno);
8137 +}
8138 +
8139 +/*This function releases Rx queue descriptor memory and pre-filled buffers
8140 + *
8141 + * @param[in] client hif_client context
8142 + */
8143 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
8144 +{
8145 + struct rx_queue_desc *desc;
8146 + int qno, ii;
8147 + void *buf;
8148 +
8149 + for (qno = 0; qno < client->rx_qn; qno++) {
8150 + desc = client->rx_q[qno].base;
8151 +
8152 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
8153 + buf = (void *)desc->data;
8154 + if (buf) {
8155 + buf -= pfe_pkt_headroom;
8156 +
8157 + if (page_mode)
8158 + free_page((unsigned long)buf);
8159 + else
8160 + kfree(buf);
8161 +
8162 + desc->ctrl = 0;
8163 + }
8164 +
8165 + desc++;
8166 + }
8167 + }
8168 +
8169 + kfree(client->rx_qbase);
8170 +}
8171 +
8172 +/*This function allocates memory for the rxq descriptors and pre-fills rx queues
8173 + * with buffers.
8174 + * @param[in] client client context
8175 + * @param[in] q_size size of the rxQ, all queues are of same size
8176 + */
8177 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
8178 + q_size)
8179 +{
8180 + struct rx_queue_desc *desc;
8181 + struct hif_client_rx_queue *queue;
8182 + int ii, qno;
8183 +
8184 + /*Allocate memory for the client queues */
8185 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
8186 + rx_queue_desc), GFP_KERNEL);
8187 + if (!client->rx_qbase)
8188 + goto err;
8189 +
8190 + for (qno = 0; qno < client->rx_qn; qno++) {
8191 + queue = &client->rx_q[qno];
8192 +
8193 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct
8194 + rx_queue_desc);
8195 + queue->size = q_size;
8196 + queue->read_idx = 0;
8197 + queue->write_idx = 0;
8198 +
8199 + pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
8200 + queue->base, queue->size);
8201 + }
8202 +
8203 + for (qno = 0; qno < client->rx_qn; qno++) {
8204 + queue = &client->rx_q[qno];
8205 + desc = queue->base;
8206 +
8207 + for (ii = 0; ii < queue->size; ii++) {
8208 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
8209 + CL_DESC_OWN;
8210 + desc++;
8211 + }
8212 + }
8213 +
8214 + return 0;
8215 +
8216 +err:
8217 + return 1;
8218 +}
8219 +
8220 +
8221 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
8222 +{
8223 + pr_debug("%s\n", __func__);
8224 +
8225 + /*
8226 + * Check if there are any pending packets. Client must flush the tx
8227 +	 * queues before unregistering, by calling
8228 +	 * hif_lib_tx_get_next_complete()
8229 +	 *
8230 +	 * HIF no longer delivers events since we are no longer registered
8231 + */
8232 + if (queue->tx_pending)
8233 + pr_err("%s: pending transmit packets\n", __func__);
8234 +}
8235 +
8236 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
8237 +{
8238 + int qno;
8239 +
8240 + pr_debug("%s\n", __func__);
8241 +
8242 + for (qno = 0; qno < client->tx_qn; qno++)
8243 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
8244 +
8245 + kfree(client->tx_qbase);
8246 +}
8247 +
8248 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
8249 + q_size)
8250 +{
8251 + struct hif_client_tx_queue *queue;
8252 + int qno;
8253 +
8254 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
8255 + tx_queue_desc), GFP_KERNEL);
8256 + if (!client->tx_qbase)
8257 + return 1;
8258 +
8259 + for (qno = 0; qno < client->tx_qn; qno++) {
8260 + queue = &client->tx_q[qno];
8261 +
8262 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct
8263 + tx_queue_desc);
8264 + queue->size = q_size;
8265 + queue->read_idx = 0;
8266 + queue->write_idx = 0;
8267 + queue->tx_pending = 0;
8268 + queue->nocpy_flag = 0;
8269 + queue->prev_tmu_tx_pkts = 0;
8270 + queue->done_tmu_tx_pkts = 0;
8271 +
8272 + pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
8273 + queue->base, queue->size);
8274 + }
8275 +
8276 + return 0;
8277 +}
8278 +
8279 +static int hif_lib_event_dummy(void *priv, int event_type, int qno)
8280 +{
8281 + return 0;
8282 +}
8283 +
8284 +int hif_lib_client_register(struct hif_client_s *client)
8285 +{
8286 + struct hif_shm *hif_shm;
8287 + struct hif_client_shm *client_shm;
8288 + int err, i;
8289 + /* int loop_cnt = 0; */
8290 +
8291 + pr_debug("%s\n", __func__);
8292 +
8293 + /*Allocate memory before spin_lock*/
8294 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
8295 + err = -ENOMEM;
8296 + goto err_rx;
8297 + }
8298 +
8299 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
8300 + err = -ENOMEM;
8301 + goto err_tx;
8302 + }
8303 +
8304 + spin_lock_bh(&pfe->hif.lock);
8305 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
8306 + (pfe->hif_client[client->id])) {
8307 + err = -EINVAL;
8308 + goto err;
8309 + }
8310 +
8311 + hif_shm = client->pfe->hif.shm;
8312 +
8313 + if (!client->event_handler)
8314 + client->event_handler = hif_lib_event_dummy;
8315 +
8316 + /*Initialize client specific shared memory */
8317 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
8318 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
8319 + client_shm->rx_qsize = client->rx_qsize;
8320 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
8321 + client_shm->tx_qsize = client->tx_qsize;
8322 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
8323 + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
8324 + /* spin_lock_init(&client->rx_lock); */
8325 +
8326 + for (i = 0; i < HIF_EVENT_MAX; i++) {
8327 + client->queue_mask[i] = 0; /*
8328 + * By default all events are
8329 + * unmasked
8330 + */
8331 + }
8332 +
8333 + /*Indicate to HIF driver*/
8334 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
8335 +
8336 + pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
8337 + __func__, client, client->id, client->tx_qsize,
8338 + client->rx_qsize);
8339 +
8340 + client->cpu_id = -1;
8341 +
8342 + pfe->hif_client[client->id] = client;
8343 + spin_unlock_bh(&pfe->hif.lock);
8344 +
8345 + return 0;
8346 +
8347 +err:
8348 + spin_unlock_bh(&pfe->hif.lock);
8349 + hif_lib_client_release_tx_buffers(client);
8350 +
8351 +err_tx:
8352 + hif_lib_client_release_rx_buffers(client);
8353 +
8354 +err_rx:
8355 + return err;
8356 +}
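
A hedged sketch of how a client driver might fill in hif_client_s before registering; the queue sizes and the event handler below are illustrative assumptions, not values taken from this patch (the real ones live in pfe_eth.c). Queue sizes should be powers of two, since queue indices wrap with `& (size - 1)`:

#include "pfe_mod.h"
#include "pfe_hif.h"
#include "pfe_hif_lib.h"

/* Illustrative only; my_event_handler and the sizes are assumptions. */
static int my_event_handler(void *priv, int event, int qno)
{
	return 0;
}

static int example_client_register(struct pfe *pfe_inst)
{
	static struct hif_client_s client;

	client.id = PFE_CL_GEM0;
	client.pfe = pfe_inst;
	client.rx_qn = 1;
	client.tx_qn = 1;
	client.rx_qsize = 256;		/* power of two */
	client.tx_qsize = 256;
	client.event_handler = my_event_handler;

	return hif_lib_client_register(&client);
}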
8357 +
8358 +int hif_lib_client_unregister(struct hif_client_s *client)
8359 +{
8360 + struct pfe *pfe = client->pfe;
8361 + u32 client_id = client->id;
8362 +
8363 + pr_info(
8364 + "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
8365 + , __func__, client, client->id, client->tx_qsize,
8366 + client->rx_qsize);
8367 +
8368 + spin_lock_bh(&pfe->hif.lock);
8369 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
8370 +
8371 + hif_lib_client_release_tx_buffers(client);
8372 + hif_lib_client_release_rx_buffers(client);
8373 + pfe->hif_client[client_id] = NULL;
8374 + spin_unlock_bh(&pfe->hif.lock);
8375 +
8376 + return 0;
8377 +}
8378 +
8379 +int hif_lib_event_handler_start(struct hif_client_s *client, int event,
8380 + int qno)
8381 +{
8382 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8383 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
8384 +
8385 + if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
8386 + pr_debug("%s: Unsupported event : %d queue number : %d\n",
8387 + __func__, event, qno);
8388 + return -1;
8389 + }
8390 +
8391 + test_and_clear_bit(qno, &client->queue_mask[event]);
8392 +
8393 + switch (event) {
8394 + case EVENT_RX_PKT_IND:
8395 + if (!(desc->ctrl & CL_DESC_OWN))
8396 + hif_lib_indicate_client(client->id,
8397 + EVENT_RX_PKT_IND, qno);
8398 + break;
8399 +
8400 + case EVENT_HIGH_RX_WM:
8401 + case EVENT_TXDONE_IND:
8402 + default:
8403 + break;
8404 + }
8405 +
8406 + return 0;
8407 +}
8408 +
8409 +/*
8410 + * This function gets one packet from the specified client queue.
8411 + * It also refills the Rx buffer
8412 + */
8413 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8414 + *ofst, unsigned int *rx_ctrl,
8415 + unsigned int *desc_ctrl, void **priv_data)
8416 +{
8417 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
8418 + struct rx_queue_desc *desc;
8419 + void *pkt = NULL;
8420 +
8421 + /*
8422 +	 * The following lock protects Rx queue access from
8423 +	 * hif_lib_event_handler_start.
8424 +	 * In general this lock is not required, because hif_lib_xmit_pkt and
8425 +	 * hif_lib_event_handler_start are called from NAPI poll, which is
8426 +	 * not re-entrant. But if some client uses them differently, this
8427 +	 * lock is required.
8428 + */
8429 + /*spin_lock_irqsave(&client->rx_lock, flags); */
8430 + desc = queue->base + queue->read_idx;
8431 + if (!(desc->ctrl & CL_DESC_OWN)) {
8432 + pkt = desc->data - pfe_pkt_headroom;
8433 +
8434 + *rx_ctrl = desc->client_ctrl;
8435 + *desc_ctrl = desc->ctrl;
8436 +
8437 + if (desc->ctrl & CL_DESC_FIRST) {
8438 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
8439 +
8440 + if (size) {
8441 + size += PFE_PARSE_INFO_SIZE;
8442 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8443 + PFE_PKT_HEADER_SZ - size;
8444 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
8445 + + size;
8446 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
8447 + } else {
8448 + *len = CL_DESC_BUF_LEN(desc->ctrl) -
8449 + PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
8450 + *ofst = pfe_pkt_headroom
8451 + + PFE_PKT_HEADER_SZ
8452 + + PFE_PARSE_INFO_SIZE;
8453 + *priv_data = NULL;
8454 + }
8455 +
8456 + } else {
8457 + *len = CL_DESC_BUF_LEN(desc->ctrl);
8458 + *ofst = pfe_pkt_headroom;
8459 + }
8460 +
8461 + /*
8462 + * Needed so we don't free a buffer/page
8463 + * twice on module_exit
8464 + */
8465 + desc->data = NULL;
8466 +
8467 + /*
8468 + * Ensure everything else is written to DDR before
8469 + * writing bd->ctrl
8470 + */
8471 + smp_wmb();
8472 +
8473 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
8474 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8475 + }
8476 +
8477 + /*spin_unlock_irqrestore(&client->rx_lock, flags); */
8478 + return pkt;
8479 +}
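
A minimal sketch of the receive loop a client might run from its EVENT_RX_PKT_IND handler; process_frame() is a hypothetical consumer, and the loop relies on the contract above that the payload starts at pkt + ofst and is len bytes long:

/* Sketch only: client/qno come from the caller, process_frame() is assumed. */
static void example_rx_drain(struct hif_client_s *client, int qno)
{
	unsigned int rx_ctrl, desc_ctrl;
	void *priv_data, *pkt;
	int len, ofst;

	while ((pkt = hif_lib_receive_pkt(client, qno, &len, &ofst,
					  &rx_ctrl, &desc_ctrl, &priv_data))) {
		process_frame(pkt + ofst, len);	/* hypothetical consumer */
	}
}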
8480 +
8481 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
8482 + client_id, unsigned int qno,
8483 + u32 client_ctrl)
8484 +{
8485 +	/* Optimize the write since the destination may be non-cacheable */
8486 + if (!((unsigned long)pkt_hdr & 0x3)) {
8487 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
8488 + client_id;
8489 + } else {
8490 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
8491 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
8492 + }
8493 +}
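
On a 4-byte-aligned header the fast path above packs client_id, qno and client_ctrl into a single 32-bit store. A self-contained sketch of that packing, assuming a little-endian host so the in-memory byte order matches struct hif_hdr's field order:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t client_id = 1, qno = 3;
	uint32_t client_ctrl = 0xbeef;

	/* Same single-store packing as hif_hdr_write()'s aligned path */
	uint32_t word = (client_ctrl << 16) | (qno << 8) | client_id;

	/* Little-endian bytes: 01 03 ef be -> client_id, q_num, client_ctrl */
	printf("0x%08x\n", word);	/* 0xbeef0301 */
	return 0;
}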
8494 +
8495 +/*This function puts the given packet in the specific client queue */
8496 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8497 + *data, unsigned int len, u32 client_ctrl,
8498 + unsigned int flags, void *client_data)
8499 +{
8500 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8501 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
8502 +
8503 + /* First buffer */
8504 + if (flags & HIF_FIRST_BUFFER) {
8505 + data -= sizeof(struct hif_hdr);
8506 + len += sizeof(struct hif_hdr);
8507 +
8508 + hif_hdr_write(data, client->id, qno, client_ctrl);
8509 + }
8510 +
8511 + desc->data = client_data;
8512 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
8513 +
8514 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
8515 +
8516 + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
8517 + queue->tx_pending++;
8518 + queue->jiffies_last_packet = jiffies;
8519 +}
8520 +
8521 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8522 + unsigned int *flags, int count)
8523 +{
8524 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8525 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
8526 +
8527 + pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
8528 + queue->read_idx, queue->tx_pending);
8529 +
8530 + if (!queue->tx_pending)
8531 + return NULL;
8532 +
8533 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
8534 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
8535 + client->id, TMU_DM_TX_TRANS, 4));
8536 +
8537 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
8538 + queue->done_tmu_tx_pkts = UINT_MAX -
8539 + queue->prev_tmu_tx_pkts + tmu_tx_pkts;
8540 + else
8541 + queue->done_tmu_tx_pkts = tmu_tx_pkts -
8542 + queue->prev_tmu_tx_pkts;
8543 +
8544 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
8545 +
8546 + if (!queue->done_tmu_tx_pkts)
8547 + return NULL;
8548 + }
8549 +
8550 + if (desc->ctrl & CL_DESC_OWN)
8551 + return NULL;
8552 +
8553 + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
8554 + queue->tx_pending--;
8555 +
8556 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
8557 +
8558 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
8559 + queue->done_tmu_tx_pkts--;
8560 +
8561 + return desc->data;
8562 +}
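
A hedged sketch of the Tx-reclaim loop a client might run on EVENT_TXDONE_IND; what to do with each returned buffer (unmap it, free an skb, ...) is client-specific, and release_buffer() is a hypothetical helper:

/* Sketch only: release_buffer() is an assumed client-side helper. */
static void example_tx_reap(struct hif_client_s *client, int qno, int budget)
{
	unsigned int flags;
	void *data;

	while ((data = hif_lib_tx_get_next_complete(client, qno,
						    &flags, budget))) {
		if (flags & HIF_LAST_BUFFER)
			release_buffer(data);	/* hypothetical */
	}
}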
8563 +
8564 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
8565 +{
8566 + int i, q;
8567 +
8568 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
8569 + for (q = 0; q < emac_txq_cnt; q++) {
8570 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
8571 + DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
8572 + pfe->tmu_credit.tx_credit[i][q] =
8573 + pfe->tmu_credit.tx_credit_max[i][q];
8574 + }
8575 +}
8576 +
8577 +/* __hif_lib_update_credit
8578 + *
8579 + * @param[in] client hif client context
8580 + * @param[in] queue queue number matching the TMU queue
8581 + */
8582 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
8583 +{
8584 + unsigned int tmu_tx_packets, tmp;
8585 +
8586 + if (tx_qos) {
8587 + tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
8588 + client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
8589 +
8590 + /* tx_packets counter overflowed */
8591 + if (tmu_tx_packets >
8592 + pfe->tmu_credit.tx_packets[client->id][queue]) {
8593 + tmp = UINT_MAX - tmu_tx_packets +
8594 + pfe->tmu_credit.tx_packets[client->id][queue];
8595 +
8596 + pfe->tmu_credit.tx_credit[client->id][queue] =
8597 + pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
8598 + } else {
8599 +			/* TMU tx <= pfe_eth tx, normal case, or both overflowed since
8600 + * last time
8601 + */
8602 + pfe->tmu_credit.tx_credit[client->id][queue] =
8603 + pfe->tmu_credit.tx_credit_max[client->id][queue] -
8604 + (pfe->tmu_credit.tx_packets[client->id][queue] -
8605 + tmu_tx_packets);
8606 + }
8607 + }
8608 +}
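
The credit computation here, like the done_tmu_tx_pkts computation in hif_lib_tx_get_next_complete(), handles the 32-bit TMU counter wrapping past UINT_MAX with an explicit branch. A self-contained sketch of the wrap case; note that the branch form counts one fewer step across a wrap than plain modulo-2^32 subtraction, because `UINT_MAX - prev + now` omits the UINT_MAX -> 0 increment:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int prev = UINT_MAX - 5;	/* counter just before wrap */
	unsigned int now = 10;			/* counter after wrapping */

	/* The two-branch form used by the driver */
	unsigned int delta = (prev > now) ? (UINT_MAX - prev + now)
					  : (now - prev);

	/* Unsigned (modulo-2^32) subtraction computes the delta directly */
	printf("%u %u\n", delta, now - prev);	/* prints: 15 16 */
	return 0;
}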
8609 +
8610 +int pfe_hif_lib_init(struct pfe *pfe)
8611 +{
8612 + int rc;
8613 +
8614 + pr_info("%s\n", __func__);
8615 +
8616 + if (lro_mode) {
8617 + page_mode = 1;
8618 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
8619 + pfe_pkt_headroom = 0;
8620 + } else {
8621 + page_mode = 0;
8622 + pfe_pkt_size = PFE_PKT_SIZE;
8623 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
8624 + }
8625 +
8626 + if (tx_qos)
8627 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
8628 + else
8629 + emac_txq_cnt = EMAC_TXQ_CNT;
8630 +
8631 + hif_lib_tmu_credit_init(pfe);
8632 + pfe->hif.shm = &ghif_shm;
8633 + rc = pfe_hif_shm_init(pfe->hif.shm);
8634 +
8635 + return rc;
8636 +}
8637 +
8638 +void pfe_hif_lib_exit(struct pfe *pfe)
8639 +{
8640 + pr_info("%s\n", __func__);
8641 +
8642 + pfe_hif_shm_clean(pfe->hif.shm);
8643 +}
8644 --- /dev/null
8645 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
8646 @@ -0,0 +1,241 @@
8647 +/*
8648 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8649 + * Copyright 2017 NXP
8650 + *
8651 + * This program is free software; you can redistribute it and/or modify
8652 + * it under the terms of the GNU General Public License as published by
8653 + * the Free Software Foundation; either version 2 of the License, or
8654 + * (at your option) any later version.
8655 + *
8656 + * This program is distributed in the hope that it will be useful,
8657 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8658 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8659 + * GNU General Public License for more details.
8660 + *
8661 + * You should have received a copy of the GNU General Public License
8662 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8663 + */
8664 +
8665 +#ifndef _PFE_HIF_LIB_H_
8666 +#define _PFE_HIF_LIB_H_
8667 +
8668 +#include "pfe_hif.h"
8669 +
8670 +#define HIF_CL_REQ_TIMEOUT 10
8671 +#define GFP_DMA_PFE 0
8672 +#define PFE_PARSE_INFO_SIZE 16
8673 +
8674 +enum {
8675 + REQUEST_CL_REGISTER = 0,
8676 + REQUEST_CL_UNREGISTER,
8677 + HIF_REQUEST_MAX
8678 +};
8679 +
8680 +enum {
8681 +	/* Event to indicate that client rx queue has reached water mark level */
8682 +	EVENT_HIGH_RX_WM = 0,
8683 +	/* Event to indicate that a packet was received for the client */
8684 +	EVENT_RX_PKT_IND,
8685 +	/* Event to indicate that packet tx is done for the client */
8686 + EVENT_TXDONE_IND,
8687 + HIF_EVENT_MAX
8688 +};
8689 +
8692 +/*structure to store client queue info */
8693 +struct hif_client_rx_queue {
8694 + struct rx_queue_desc *base;
8695 + u32 size;
8696 + u32 read_idx;
8697 + u32 write_idx;
8698 +};
8699 +
8700 +struct hif_client_tx_queue {
8701 + struct tx_queue_desc *base;
8702 + u32 size;
8703 + u32 read_idx;
8704 + u32 write_idx;
8705 + u32 tx_pending;
8706 + unsigned long jiffies_last_packet;
8707 + u32 nocpy_flag;
8708 + u32 prev_tmu_tx_pkts;
8709 + u32 done_tmu_tx_pkts;
8710 +};
8711 +
8712 +struct hif_client_s {
8713 + int id;
8714 + int tx_qn;
8715 + int rx_qn;
8716 + void *rx_qbase;
8717 + void *tx_qbase;
8718 + int tx_qsize;
8719 + int rx_qsize;
8720 + int cpu_id;
8721 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
8722 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
8723 + int (*event_handler)(void *priv, int event, int data);
8724 + unsigned long queue_mask[HIF_EVENT_MAX];
8725 + struct pfe *pfe;
8726 + void *priv;
8727 +};
8728 +
8729 +/*
8730 + * Client specific shared memory
8731 + * It contains number of Rx/Tx queues, base addresses and queue sizes
8732 + */
8733 +struct hif_client_shm {
8734 + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
8735 + unsigned long rx_qbase; /*Rx queue base address */
8736 + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
8737 + unsigned long tx_qbase; /* Tx queue base address */
8738 + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
8739 +};
8740 +
8741 +/*Client shared memory ctrl bit description */
8742 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
8743 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
8744 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
8745 + & 0xFF)
8746 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
8747 + & 0xFF)
8748 +
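/*
 * Illustrative sketch (not part of this patch): packing and unpacking the
 * ctrl word with the offsets above; the queue counts are example values.
 */
u32 example_ctrl = (2 << CLIENT_CTRL_RX_Q_CNT_OFST) |
		   (4 << CLIENT_CTRL_TX_Q_CNT_OFST);
/* CLIENT_CTRL_RX_Q_CNT(example_ctrl) == 2, CLIENT_CTRL_TX_Q_CNT(example_ctrl) == 4 */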
8749 +/*
8750 + * Shared memory used to communicate between the HIF driver and host/client
8751 + * drivers. Before starting the HIF driver, rx_buf_pool and rx_buf_pool_cnt
8752 + * should be initialized with host buffers and the buffer count in the pool.
8753 + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
8754 + */
8756 +struct hif_shm {
8757 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
8758 + /*Rx buffers required to initialize HIF rx descriptors */
8759 + void *rx_buf_pool[HIF_RX_DESC_NT];
8760 + unsigned long g_client_status[2]; /*Global client status bit mask */
8761 + /* Client specific shared memory */
8762 + struct hif_client_shm client[HIF_CLIENTS_MAX];
8763 +};
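/*
 * Minimal sketch (not part of this patch): filling rx_buf_pool before the
 * HIF starts, as the comment above requires. Allocating with kmalloc() and
 * pfe_pkt_size (declared further down in this header) is an assumption;
 * the real initialization lives in pfe_hif_shm_init().
 */
static int example_fill_rx_pool(struct hif_shm *hif_shm)
{
	int i;

	for (i = 0; i < HIF_RX_DESC_NT; i++) {
		hif_shm->rx_buf_pool[i] = kmalloc(pfe_pkt_size, GFP_KERNEL);
		if (!hif_shm->rx_buf_pool[i])
			return -ENOMEM;
	}

	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;

	return 0;
}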
8764 +
8765 +#define CL_DESC_OWN	BIT(31)
8766 +/* Sets ownership to the HIF driver */
8767 +#define CL_DESC_LAST	BIT(30)
8768 +/* Marks the last buffer of a multi-buffer packet */
8769 +#define CL_DESC_FIRST	BIT(29)
8770 +/* Marks the first buffer of a multi-buffer packet */
8771 +
8772 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
8773 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
8774 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
8775 +
8776 +struct rx_queue_desc {
8777 + void *data;
8778 +	u32 ctrl; /* bits 0-15: len, 16-19: flags, 31: ownership */
8779 + u32 client_ctrl;
8780 +};
8781 +
8782 +struct tx_queue_desc {
8783 + void *data;
8784 +	u32 ctrl; /* bits 0-15: len, 16-19: flags, 31: ownership */
8785 +};
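/*
 * Illustrative sketch (not part of this patch): composing and decomposing
 * a descriptor ctrl word with the CL_DESC_* helpers above; the 1500-byte
 * length is an example value.
 */
u32 example_desc_ctrl = CL_DESC_BUF_LEN(1500) | CL_DESC_FIRST |
			CL_DESC_LAST | CL_DESC_OWN;
/* CL_DESC_BUF_LEN(example_desc_ctrl) == 1500; the HIF driver owns the buffer */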
8786 +
8787 +/* HIF Rx does not work properly for 2-byte aligned buffers, and the
8788 + * IP header should be 4-byte aligned for better performance.
8789 + * "ip_header = 64 + 6 (hif_header) + 14 (MAC header)" is 4-byte aligned.
8790 + */
8791 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
8792 +/* must be big enough for headroom, pkt size and skb shared info */
8793 +#define PFE_BUF_SIZE 2048
8794 +#define PFE_PKT_HEADROOM 128
8795 +
8796 +#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
8797 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
8798 + - SKB_SHARED_INFO_SIZE)
8799 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
8800 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
8801 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
8802 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
8803 + + MAX_L4_HDR_SIZE)
8804 +/* Used in page mode to clamp the packet size to the maximum supported by
8805 + * the hif hw interface (<16KiB)
8806 + */
8807 +#define MAX_PFE_PKT_SIZE 16380UL
8808 +
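/*
 * Worked example of the buffer budget above, assuming
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) rounds to 320 bytes
 * (the exact value depends on architecture and kernel version):
 *
 *	PFE_PKT_SIZE = 2048 (PFE_BUF_SIZE)
 *		     -  128 (PFE_PKT_HEADROOM)
 *		     -  320 (SKB_SHARED_INFO_SIZE)
 *		     = 1600 bytes of packet data per buffer
 */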
8809 +extern unsigned int pfe_pkt_size;
8810 +extern unsigned int pfe_pkt_headroom;
8811 +extern unsigned int page_mode;
8812 +extern unsigned int lro_mode;
8813 +extern unsigned int tx_qos;
8814 +extern unsigned int emac_txq_cnt;
8815 +
8816 +int pfe_hif_lib_init(struct pfe *pfe);
8817 +void pfe_hif_lib_exit(struct pfe *pfe);
8818 +int hif_lib_client_register(struct hif_client_s *client);
8819 +int hif_lib_client_unregister(struct hif_client_s *client);
8820 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
8821 + *data, unsigned int len, u32 client_ctrl,
8822 + unsigned int flags, void *client_data);
8823 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
8824 + unsigned int len, u32 client_ctrl, void *client_data);
8825 +void hif_lib_indicate_client(int cl_id, int event, int data);
8826 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
8827 + data);
8828 +int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
8829 +int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
8830 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
8831 + unsigned int *flags, int count);
8832 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
8833 + *ofst, unsigned int *rx_ctrl,
8834 + unsigned int *desc_ctrl, void **priv_data);
8835 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
8836 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
8837 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
8838 + enable);
8839 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
8840 + qno)
8841 +{
8842 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8843 +
8844 + return (queue->size - queue->tx_pending);
8845 +}
8846 +
8847 +static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
8848 + int qno)
8849 +{
8850 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8851 +
8852 + return queue->write_idx;
8853 +}
8854 +
8855 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
8856 + qno)
8857 +{
8858 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
8859 +
8860 + return queue->tx_pending;
8861 +}
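/*
 * Illustrative sketch (not part of this patch): a transmit path gating on
 * ring space with the helpers above. The -EBUSY policy is an example, not
 * driver behavior.
 */
static inline int example_tx_space_check(struct hif_client_s *client,
					 unsigned int qno)
{
	/* queue full: caller should stop the queue and retry on tx-done */
	if (hif_lib_tx_avail(client, qno) <= 0)
		return -EBUSY;

	return 0;
}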
8862 +
8863 +#define hif_lib_tx_credit_avail(pfe, id, qno) \
8864 + ((pfe)->tmu_credit.tx_credit[id][qno])
8865 +
8866 +#define hif_lib_tx_credit_max(pfe, id, qno) \
8867 + ((pfe)->tmu_credit.tx_credit_max[id][qno])
8868 +
8869 +/*
8870 + * Consume TMU tx credit for the given client and queue when TX QoS is on
8871 + */
8872 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
8873 + ({ typeof(pfe) pfe_ = pfe; \
8874 + typeof(id) id_ = id; \
8875 + typeof(qno) qno_ = qno; \
8876 + typeof(credit) credit_ = credit; \
8877 + do { \
8878 + if (tx_qos) { \
8879 + (pfe_)->tmu_credit.tx_credit[id_][qno_]\
8880 + -= credit_; \
8881 + (pfe_)->tmu_credit.tx_packets[id_][qno_]\
8882 + += credit_; \
8883 + } \
8884 + } while (0); \
8885 + })
8886 +
8887 +#endif /* _PFE_HIF_LIB_H_ */
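/*
 * Illustrative sketch (not part of this patch) of the credit macros above,
 * assuming one packet costs one credit, as hif_lib_tmu_credit_init() sets
 * things up:
 */
static inline int example_take_tx_credit(struct pfe *pfe, int id, int qno)
{
	if (tx_qos && !hif_lib_tx_credit_avail(pfe, id, qno))
		return -EAGAIN;	/* no TMU credit left for this queue */

	hif_lib_tx_credit_use(pfe, id, qno, 1);

	return 0;
}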
8888 --- /dev/null
8889 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
8890 @@ -0,0 +1,176 @@
8891 +/*
8892 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
8893 + * Copyright 2017 NXP
8894 + *
8895 + * This program is free software; you can redistribute it and/or modify
8896 + * it under the terms of the GNU General Public License as published by
8897 + * the Free Software Foundation; either version 2 of the License, or
8898 + * (at your option) any later version.
8899 + *
8900 + * This program is distributed in the hope that it will be useful,
8901 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8902 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8903 + * GNU General Public License for more details.
8904 + *
8905 + * You should have received a copy of the GNU General Public License
8906 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
8907 + */
8908 +
8909 +#include "pfe_mod.h"
8910 +#include "pfe_hw.h"
8911 +
8912 +/* Functions to handle most of pfe hw register initialization */
8913 +int pfe_hw_init(struct pfe *pfe, int resume)
8914 +{
8915 + struct class_cfg class_cfg = {
8916 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8917 + .route_table_baseaddr = pfe->ddr_phys_baseaddr +
8918 + ROUTE_TABLE_BASEADDR,
8919 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
8920 + };
8921 +
8922 + struct tmu_cfg tmu_cfg = {
8923 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8924 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
8925 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
8926 + };
8927 +
8928 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
8929 + struct util_cfg util_cfg = {
8930 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
8931 + };
8932 +#endif
8933 +
8934 + struct BMU_CFG bmu1_cfg = {
8935 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
8936 + BMU1_LMEM_BASEADDR),
8937 + .count = BMU1_BUF_COUNT,
8938 + .size = BMU1_BUF_SIZE,
8939 + .low_watermark = 10,
8940 + .high_watermark = 15,
8941 + };
8942 +
8943 + struct BMU_CFG bmu2_cfg = {
8944 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
8945 + BMU2_DDR_BASEADDR),
8946 + .count = BMU2_BUF_COUNT,
8947 + .size = BMU2_BUF_SIZE,
8948 + .low_watermark = 250,
8949 + .high_watermark = 253,
8950 + };
8951 +
8952 + struct gpi_cfg egpi1_cfg = {
8953 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
8954 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
8955 + .aseq_len = EGPI1_ASEQ_LEN,
8956 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
8957 + EMAC_TCNTRL_REG),
8958 + };
8959 +
8960 + struct gpi_cfg egpi2_cfg = {
8961 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
8962 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
8963 + .aseq_len = EGPI2_ASEQ_LEN,
8964 + .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
8965 + EMAC_TCNTRL_REG),
8966 + };
8967 +
8968 + struct gpi_cfg hgpi_cfg = {
8969 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
8970 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
8971 + .aseq_len = HGPI_ASEQ_LEN,
8972 + .mtip_pause_reg = 0,
8973 + };
8974 +
8975 + pr_info("%s\n", __func__);
8976 +
8977 +#if !defined(LS1012A_PFE_RESET_WA)
8978 + /* LS1012A needs this to make PE work correctly */
8979 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
8980 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
8981 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
8982 + usleep_range(10, 20);
8983 +#endif
8984 +
8985 + pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
8986 + pr_info("TMU version: %x\n", readl(TMU_VERSION));
8987 +
8988 + pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
8989 + BMU_VERSION));
8990 + pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
8991 + BMU_VERSION));
8992 +
8993 + pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
8994 + GPI_VERSION));
8995 + pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
8996 + GPI_VERSION));
8997 + pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
8998 + GPI_VERSION));
8999 +
9000 + pr_info("HIF version: %x\n", readl(HIF_VERSION));
9001 + pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
9002 +
9003 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9004 + pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
9005 +#endif
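 /* Busy-wait for TMU ECC memory init to complete; this poll has no
  * timeout (see the bounded sketch after this function).
  */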
9006 + while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
9007 + ;
9008 +
9009 + hif_rx_disable();
9010 + hif_tx_disable();
9011 +
9012 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
9013 +
9014 + pr_info("bmu_init(1) done\n");
9015 +
9016 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
9017 +
9018 + pr_info("bmu_init(2) done\n");
9019 +
9020 + class_cfg.resume = resume ? 1 : 0;
9021 +
9022 + class_init(&class_cfg);
9023 +
9024 + pr_info("class_init() done\n");
9025 +
9026 + tmu_init(&tmu_cfg);
9027 +
9028 + pr_info("tmu_init() done\n");
9029 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9030 + util_init(&util_cfg);
9031 +
9032 + pr_info("util_init() done\n");
9033 +#endif
9034 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
9035 +
9036 + pr_info("gpi_init(1) done\n");
9037 +
9038 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
9039 +
9040 + pr_info("gpi_init(2) done\n");
9041 +
9042 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
9043 +
9044 + pr_info("gpi_init(hif) done\n");
9045 +
9046 + bmu_enable(BMU1_BASE_ADDR);
9047 +
9048 + pr_info("bmu_enable(1) done\n");
9049 +
9050 + bmu_enable(BMU2_BASE_ADDR);
9051 +
9052 + pr_info("bmu_enable(2) done\n");
9053 +
9054 + return 0;
9055 +}
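/*
 * Illustrative sketch (not part of this patch): the ECC wait above spins
 * unbounded, so a stuck ECC_MEM_INIT_DONE bit hangs the boot. A bounded
 * variant might look like this; the 100 ms budget is an arbitrary
 * assumption.
 */
static int example_wait_ecc_init_done(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(10, 20);
	}

	return 0;
}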
9056 +
9057 +void pfe_hw_exit(struct pfe *pfe)
9058 +{
9059 + pr_info("%s\n", __func__);
9060 +
9061 + bmu_disable(BMU1_BASE_ADDR);
9062 + bmu_reset(BMU1_BASE_ADDR);
9063 +
9064 + bmu_disable(BMU2_BASE_ADDR);
9065 + bmu_reset(BMU2_BASE_ADDR);
9066 +}
9067 --- /dev/null
9068 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
9069 @@ -0,0 +1,27 @@
9070 +/*
9071 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9072 + * Copyright 2017 NXP
9073 + *
9074 + * This program is free software; you can redistribute it and/or modify
9075 + * it under the terms of the GNU General Public License as published by
9076 + * the Free Software Foundation; either version 2 of the License, or
9077 + * (at your option) any later version.
9078 + *
9079 + * This program is distributed in the hope that it will be useful,
9080 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9081 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9082 + * GNU General Public License for more details.
9083 + *
9084 + * You should have received a copy of the GNU General Public License
9085 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9086 + */
9087 +
9088 +#ifndef _PFE_HW_H_
9089 +#define _PFE_HW_H_
9090 +
9091 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
9092 +
9093 +int pfe_hw_init(struct pfe *pfe, int resume);
9094 +void pfe_hw_exit(struct pfe *pfe);
9095 +
9096 +#endif /* _PFE_HW_H_ */
9097 --- /dev/null
9098 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
9099 @@ -0,0 +1,385 @@
9100 +/*
9101 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9102 + * Copyright 2017 NXP
9103 + *
9104 + * This program is free software; you can redistribute it and/or modify
9105 + * it under the terms of the GNU General Public License as published by
9106 + * the Free Software Foundation; either version 2 of the License, or
9107 + * (at your option) any later version.
9108 + *
9109 + * This program is distributed in the hope that it will be useful,
9110 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9111 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9112 + * GNU General Public License for more details.
9113 + *
9114 + * You should have received a copy of the GNU General Public License
9115 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9116 + */
9117 +
9118 +#include <linux/module.h>
9119 +#include <linux/device.h>
9120 +#include <linux/of_net.h>
9121 +#include <linux/of_address.h>
9122 +#include <linux/platform_device.h>
9123 +#include <linux/slab.h>
9124 +#include <linux/clk.h>
9125 +#include <linux/mfd/syscon.h>
9126 +#include <linux/regmap.h>
9127 +
9128 +#include "pfe_mod.h"
9129 +
9130 +struct ls1012a_pfe_platform_data pfe_platform_data;
9131 +
9132 +static int pfe_get_gemac_if_properties(struct device_node *parent, int port,
9133 +				       int if_cnt,
9134 +				       struct ls1012a_pfe_platform_data *pdata)
9136 +{
9137 + struct device_node *gem = NULL, *phy = NULL;
9138 + int size;
9139 + int ii = 0, phy_id = 0;
9140 + const u32 *addr;
9141 + const void *mac_addr;
9142 +
9143 + for (ii = 0; ii < if_cnt; ii++) {
9144 + gem = of_get_next_child(parent, gem);
9145 + if (!gem)
9146 + goto err;
9147 + addr = of_get_property(gem, "reg", &size);
9148 + if (addr && (be32_to_cpup(addr) == port))
9149 + break;
9150 + }
9151 +
9152 + if (ii >= if_cnt) {
9153 +		pr_err("%s:%d Failed to find interface %d\n",
9154 +		       __func__, __LINE__, port);
9155 + goto err;
9156 + }
9157 +
9158 + pdata->ls1012a_eth_pdata[port].gem_id = port;
9159 +
9160 + mac_addr = of_get_mac_address(gem);
9161 +
9162 + if (mac_addr) {
9163 + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
9164 + ETH_ALEN);
9165 + }
9166 +
9167 + pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
9168 +
9169 + if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
9170 + pr_err("%s:%d Incorrect Phy mode....\n", __func__,
9171 + __LINE__);
9172 +
9173 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
9174 + if (!addr)
9175 + pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
9176 + __LINE__);
9177 + else
9178 + pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
9179 +
9180 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
9181 + if (!addr) {
9182 + pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
9183 + __LINE__);
9184 + } else {
9185 + phy_id = be32_to_cpup(addr);
9186 + pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
9187 + pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
9188 + }
9189 +
9190 + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
9191 + if (!addr)
9192 + pr_err("%s: Invalid mdio-mux-val....\n", __func__);
9193 + else
9194 + phy_id = be32_to_cpup(addr);
9195 + pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
9196 +
9197 + if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
9198 + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
9199 + pdata->ls1012a_eth_pdata[port].mdio_muxval;
9200 +
9201 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
9202 + if (!addr)
9203 + pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
9204 + __func__, __LINE__);
9205 + else
9206 + pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
9207 +
9208 +	/* If there is no PHY (GEMAC_NO_PHY), skip the mdio properties */
9209 + if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
9210 + goto done;
9211 +
9212 + phy = of_get_next_child(gem, NULL);
9213 +
9214 + addr = of_get_property(phy, "reg", &size);
9215 +
9216 + if (!addr)
9217 + pr_err("%s:%d Invalid phy enable flag....\n",
9218 + __func__, __LINE__);
9219 + else
9220 + pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
9221 +
9222 + pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
9223 +
9224 +done:
9225 +
9226 + return 0;
9227 +
9228 +err:
9229 + return -1;
9230 +}
9231 +
9232 +/*
9233 + * pfe_platform_probe -
9234 + */
9238 +static int pfe_platform_probe(struct platform_device *pdev)
9239 +{
9240 + struct resource res;
9241 + int ii, rc, interface_count = 0, size = 0;
9242 + const u32 *prop;
9243 + struct device_node *np;
9244 + struct clk *pfe_clk;
9245 +
9246 + np = pdev->dev.of_node;
9247 +
9248 + if (!np) {
9249 + pr_err("Invalid device node\n");
9250 + return -EINVAL;
9251 + }
9252 +
9253 + pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
9254 + if (!pfe) {
9255 + rc = -ENOMEM;
9256 + goto err_alloc;
9257 + }
9258 +
9259 + platform_set_drvdata(pdev, pfe);
9260 +
9261 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9262 +
9263 + if (of_address_to_resource(np, 1, &res)) {
9264 + rc = -ENOMEM;
9265 + pr_err("failed to get ddr resource\n");
9266 + goto err_ddr;
9267 + }
9268 +
9269 + pfe->ddr_phys_baseaddr = res.start;
9270 + pfe->ddr_size = resource_size(&res);
9271 + pfe->ddr_baseaddr = phys_to_virt(res.start);
9272 +
9273 + pfe->scfg =
9274 + syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
9275 + "fsl,pfe-scfg");
9276 +	if (IS_ERR(pfe->scfg)) {
9277 +		dev_err(&pdev->dev, "No syscfg phandle specified\n");
9278 +		rc = PTR_ERR(pfe->scfg);
9279 +		goto err_ddr;
9280 +	}
9280 +
9281 + pfe->cbus_baseaddr = of_iomap(np, 0);
9282 + if (!pfe->cbus_baseaddr) {
9283 + rc = -ENOMEM;
9284 + pr_err("failed to get axi resource\n");
9285 + goto err_axi;
9286 + }
9287 +
9288 + pfe->hif_irq = platform_get_irq(pdev, 0);
9289 + if (pfe->hif_irq < 0) {
9290 + pr_err("platform_get_irq for hif failed\n");
9291 + rc = pfe->hif_irq;
9292 + goto err_hif_irq;
9293 + }
9294 +
9295 + pfe->wol_irq = platform_get_irq(pdev, 2);
9296 + if (pfe->wol_irq < 0) {
9297 + pr_err("platform_get_irq for WoL failed\n");
9298 + rc = pfe->wol_irq;
9299 + goto err_hif_irq;
9300 + }
9301 +
9302 + /* Read interface count */
9303 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
9304 + if (!prop) {
9305 + pr_err("Failed to read number of interfaces\n");
9306 + rc = -ENXIO;
9307 + goto err_prop;
9308 + }
9309 +
9310 + interface_count = be32_to_cpup(prop);
9311 + if (interface_count <= 0) {
9312 +		pr_err("Invalid ethernet interface count: %d\n",
9313 + interface_count);
9314 + rc = -ENXIO;
9315 + goto err_prop;
9316 + }
9317 +
9318 + pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
9319 +
9320 + for (ii = 0; ii < interface_count; ii++) {
9321 +		pfe_get_gemac_if_properties(np, ii, interface_count,
9322 + &pfe_platform_data);
9323 + }
9324 +
9325 + pfe->dev = &pdev->dev;
9326 +
9327 + pfe->dev->platform_data = &pfe_platform_data;
9328 +
9329 + /* declare WoL capabilities */
9330 + device_init_wakeup(&pdev->dev, true);
9331 +
9332 + /* find the clocks */
9333 + pfe_clk = devm_clk_get(pfe->dev, "pfe");
9334 +	if (IS_ERR(pfe_clk)) {
9335 +		rc = PTR_ERR(pfe_clk);
9336 +		goto err_prop;
9337 +	}
9336 +
9337 + /* PFE clock is (platform clock / 2) */
9338 + /* save sys_clk value as KHz */
9339 + pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
9340 +
9341 + rc = pfe_probe(pfe);
9342 + if (rc < 0)
9343 + goto err_probe;
9344 +
9345 + return 0;
9346 +
9347 +err_probe:
9348 +err_prop:
9349 +err_hif_irq:
9350 + iounmap(pfe->cbus_baseaddr);
9351 +
9352 +err_axi:
9353 +err_ddr:
9354 + platform_set_drvdata(pdev, NULL);
9355 +
9356 + kfree(pfe);
9357 +
9358 +err_alloc:
9359 + return rc;
9360 +}
9361 +
9362 +/*
9363 + * pfe_platform_remove -
9364 + */
9365 +static int pfe_platform_remove(struct platform_device *pdev)
9366 +{
9367 + struct pfe *pfe = platform_get_drvdata(pdev);
9368 + int rc;
9369 +
9370 + pr_info("%s\n", __func__);
9371 +
9372 + rc = pfe_remove(pfe);
9373 +
9374 + iounmap(pfe->cbus_baseaddr);
9375 +
9376 + platform_set_drvdata(pdev, NULL);
9377 +
9378 + kfree(pfe);
9379 +
9380 + return rc;
9381 +}
9382 +
9383 +#ifdef CONFIG_PM
9384 +#ifdef CONFIG_PM_SLEEP
9385 +static int pfe_platform_suspend(struct device *dev)
9386 +{
9387 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9388 + struct net_device *netdev;
9389 + int i;
9390 +
9391 + pfe->wake = 0;
9392 +
9393 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9394 + netdev = pfe->eth.eth_priv[i]->ndev;
9395 +
9396 + netif_device_detach(netdev);
9397 +
9398 + if (netif_running(netdev))
9399 + if (pfe_eth_suspend(netdev))
9400 + pfe->wake = 1;
9401 + }
9402 +
9403 + /* Shutdown PFE only if we're not waking up the system */
9404 + if (!pfe->wake) {
9405 +#if defined(LS1012A_PFE_RESET_WA)
9406 + pfe_hif_rx_idle(&pfe->hif);
9407 +#endif
9408 + pfe_ctrl_suspend(&pfe->ctrl);
9409 + pfe_firmware_exit(pfe);
9410 +
9411 + pfe_hif_exit(pfe);
9412 + pfe_hif_lib_exit(pfe);
9413 +
9414 + pfe_hw_exit(pfe);
9415 + }
9416 +
9417 + return 0;
9418 +}
9419 +
9420 +static int pfe_platform_resume(struct device *dev)
9421 +{
9422 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
9423 + struct net_device *netdev;
9424 + int i;
9425 +
9426 + if (!pfe->wake) {
9427 + pfe_hw_init(pfe, 1);
9428 + pfe_hif_lib_init(pfe);
9429 + pfe_hif_init(pfe);
9430 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9431 + util_enable();
9432 +#endif
9433 + tmu_enable(0xf);
9434 + class_enable();
9435 + pfe_ctrl_resume(&pfe->ctrl);
9436 + }
9437 +
9438 + for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
9439 + netdev = pfe->eth.eth_priv[i]->ndev;
9440 +
9441 + if (pfe->eth.eth_priv[i]->mii_bus)
9442 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
9443 +
9444 + if (netif_running(netdev))
9445 + pfe_eth_resume(netdev);
9446 +
9447 + netif_device_attach(netdev);
9448 + }
9449 + return 0;
9450 +}
9451 +#else
9452 +#define pfe_platform_suspend NULL
9453 +#define pfe_platform_resume NULL
9454 +#endif
9455 +
9456 +static const struct dev_pm_ops pfe_platform_pm_ops = {
9457 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
9458 +};
9459 +#endif
9460 +
9461 +static const struct of_device_id pfe_match[] = {
9462 + {
9463 + .compatible = "fsl,pfe",
9464 + },
9465 + {},
9466 +};
9467 +MODULE_DEVICE_TABLE(of, pfe_match);
9468 +
9469 +static struct platform_driver pfe_platform_driver = {
9470 + .probe = pfe_platform_probe,
9471 + .remove = pfe_platform_remove,
9472 + .driver = {
9473 + .name = "pfe",
9474 + .of_match_table = pfe_match,
9475 +#ifdef CONFIG_PM
9476 + .pm = &pfe_platform_pm_ops,
9477 +#endif
9478 + },
9479 +};
9480 +
9481 +module_platform_driver(pfe_platform_driver);
9482 +MODULE_LICENSE("GPL");
9483 +MODULE_DESCRIPTION("PFE Ethernet driver");
9484 +MODULE_AUTHOR("NXP DNCPE");
9485 --- /dev/null
9486 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
9487 @@ -0,0 +1,156 @@
9488 +/*
9489 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9490 + * Copyright 2017 NXP
9491 + *
9492 + * This program is free software; you can redistribute it and/or modify
9493 + * it under the terms of the GNU General Public License as published by
9494 + * the Free Software Foundation; either version 2 of the License, or
9495 + * (at your option) any later version.
9496 + *
9497 + * This program is distributed in the hope that it will be useful,
9498 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9499 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9500 + * GNU General Public License for more details.
9501 + *
9502 + * You should have received a copy of the GNU General Public License
9503 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9504 + */
9505 +
9506 +#include <linux/dma-mapping.h>
9507 +#include "pfe_mod.h"
9508 +
9509 +unsigned int us;
9510 +module_param(us, uint, 0444);
9511 +MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
9512 + "1: module enabled for userspace networking\n");
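/*
 * Example usage (the module object name "pfe.ko" is an assumption based on
 * the driver name; adjust to the actual build output):
 *
 *	insmod pfe.ko us=1
 *
 * As pfe_probe() below shows, us=1 skips the in-kernel HIF setup
 * (pfe_hif_lib_init()/pfe_hif_init()) so that userspace can own the data
 * path; the value can be read back via /sys/module/<name>/parameters/us.
 */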
9513 +struct pfe *pfe;
9514 +
9515 +/*
9516 + * pfe_probe -
9517 + */
9518 +int pfe_probe(struct pfe *pfe)
9519 +{
9520 + int rc;
9521 +
9522 + if (pfe->ddr_size < DDR_MAX_SIZE) {
9523 +		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
9524 + __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
9525 + rc = -ENOMEM;
9526 + goto err_hw;
9527 + }
9528 +
9529 + if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
9530 + (8 * SZ_1M - 1)) != 0) {
9531 + pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
9532 + __func__, (int)pfe->ddr_phys_baseaddr +
9533 + BMU2_DDR_BASEADDR);
9534 + rc = -ENOMEM;
9535 + goto err_hw;
9536 + }
9537 +
9538 + pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
9539 + (unsigned long)pfe->cbus_baseaddr,
9540 + (unsigned long)pfe->ddr_baseaddr,
9541 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9542 +
9543 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
9544 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
9545 +
9546 + rc = pfe_hw_init(pfe, 0);
9547 + if (rc < 0)
9548 + goto err_hw;
9549 +
9550 + if (us)
9551 + goto firmware_init;
9552 +
9553 + rc = pfe_hif_lib_init(pfe);
9554 + if (rc < 0)
9555 + goto err_hif_lib;
9556 +
9557 + rc = pfe_hif_init(pfe);
9558 + if (rc < 0)
9559 + goto err_hif;
9560 +
9561 +firmware_init:
9562 + rc = pfe_firmware_init(pfe);
9563 + if (rc < 0)
9564 + goto err_firmware;
9565 +
9566 + rc = pfe_ctrl_init(pfe);
9567 + if (rc < 0)
9568 + goto err_ctrl;
9569 +
9570 + rc = pfe_eth_init(pfe);
9571 + if (rc < 0)
9572 + goto err_eth;
9573 +
9574 + rc = pfe_sysfs_init(pfe);
9575 + if (rc < 0)
9576 + goto err_sysfs;
9577 +
9578 + rc = pfe_debugfs_init(pfe);
9579 + if (rc < 0)
9580 + goto err_debugfs;
9581 +
9582 + return 0;
9583 +
9584 +err_debugfs:
9585 + pfe_sysfs_exit(pfe);
9586 +
9587 +err_sysfs:
9588 + pfe_eth_exit(pfe);
9589 +
9590 +err_eth:
9591 + pfe_ctrl_exit(pfe);
9592 +
9593 +err_ctrl:
9594 + pfe_firmware_exit(pfe);
9595 +
9596 +err_firmware:
9597 + if (us)
9598 + goto err_hif_lib;
9599 +
9600 + pfe_hif_exit(pfe);
9601 +
9602 +err_hif:
9603 + pfe_hif_lib_exit(pfe);
9604 +
9605 +err_hif_lib:
9606 + pfe_hw_exit(pfe);
9607 +
9608 +err_hw:
9609 + return rc;
9610 +}
9611 +
9612 +/*
9613 + * pfe_remove -
9614 + */
9615 +int pfe_remove(struct pfe *pfe)
9616 +{
9617 + pr_info("%s\n", __func__);
9618 +
9619 + pfe_debugfs_exit(pfe);
9620 +
9621 + pfe_sysfs_exit(pfe);
9622 +
9623 + pfe_eth_exit(pfe);
9624 +
9625 + pfe_ctrl_exit(pfe);
9626 +
9627 +#if defined(LS1012A_PFE_RESET_WA)
9628 + pfe_hif_rx_idle(&pfe->hif);
9629 +#endif
9630 + pfe_firmware_exit(pfe);
9631 +
9632 + if (us)
9633 + goto hw_exit;
9634 +
9635 + pfe_hif_exit(pfe);
9636 +
9637 + pfe_hif_lib_exit(pfe);
9638 +
9639 +hw_exit:
9640 + pfe_hw_exit(pfe);
9641 +
9642 + return 0;
9643 +}
9644 --- /dev/null
9645 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
9646 @@ -0,0 +1,114 @@
9647 +/*
9648 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9649 + * Copyright 2017 NXP
9650 + *
9651 + * This program is free software; you can redistribute it and/or modify
9652 + * it under the terms of the GNU General Public License as published by
9653 + * the Free Software Foundation; either version 2 of the License, or
9654 + * (at your option) any later version.
9655 + *
9656 + * This program is distributed in the hope that it will be useful,
9657 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9658 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9659 + * GNU General Public License for more details.
9660 + *
9661 + * You should have received a copy of the GNU General Public License
9662 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9663 + */
9664 +
9665 +#ifndef _PFE_MOD_H_
9666 +#define _PFE_MOD_H_
9667 +
9668 +#include <linux/device.h>
9669 +#include <linux/elf.h>
9670 +
9671 +extern unsigned int us;
9672 +
9673 +struct pfe;
9674 +
9675 +#include "pfe_hw.h"
9676 +#include "pfe_firmware.h"
9677 +#include "pfe_ctrl.h"
9678 +#include "pfe_hif.h"
9679 +#include "pfe_hif_lib.h"
9680 +#include "pfe_eth.h"
9681 +#include "pfe_sysfs.h"
9682 +#include "pfe_perfmon.h"
9683 +#include "pfe_debugfs.h"
9684 +
9685 +#define PHYID_MAX_VAL 32
9686 +
9687 +struct pfe_tmu_credit {
9688 +	/* Number of allowed TX packets in flight; matches the TMU queue size */
9689 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9690 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9691 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
9692 +};
9693 +
9694 +struct pfe {
9695 + struct regmap *scfg;
9696 + unsigned long ddr_phys_baseaddr;
9697 + void *ddr_baseaddr;
9698 + unsigned int ddr_size;
9699 + void *cbus_baseaddr;
9700 + void *apb_baseaddr;
9701 + unsigned long iram_phys_baseaddr;
9702 + void *iram_baseaddr;
9703 + unsigned long ipsec_phys_baseaddr;
9704 + void *ipsec_baseaddr;
9705 + int hif_irq;
9706 + int wol_irq;
9707 + int hif_client_irq;
9708 + struct device *dev;
9709 + struct dentry *dentry;
9710 + struct pfe_ctrl ctrl;
9711 + struct pfe_hif hif;
9712 + struct pfe_eth eth;
9713 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
9714 +#if defined(CFG_DIAGS)
9715 + struct pfe_diags diags;
9716 +#endif
9717 + struct pfe_tmu_credit tmu_credit;
9718 + struct pfe_cpumon cpumon;
9719 + struct pfe_memmon memmon;
9720 + int wake;
9721 + int mdio_muxval[PHYID_MAX_VAL];
9722 + struct clk *hfe_clock;
9723 +};
9724 +
9725 +extern struct pfe *pfe;
9726 +
9727 +int pfe_probe(struct pfe *pfe);
9728 +int pfe_remove(struct pfe *pfe);
9729 +
9730 +/* DDR Mapping in reserved memory */
9731 +#define ROUTE_TABLE_BASEADDR 0
9732 +#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
9733 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
9734 + * CLASS_ROUTE_SIZE)
9735 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
9736 +#define BMU2_BUF_COUNT (4096 - 256)
9737 +/* This is to get a total DDR size of 12MiB */
9738 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
9739 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
9740 +#define UTIL_CODE_SIZE (128 * SZ_1K)
9741 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
9742 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
9743 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
9744 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
9745 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
9746 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
9747 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
9748 +#define TMU_LLM_QUEUE_LEN (8 * 512)
9749 +/* Must be a power of two and at least 16 * 8 = 128 bytes */
9750 +#define TMU_LLM_SIZE		(4 * 16 * TMU_LLM_QUEUE_LEN)
9751 +/* (4 TMUs x 16 queues x queue_len) */
9752 +
9753 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
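/*
 * Worked example for the map above, assuming CLASS_ROUTE_SIZE = 128 and
 * DDR_BUF_SIZE = 2048 (both are defined elsewhere in this patch), which is
 * consistent with the "total DDR size of 12MiB" comment:
 *
 *	ROUTE_TABLE_SIZE = 32768 * 128		=  4	MiB
 *	BMU2_DDR_SIZE	 = 2048 * 3840		=  7.5	MiB
 *	UTIL/CLASS/TMU code + data		=  0.25	MiB
 *	TMU_LLM_SIZE	 = 4 * 16 * 4096	=  0.25	MiB
 *	DDR_MAX_SIZE				= 12	MiB
 */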
9754 +
9755 +/* LMEM Mapping */
9756 +#define BMU1_LMEM_BASEADDR 0
9757 +#define BMU1_BUF_COUNT 256
9758 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
9759 +
9760 +#endif /* _PFE_MOD_H_ */
9761 --- /dev/null
9762 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
9763 @@ -0,0 +1,38 @@
9764 +/*
9765 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9766 + * Copyright 2017 NXP
9767 + *
9768 + * This program is free software; you can redistribute it and/or modify
9769 + * it under the terms of the GNU General Public License as published by
9770 + * the Free Software Foundation; either version 2 of the License, or
9771 + * (at your option) any later version.
9772 + *
9773 + * This program is distributed in the hope that it will be useful,
9774 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9775 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9776 + * GNU General Public License for more details.
9777 + *
9778 + * You should have received a copy of the GNU General Public License
9779 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9780 + */
9781 +
9782 +#ifndef _PFE_PERFMON_H_
9783 +#define _PFE_PERFMON_H_
9784 +
9785 +#include "pfe/pfe.h"
9786 +
9787 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
9788 +
9789 +struct pfe_cpumon {
9790 + u32 cpu_usage_pct[MAX_PE];
9791 + u32 class_usage_pct;
9792 +};
9793 +
9794 +struct pfe_memmon {
9795 + u32 kernel_memory_allocated;
9796 +};
9797 +
9798 +int pfe_perfmon_init(struct pfe *pfe);
9799 +void pfe_perfmon_exit(struct pfe *pfe);
9800 +
9801 +#endif /* _PFE_PERFMON_H_ */
9802 --- /dev/null
9803 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
9804 @@ -0,0 +1,818 @@
9805 +/*
9806 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
9807 + * Copyright 2017 NXP
9808 + *
9809 + * This program is free software; you can redistribute it and/or modify
9810 + * it under the terms of the GNU General Public License as published by
9811 + * the Free Software Foundation; either version 2 of the License, or
9812 + * (at your option) any later version.
9813 + *
9814 + * This program is distributed in the hope that it will be useful,
9815 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
9816 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9817 + * GNU General Public License for more details.
9818 + *
9819 + * You should have received a copy of the GNU General Public License
9820 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
9821 + */
9822 +
9823 +#include <linux/module.h>
9824 +#include <linux/platform_device.h>
9825 +
9826 +#include "pfe_mod.h"
9827 +
9828 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
9829 +#define NUM_QUEUES 16
9830 +
9831 +static char register_name[20][5] = {
9832 + "EPC", "ECAS", "EID", "ED",
9833 + "r0", "r1", "r2", "r3",
9834 + "r4", "r5", "r6", "r7",
9835 + "r8", "r9", "r10", "r11",
9836 + "r12", "r13", "r14", "r15",
9837 +};
9838 +
9839 +static char exception_name[14][20] = {
9840 + "Reset",
9841 + "HardwareFailure",
9842 + "NMI",
9843 + "InstBreakpoint",
9844 + "DataBreakpoint",
9845 + "Unsupported",
9846 + "PrivilegeViolation",
9847 + "InstBusError",
9848 + "DataBusError",
9849 + "AlignmentError",
9850 + "ArithmeticError",
9851 + "SystemCall",
9852 + "MemoryManagement",
9853 + "Interrupt",
9854 +};
9855 +
9856 +static unsigned long class_do_clear;
9857 +static unsigned long tmu_do_clear;
9858 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
9859 +static unsigned long util_do_clear;
9860 +#endif
9861 +
9862 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
9863 + do_clear)
9864 +{
9865 + ssize_t len = 0;
9866 + u32 val;
9867 + char statebuf[5];
9868 + struct pfe_cpumon *cpumon = &pfe->cpumon;
9869 + u32 debug_indicator;
9870 + u32 debug[20];
9871 +
9872 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
9873 + dmem_addr += 4;
9874 +
9875 + statebuf[4] = '\0';
9876 + len += sprintf(buf + len, "state=%4s ", statebuf);
9877 +
9878 + val = pe_dmem_read(id, dmem_addr, 4);
9879 + dmem_addr += 4;
9880 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
9881 +
9882 + val = pe_dmem_read(id, dmem_addr, 4);
9883 + if (do_clear && val)
9884 + pe_dmem_write(id, 0, dmem_addr, 4);
9885 + dmem_addr += 4;
9886 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
9887 +
9888 + val = pe_dmem_read(id, dmem_addr, 4);
9889 + if (do_clear && val)
9890 + pe_dmem_write(id, 0, dmem_addr, 4);
9891 + dmem_addr += 4;
9892 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
9893 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
9894 + else
9895 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
9896 +
9897 + val = pe_dmem_read(id, dmem_addr, 4);
9898 + if (do_clear && val)
9899 + pe_dmem_write(id, 0, dmem_addr, 4);
9900 + dmem_addr += 4;
9901 + if (val)
9902 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
9903 +
9904 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
9905 +
9906 + len += sprintf(buf + len, "\n");
9907 +
9908 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
9909 + dmem_addr += 4;
9910 + if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
9911 + int j, last = 0;
9912 +
9913 + for (j = 0; j < 16; j++) {
9914 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
9915 + if (debug[j]) {
9916 + if (do_clear)
9917 + pe_dmem_write(id, 0, dmem_addr, 4);
9918 + last = j + 1;
9919 + }
9920 + dmem_addr += 4;
9921 + }
9922 + for (j = 0; j < last; j++) {
9923 + len += sprintf(buf + len, "%08x%s",
9924 + cpu_to_be32(debug[j]),
9925 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
9926 + }
9927 + }
9928 +
9929 + if (!strncmp(statebuf, "DEAD", 4)) {
9930 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
9931 +
9932 + len += sprintf(buf + len, "Exception details:\n");
9933 + for (i = 0; i < 20; i++) {
9934 + debug[i] = pe_dmem_read(id, dump, 4);
9935 + dump += 4;
9936 + if (i == 2)
9937 + len += sprintf(buf + len, "%4s = %08x (=%s) ",
9938 + register_name[i], cpu_to_be32(debug[i]),
9939 + exception_name[min((u32)
9940 + cpu_to_be32(debug[i]), (u32)13)]);
9941 + else
9942 + len += sprintf(buf + len, "%4s = %08x%s",
9943 + register_name[i], cpu_to_be32(debug[i]),
9944 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
9945 + }
9946 + }
9947 +
9948 + return len;
9949 +}
9950 +
9951 +static ssize_t class_phy_stats(char *buf, int phy)
9952 +{
9953 + ssize_t len = 0;
9954 + int off1 = phy * 0x28;
9955 + int off2 = phy * 0x10;
9956 +
9957 + if (phy == 3)
9958 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
9959 +
9960 + len += sprintf(buf + len, "phy: %d\n", phy);
9961 + len += sprintf(buf + len,
9962 + " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
9963 + readl(CLASS_PHY1_RX_PKTS + off1),
9964 + readl(CLASS_PHY1_TX_PKTS + off1),
9965 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
9966 + readl(CLASS_PHY1_V4_PKTS + off1),
9967 + readl(CLASS_PHY1_V6_PKTS + off1));
9968 +
9969 + len += sprintf(buf + len,
9970 + " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
9971 + readl(CLASS_PHY1_ICMP_PKTS + off2),
9972 + readl(CLASS_PHY1_IGMP_PKTS + off2),
9973 + readl(CLASS_PHY1_TCP_PKTS + off2),
9974 + readl(CLASS_PHY1_UDP_PKTS + off2));
9975 +
9976 + len += sprintf(buf + len, " err\n");
9977 + len += sprintf(buf + len,
9978 + " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
9979 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
9980 + readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
9981 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
9982 + readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
9983 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
9984 +
9985 + return len;
9986 +}
9987 +
9988 +/* qm_read_drop_stat
9989 + * This function is used to read the drop statistics from the TMU
9990 + * hw drop counter. Since the hw counter is always cleared after
9991 + * reading, this function maintains the previous drop count, and
9992 + * adds the new value to it. That value can be retrieved by
9993 + * passing a pointer to it with the total_drops arg.
9994 + *
9995 + * @param tmu TMU number (0 - 3)
9996 + * @param queue queue number (0 - 15)
9997 + * @param total_drops pointer to location to store total drops (or NULL)
9998 + * @param do_reset if TRUE, clear total drops after updating
9999 + */
10000 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
10001 +{
10002 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
10003 + u32 val;
10004 +
10005 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10006 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10007 + val = readl(TMU_TEQ_DROP_STAT);
10008 + qtotal[tmu][queue] += val;
10009 + if (total_drops)
10010 + *total_drops = qtotal[tmu][queue];
10011 + if (do_reset)
10012 + qtotal[tmu][queue] = 0;
10013 + return val;
10014 +}
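/*
 * Illustrative sketch (not part of this patch): accumulating drops with
 * the helper above; TMU 0, queue 0 are arbitrary, and do_reset is left
 * clear so the running total keeps growing.
 */
static void example_log_q0_drops(void)
{
	u32 total;

	qm_read_drop_stat(0, 0, &total, 0);
	pr_info("TMU0-Q0 total drops: %u\n", total);
}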
10015 +
10016 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
10017 +{
10018 + ssize_t len = 0;
10019 + u32 drops;
10020 +
10021 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
10022 +
10023 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
10024 +
10025 + /* Select queue */
10026 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
10027 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
10028 +
10029 + len += sprintf(buf + len,
10030 + "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
10031 + drops, readl(TMU_TEQ_TRANS_STAT),
10032 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
10033 + readl(TMU_LLM_QUE_DROPCNT));
10034 +
10035 + return len;
10036 +}
10037 +
10038 +static ssize_t tmu_queues(char *buf, int tmu)
10039 +{
10040 + ssize_t len = 0;
10041 + int queue;
10042 +
10043 + for (queue = 0; queue < 16; queue++)
10044 + len += tmu_queue_stats(buf + len, tmu, queue);
10045 +
10046 + return len;
10047 +}
10048 +
10049 +static ssize_t block_version(char *buf, void *addr)
10050 +{
10051 + ssize_t len = 0;
10052 + u32 val;
10053 +
10054 + val = readl(addr);
10055 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
10056 + (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
10057 +
10058 + return len;
10059 +}
10060 +
10061 +static ssize_t bmu(char *buf, int id, void *base)
10062 +{
10063 + ssize_t len = 0;
10064 +
10065 + len += sprintf(buf + len, "%s: %d\n ", __func__, id);
10066 +
10067 + len += block_version(buf + len, base + BMU_VERSION);
10068 +
10069 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
10070 + BMU_BUF_SIZE)));
10071 + len += sprintf(buf + len, " buf count: %x\n", readl(base +
10072 + BMU_BUF_CNT));
10073 + len += sprintf(buf + len, " buf rem: %x\n", readl(base +
10074 + BMU_REM_BUF_CNT));
10075 + len += sprintf(buf + len, " buf curr: %x\n", readl(base +
10076 + BMU_CURR_BUF_CNT));
10077 + len += sprintf(buf + len, " free err: %x\n", readl(base +
10078 + BMU_FREE_ERR_ADDR));
10079 +
10080 + return len;
10081 +}
10082 +
10083 +static ssize_t gpi(char *buf, int id, void *base)
10084 +{
10085 + ssize_t len = 0;
10086 + u32 val;
10087 +
10088 + len += sprintf(buf + len, "%s%d:\n ", __func__, id);
10089 + len += block_version(buf + len, base + GPI_VERSION);
10090 +
10091 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
10092 + GPI_FIFO_STATUS));
10093 + val = readl(base + GPI_FIFO_DEBUG);
10094 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
10095 + 0x3f);
10096 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
10097 + 0x3f);
10098 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
10099 + 0x1ff);
10100 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
10101 + 0x1ff);
10102 + len += sprintf(buf + len, " overrun: %x\n", readl(base +
10103 + GPI_OVERRUN_DROPCNT));
10104 +
10105 + return len;
10106 +}
10107 +
10108 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
10109 + const char *buf, size_t count)
10110 +{
10111 +	if (kstrtoul(buf, 0, &class_do_clear))
10112 +		return -EINVAL;
10113 +
10114 +	return count;
10113 +}
10114 +
10115 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
10116 + char *buf)
10117 +{
10118 + ssize_t len = 0;
10119 + int id;
10120 + u32 val;
10121 + struct pfe_cpumon *cpumon = &pfe->cpumon;
10122 +
10123 + len += block_version(buf + len, CLASS_VERSION);
10124 +
10125 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10126 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
10127 +
10128 + val = readl(CLASS_PE0_DEBUG + id * 4);
10129 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
10130 +
10131 + len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
10132 + class_do_clear);
10133 + }
10134 + len += sprintf(buf + len, "aggregate load=%d%%\n\n",
10135 + cpumon->class_usage_pct);
10136 +
10137 + len += sprintf(buf + len, "pe status: 0x%x\n",
10138 + readl(CLASS_PE_STATUS));
10139 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
10140 + readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
10141 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
10142 + readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
10143 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
10144 +
10145 + len += class_phy_stats(buf + len, 0);
10146 + len += class_phy_stats(buf + len, 1);
10147 + len += class_phy_stats(buf + len, 2);
10148 + len += class_phy_stats(buf + len, 3);
10149 +
10150 + return len;
10151 +}
10152 +
10153 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
10154 + const char *buf, size_t count)
10155 +{
10156 +	if (kstrtoul(buf, 0, &tmu_do_clear))
10157 +		return -EINVAL;
10158 +
10159 +	return count;
10158 +}
10159 +
10160 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
10161 + char *buf)
10162 +{
10163 + ssize_t len = 0;
10164 + int id;
10165 + u32 val;
10166 +
10167 + len += block_version(buf + len, TMU_VERSION);
10168 +
10169 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
10170 + if (id == TMU2_ID)
10171 + continue;
10172 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
10173 +
10174 + len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
10175 + tmu_do_clear);
10176 + }
10177 +
10178 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
10179 + len += sprintf(buf + len, "inq fifo cnt: %x\n",
10180 + readl(TMU_PHY_INQ_FIFO_CNT));
10181 + val = readl(TMU_INQ_STAT);
10182 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
10183 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
10184 +
10185 + return len;
10186 +}
10187 +
10188 +static unsigned long drops_do_clear;
10189 +static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
10190 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10191 +static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
10192 +#endif
10193 +
10194 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
10195 + "ICC",
10196 + "Host Pkt Error",
10197 + "Rx Error",
10198 + "IPsec Outbound",
10199 + "IPsec Inbound",
10200 + "EXPT IPsec Error",
10201 + "Reassembly",
10202 + "Fragmenter",
10203 + "NAT-T",
10204 + "Socket",
10205 + "Multicast",
10206 + "NAT-PT",
10207 + "Tx Disabled",
10208 +};
10209 +
10210 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10211 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
10212 + "IPsec Outbound",
10213 + "IPsec Inbound",
10214 + "IPsec Rate Limiter",
10215 + "Fragmenter",
10216 + "Socket",
10217 + "Tx Disabled",
10218 + "Rx Error",
10219 +};
10220 +#endif
10221 +
10222 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
10223 + const char *buf, size_t count)
10224 +{
10225 +	if (kstrtoul(buf, 0, &drops_do_clear))
10226 +		return -EINVAL;
10227 +
10228 +	return count;
10227 +}
10228 +
10229 +static u32 tmu_drops[4][16];
10230 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
10231 + char *buf)
10232 +{
10233 + ssize_t len = 0;
10234 + int id, dropnum;
10235 + int tmu, queue;
10236 + u32 val;
10237 + u32 dmem_addr;
10238 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
10239 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10240 +
10241 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
10242 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
10243 + if (drops_do_clear)
10244 + pe_sync_stop(ctrl, (1 << id));
10245 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10246 + dropnum++) {
10247 + dmem_addr = CLASS_DM_DROP_CNTR;
10248 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
10249 + class_drop_counter[dropnum] += val;
10250 + num_class_drops += val;
10251 + if (drops_do_clear)
10252 + pe_dmem_write(id, 0, dmem_addr, 4);
10253 + }
10254 + if (drops_do_clear)
10255 + pe_start(ctrl, (1 << id));
10256 + }
10257 +
10258 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10259 + if (drops_do_clear)
10260 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10261 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10262 + dmem_addr = UTIL_DM_DROP_CNTR;
10263 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
10264 + util_drop_counter[dropnum] = val;
10265 + num_util_drops += val;
10266 + if (drops_do_clear)
10267 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
10268 + }
10269 + if (drops_do_clear)
10270 + pe_start(ctrl, (1 << UTIL_ID));
10271 +#endif
10272 + for (tmu = 0; tmu < 4; tmu++) {
10273 + for (queue = 0; queue < 16; queue++) {
10274 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
10275 + drops_do_clear);
10276 + num_tmu_drops += tmu_drops[tmu][queue];
10277 + }
10278 + }
10279 +
10280 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
10281 + len += sprintf(buf + len, "No PE drops\n\n");
10282 +
10283 + if (num_class_drops > 0) {
10284 + len += sprintf(buf + len, "Class PE drops --\n");
10285 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
10286 + dropnum++) {
10287 + if (class_drop_counter[dropnum] > 0)
10288 + len += sprintf(buf + len, " %s: %d\n",
10289 + class_drop_description[dropnum],
10290 + class_drop_counter[dropnum]);
10291 + }
10292 + len += sprintf(buf + len, "\n");
10293 + }
10294 +
10295 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10296 + if (num_util_drops > 0) {
10297 + len += sprintf(buf + len, "Util PE drops --\n");
10298 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
10299 + if (util_drop_counter[dropnum] > 0)
10300 + len += sprintf(buf + len, " %s: %d\n",
10301 + util_drop_description[dropnum],
10302 + util_drop_counter[dropnum]);
10303 + }
10304 + len += sprintf(buf + len, "\n");
10305 + }
10306 +#endif
10307 + if (num_tmu_drops > 0) {
10308 + len += sprintf(buf + len, "TMU drops --\n");
10309 + for (tmu = 0; tmu < 4; tmu++) {
10310 + for (queue = 0; queue < 16; queue++) {
10311 + if (tmu_drops[tmu][queue] > 0)
10312 + len += sprintf(buf + len,
10313 + " TMU%d-Q%d: %d\n"
10314 + , tmu, queue, tmu_drops[tmu][queue]);
10315 + }
10316 + }
10317 + len += sprintf(buf + len, "\n");
10318 + }
10319 +
10320 + return len;
10321 +}
10322 +
10323 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
10324 + *attr, char *buf)
10325 +{
10326 + return tmu_queues(buf, 0);
10327 +}
10328 +
10329 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
10330 + *attr, char *buf)
10331 +{
10332 + return tmu_queues(buf, 1);
10333 +}
10334 +
10335 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
10336 + *attr, char *buf)
10337 +{
10338 + return tmu_queues(buf, 2);
10339 +}
10340 +
10341 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
10342 + *attr, char *buf)
10343 +{
10344 + return tmu_queues(buf, 3);
10345 +}
10346 +
10347 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10348 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
10349 + const char *buf, size_t count)
10350 +{
10351 +	if (kstrtoul(buf, 0, &util_do_clear))
10352 +		return -EINVAL;
10353 +
10354 +	return count;
10353 +}
10354 +
10355 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
10356 + char *buf)
10357 +{
10358 + ssize_t len = 0;
10359 + struct pfe_ctrl *ctrl = &pfe->ctrl;
10360 +
10361 + len += block_version(buf + len, UTIL_VERSION);
10362 +
10363 + pe_sync_stop(ctrl, (1 << UTIL_ID));
10364 + len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
10365 + util_do_clear);
10366 + pe_start(ctrl, (1 << UTIL_ID));
10367 +
10368 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
10369 + len += sprintf(buf + len, "max buf cnt: %x\n",
10370 + readl(UTIL_MAX_BUF_CNT));
10371 + len += sprintf(buf + len, "tsq max cnt: %x\n",
10372 + readl(UTIL_TSQ_MAX_CNT));
10373 +
10374 + return len;
10375 +}
10376 +#endif
10377 +
10378 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
10379 + char *buf)
10380 +{
10381 + ssize_t len = 0;
10382 +
10383 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
10384 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
10385 +
10386 + return len;
10387 +}
10388 +
10389 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
10390 + char *buf)
10391 +{
10392 + ssize_t len = 0;
10393 +
10394 + len += sprintf(buf + len, "hif:\n ");
10395 + len += block_version(buf + len, HIF_VERSION);
10396 +
10397 + len += sprintf(buf + len, " tx curr bd: %x\n",
10398 + readl(HIF_TX_CURR_BD_ADDR));
10399 + len += sprintf(buf + len, " tx status: %x\n",
10400 + readl(HIF_TX_STATUS));
10401 + len += sprintf(buf + len, " tx dma status: %x\n",
10402 + readl(HIF_TX_DMA_STATUS));
10403 +
10404 + len += sprintf(buf + len, " rx curr bd: %x\n",
10405 + readl(HIF_RX_CURR_BD_ADDR));
10406 + len += sprintf(buf + len, " rx status: %x\n",
10407 + readl(HIF_RX_STATUS));
10408 + len += sprintf(buf + len, " rx dma status: %x\n",
10409 + readl(HIF_RX_DMA_STATUS));
10410 +
10411 + len += sprintf(buf + len, "hif nocopy:\n ");
10412 + len += block_version(buf + len, HIF_NOCPY_VERSION);
10413 +
10414 + len += sprintf(buf + len, " tx curr bd: %x\n",
10415 + readl(HIF_NOCPY_TX_CURR_BD_ADDR));
10416 + len += sprintf(buf + len, " tx status: %x\n",
10417 + readl(HIF_NOCPY_TX_STATUS));
10418 + len += sprintf(buf + len, " tx dma status: %x\n",
10419 + readl(HIF_NOCPY_TX_DMA_STATUS));
10420 +
10421 + len += sprintf(buf + len, " rx curr bd: %x\n",
10422 + readl(HIF_NOCPY_RX_CURR_BD_ADDR));
10423 + len += sprintf(buf + len, " rx status: %x\n",
10424 + readl(HIF_NOCPY_RX_STATUS));
10425 + len += sprintf(buf + len, " rx dma status: %x\n",
10426 + readl(HIF_NOCPY_RX_DMA_STATUS));
10427 +
10428 + return len;
10429 +}
10430 +
10431 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
10432 + char *buf)
10433 +{
10434 + ssize_t len = 0;
10435 +
10436 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
10437 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
10438 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
10439 +
10440 + return len;
10441 +}
10442 +
10443 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
10444 + *attr, char *buf)
10445 +{
10446 + ssize_t len = 0;
10447 + struct pfe_memmon *memmon = &pfe->memmon;
10448 +
10449 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
10450 + memmon->kernel_memory_allocated,
10451 + (memmon->kernel_memory_allocated + 1023) / 1024);
10452 +
10453 + return len;
10454 +}
10455 +
10456 +#ifdef HIF_NAPI_STATS
10457 +static ssize_t pfe_show_hif_napi_stats(struct device *dev,
10458 + struct device_attribute *attr,
10459 + char *buf)
10460 +{
10461 + struct platform_device *pdev = to_platform_device(dev);
10462 + struct pfe *pfe = platform_get_drvdata(pdev);
10463 + ssize_t len = 0;
10464 +
10465 + len += sprintf(buf + len, "sched: %u\n",
10466 + pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
10467 + len += sprintf(buf + len, "poll: %u\n",
10468 + pfe->hif.napi_counters[NAPI_POLL_COUNT]);
10469 + len += sprintf(buf + len, "packet: %u\n",
10470 + pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
10471 + len += sprintf(buf + len, "budget: %u\n",
10472 + pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
10473 + len += sprintf(buf + len, "desc: %u\n",
10474 + pfe->hif.napi_counters[NAPI_DESC_COUNT]);
10475 + len += sprintf(buf + len, "full: %u\n",
10476 + pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
10477 +
10478 + return len;
10479 +}
10480 +
10481 +static ssize_t pfe_set_hif_napi_stats(struct device *dev,
10482 + struct device_attribute *attr,
10483 + const char *buf, size_t count)
10484 +{
10485 + struct platform_device *pdev = to_platform_device(dev);
10486 + struct pfe *pfe = platform_get_drvdata(pdev);
10487 +
10488 + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
10489 +
10490 + return count;
10491 +}
10492 +
10493 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
10494 + pfe_set_hif_napi_stats);
10495 +#endif
10496 +
10497 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
10498 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
10499 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10500 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
10501 +#endif
10502 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
10503 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
10504 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
10505 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
10506 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
10507 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
10508 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
10509 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
10510 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
10511 +
10512 +int pfe_sysfs_init(struct pfe *pfe)
10513 +{
10514 + if (device_create_file(pfe->dev, &dev_attr_class))
10515 + goto err_class;
10516 +
10517 + if (device_create_file(pfe->dev, &dev_attr_tmu))
10518 + goto err_tmu;
10519 +
10520 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10521 + if (device_create_file(pfe->dev, &dev_attr_util))
10522 + goto err_util;
10523 +#endif
10524 +
10525 + if (device_create_file(pfe->dev, &dev_attr_bmu))
10526 + goto err_bmu;
10527 +
10528 + if (device_create_file(pfe->dev, &dev_attr_hif))
10529 + goto err_hif;
10530 +
10531 + if (device_create_file(pfe->dev, &dev_attr_gpi))
10532 + goto err_gpi;
10533 +
10534 + if (device_create_file(pfe->dev, &dev_attr_drops))
10535 + goto err_drops;
10536 +
10537 + if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
10538 + goto err_tmu0_queues;
10539 +
10540 + if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
10541 + goto err_tmu1_queues;
10542 +
10543 + if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
10544 + goto err_tmu2_queues;
10545 +
10546 + if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
10547 + goto err_tmu3_queues;
10548 +
10549 + if (device_create_file(pfe->dev, &dev_attr_pfemem))
10550 + goto err_pfemem;
10551 +
10552 +#ifdef HIF_NAPI_STATS
10553 + if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
10554 + goto err_hif_napi_stats;
10555 +#endif
10556 +
10557 + return 0;
10558 +
10559 +#ifdef HIF_NAPI_STATS
10560 +err_hif_napi_stats:
10561 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10562 +#endif
10563 +
10564 +err_pfemem:
10565 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10566 +
10567 +err_tmu3_queues:
10568 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10569 +
10570 +err_tmu2_queues:
10571 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10572 +
10573 +err_tmu1_queues:
10574 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10575 +
10576 +err_tmu0_queues:
10577 + device_remove_file(pfe->dev, &dev_attr_drops);
10578 +
10579 +err_drops:
10580 + device_remove_file(pfe->dev, &dev_attr_gpi);
10581 +
10582 +err_gpi:
10583 + device_remove_file(pfe->dev, &dev_attr_hif);
10584 +
10585 +err_hif:
10586 + device_remove_file(pfe->dev, &dev_attr_bmu);
10587 +
10588 +err_bmu:
10589 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10590 + device_remove_file(pfe->dev, &dev_attr_util);
10591 +
10592 +err_util:
10593 +#endif
10594 + device_remove_file(pfe->dev, &dev_attr_tmu);
10595 +
10596 +err_tmu:
10597 + device_remove_file(pfe->dev, &dev_attr_class);
10598 +
10599 +err_class:
10600 + return -1;
10601 +}
10602 +
10603 +void pfe_sysfs_exit(struct pfe *pfe)
10604 +{
10605 +#ifdef HIF_NAPI_STATS
10606 + device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
10607 +#endif
10608 + device_remove_file(pfe->dev, &dev_attr_pfemem);
10609 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
10610 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
10611 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
10612 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
10613 + device_remove_file(pfe->dev, &dev_attr_drops);
10614 + device_remove_file(pfe->dev, &dev_attr_gpi);
10615 + device_remove_file(pfe->dev, &dev_attr_hif);
10616 + device_remove_file(pfe->dev, &dev_attr_bmu);
10617 +#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
10618 + device_remove_file(pfe->dev, &dev_attr_util);
10619 +#endif
10620 + device_remove_file(pfe->dev, &dev_attr_tmu);
10621 + device_remove_file(pfe->dev, &dev_attr_class);
10622 +}
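pfe_sysfs_init() above uses the classic goto-unwind ladder: each failing device_create_file() jumps to a label that removes everything registered so far (though it reports a bare -1 rather than the underlying errno), and pfe_sysfs_exit() repeats the removals in full. The same set of files could be registered and removed in one call each with an attribute group; a sketch under the assumption that a single group is acceptable here, listing only a subset of the attributes:

static struct attribute *pfe_attrs[] = {
	&dev_attr_class.attr,
	&dev_attr_tmu.attr,
	&dev_attr_bmu.attr,
	&dev_attr_hif.attr,
	&dev_attr_gpi.attr,
	&dev_attr_drops.attr,
	&dev_attr_pfemem.attr,
	NULL,			/* sentinel required by sysfs */
};

static const struct attribute_group pfe_attr_group = {
	.attrs = pfe_attrs,
};

int pfe_sysfs_init(struct pfe *pfe)
{
	/* registers every attribute, unwinding internally on failure */
	return sysfs_create_group(&pfe->dev->kobj, &pfe_attr_group);
}

void pfe_sysfs_exit(struct pfe *pfe)
{
	sysfs_remove_group(&pfe->dev->kobj, &pfe_attr_group);
}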
10623 --- /dev/null
10624 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
10625 @@ -0,0 +1,29 @@
10626 +/*
10627 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
10628 + * Copyright 2017 NXP
10629 + *
10630 + * This program is free software; you can redistribute it and/or modify
10631 + * it under the terms of the GNU General Public License as published by
10632 + * the Free Software Foundation; either version 2 of the License, or
10633 + * (at your option) any later version.
10634 + *
10635 + * This program is distributed in the hope that it will be useful,
10636 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10637 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10638 + * GNU General Public License for more details.
10639 + *
10640 + * You should have received a copy of the GNU General Public License
10641 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
10642 + */
10643 +
10644 +#ifndef _PFE_SYSFS_H_
10645 +#define _PFE_SYSFS_H_
10646 +
10647 +#include <linux/proc_fs.h>
10648 +
10649 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
10650 +
10651 +int pfe_sysfs_init(struct pfe *pfe);
10652 +void pfe_sysfs_exit(struct pfe *pfe);
10653 +
10654 +#endif /* _PFE_SYSFS_H_ */
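Once the driver is bound, the attributes declared above appear in the platform device's sysfs directory. A hypothetical userspace reader for one of them; the exact path depends on the device-tree node name, so the one below is only an assumption:

#include <stdio.h>

int main(void)
{
	char line[256];
	/* path is an assumption; locate the real one under
	 * /sys/bus/platform/devices/ on the target board
	 */
	FILE *f = fopen("/sys/bus/platform/devices/pfe/drops", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}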