target/linux/bcm53xx/patches-3.18/420-mtd-bcm5301x_nand.patch
1 --- a/drivers/mtd/nand/Kconfig
2 +++ b/drivers/mtd/nand/Kconfig
3 @@ -516,4 +516,10 @@ config MTD_NAND_XWAY
4 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
5 to the External Bus Unit (EBU).
6
7 +config MTD_NAND_BCM
8 + tristate "Support for NAND on some Broadcom SoCs"
9 + help
10 + This driver is currently used for the NAND flash controller on the
11 + Broadcom BCM5301X (NorthStar) SoCs.
12 +
13 endif # MTD_NAND
14 --- a/drivers/mtd/nand/Makefile
15 +++ b/drivers/mtd/nand/Makefile
16 @@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
17 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
18 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
19 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
20 +obj-$(CONFIG_MTD_NAND_BCM) += bcm_nand.o
21
22 nand-objs := nand_base.o nand_bbt.o nand_timings.o
23 --- /dev/null
24 +++ b/drivers/mtd/nand/bcm_nand.c
25 @@ -0,0 +1,1591 @@
26 +/*
27 + * Northstar NAND controller driver
28 + * for Linux NAND library and MTD interface
29 + *
30 + * (c) Broadcom, Inc. 2012 All Rights Reserved.
31 + * Copyright 2014 Hauke Mehrtens <hauke@hauke-m.de>
32 + *
33 + * Licensed under the GNU/GPL. See COPYING for details.
34 + *
35 + * This module interfaces the NAND controller and hardware ECC capabilities
36 + * to the generic NAND chip support in the NAND library.
37 + *
38 + * Notes:
39 + * This driver depends on the generic NAND driver, but works at the
40 + * page level for operations.
41 + *
42 + * When a page is written, the ECC calculated also protects the OOB
43 + * bytes not taken by ECC, and so the OOB must be combined with any
44 + * OOB data that preceded the page-write operation in order for the
45 + * ECC to be calculated correctly.
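+ * (In this driver that is done by reading back the existing OOB bytes
+ * and AND-ing them with the caller-supplied OOB before programming;
+ * see bcmnand_write_page_do() below.)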
46 + * Also, when a page is erased but its OOB data is not, HW ECC will
47 + * indicate an error, because it checks the OOB too; this calls for
48 + * some help from the software in this driver.
49 + *
50 + * TBD:
51 + * Block locking/unlocking support, OTP support
52 + */
53 +
54 +
55 +#include <linux/kernel.h>
56 +#include <linux/module.h>
57 +#include <linux/io.h>
58 +#include <linux/ioport.h>
59 +#include <linux/interrupt.h>
60 +#include <linux/delay.h>
61 +#include <linux/err.h>
62 +#include <linux/slab.h>
63 +#include <linux/bcma/bcma.h>
64 +#include <linux/of_irq.h>
65 +
66 +#include <linux/mtd/mtd.h>
67 +#include <linux/mtd/nand.h>
68 +
69 +#define NANDC_MAX_CHIPS 2 /* Only 2 CSn supported in NorthStar */
70 +
71 +#define DRV_NAME "bcmnand"
72 +#define DRV_DESC "Northstar on-chip NAND Flash Controller driver"
73 +
74 +/*
75 + * Driver private control structure
76 + */
77 +struct bcmnand_ctrl {
78 + struct mtd_info mtd;
79 + struct nand_chip nand;
80 + struct bcma_device *core;
81 +
82 + struct completion op_completion;
83 +
84 + struct nand_ecclayout ecclayout;
85 + int cmd_ret; /* saved error code */
86 + unsigned char oob_index;
87 + unsigned char id_byte_index;
88 + unsigned char chip_num;
89 + unsigned char last_cmd;
90 + unsigned char ecc_level;
91 + unsigned char sector_size_shift;
92 + unsigned char sec_per_page_shift;
93 +};
94 +
95 +
96 +/*
97 + * IRQ numbers - offset from first irq in nandc_irq resource
98 + */
99 +#define NANDC_IRQ_RD_MISS 0
100 +#define NANDC_IRQ_ERASE_COMPLETE 1
101 +#define NANDC_IRQ_COPYBACK_COMPLETE 2
102 +#define NANDC_IRQ_PROGRAM_COMPLETE 3
103 +#define NANDC_IRQ_CONTROLLER_RDY 4
104 +#define NANDC_IRQ_RDBSY_RDY 5
105 +#define NANDC_IRQ_ECC_UNCORRECTABLE 6
106 +#define NANDC_IRQ_ECC_CORRECTABLE 7
107 +#define NANDC_IRQ_NUM 8
108 +
109 +struct bcmnand_reg_field {
110 + unsigned int reg;
111 + unsigned int pos;
112 + unsigned int width;
113 +};
114 +
115 +/*
116 + * REGISTERS
117 + *
118 + * Individual bit-fields of registers are specified here
119 + * for clarity, and the rest of the code will access each field
120 + * as if it was its own register.
121 + *
122 + * Following registers are off <reg_base>:
123 + */
124 +#define REG_BIT_FIELD(r, p, w) ((struct bcmnand_reg_field){(r), (p), (w)})
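+/*
+ * For example, NANDC_REV_MAJOR below is REG_BIT_FIELD(0x0, 8, 8), so
+ * bcmnand_reg_read(ctrl, NANDC_REV_MAJOR) returns bits 15:8 of the
+ * register at offset 0x0 (see bcmnand_reg_read() further down).
+ */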
125 +
126 +#define NANDC_8KB_PAGE_SUPPORT REG_BIT_FIELD(0x0, 31, 1)
127 +#define NANDC_REV_MAJOR REG_BIT_FIELD(0x0, 8, 8)
128 +#define NANDC_REV_MINOR REG_BIT_FIELD(0x0, 0, 8)
129 +
130 +#define NANDC_CMD_START_OPCODE REG_BIT_FIELD(0x4, 24, 5)
131 +
132 +#define NANDC_CMD_CS_SEL REG_BIT_FIELD(0x8, 16, 3)
133 +#define NANDC_CMD_EXT_ADDR REG_BIT_FIELD(0x8, 0, 16)
134 +
135 +#define NANDC_CMD_ADDRESS REG_BIT_FIELD(0xc, 0, 32)
136 +#define NANDC_CMD_END_ADDRESS REG_BIT_FIELD(0x10, 0, 32)
137 +
138 +#define NANDC_INT_STATUS REG_BIT_FIELD(0x14, 0, 32)
139 +#define NANDC_INT_STAT_CTLR_RDY REG_BIT_FIELD(0x14, 31, 1)
140 +#define NANDC_INT_STAT_FLASH_RDY REG_BIT_FIELD(0x14, 30, 1)
141 +#define NANDC_INT_STAT_CACHE_VALID REG_BIT_FIELD(0x14, 29, 1)
142 +#define NANDC_INT_STAT_SPARE_VALID REG_BIT_FIELD(0x14, 28, 1)
143 +#define NANDC_INT_STAT_ERASED REG_BIT_FIELD(0x14, 27, 1)
144 +#define NANDC_INT_STAT_PLANE_RDY REG_BIT_FIELD(0x14, 26, 1)
145 +#define NANDC_INT_STAT_FLASH_STATUS REG_BIT_FIELD(0x14, 0, 8)
146 +
147 +#define NANDC_CS_LOCK REG_BIT_FIELD(0x18, 31, 1)
148 +#define NANDC_CS_AUTO_CONFIG REG_BIT_FIELD(0x18, 30, 1)
149 +#define NANDC_CS_NAND_WP REG_BIT_FIELD(0x18, 29, 1)
150 +#define NANDC_CS_BLK0_WP REG_BIT_FIELD(0x18, 28, 1)
151 +#define NANDC_CS_SW_USING_CS(n) REG_BIT_FIELD(0x18, 8+(n), 1)
152 +#define NANDC_CS_MAP_SEL_CS(n) REG_BIT_FIELD(0x18, 0+(n), 1)
153 +
154 +#define NANDC_XOR_ADDR_BLK0_ONLY REG_BIT_FIELD(0x1c, 31, 1)
155 +#define NANDC_XOR_ADDR_CS(n) REG_BIT_FIELD(0x1c, 0+(n), 1)
156 +
157 +#define NANDC_LL_OP_RET_IDLE REG_BIT_FIELD(0x20, 31, 1)
158 +#define NANDC_LL_OP_CLE REG_BIT_FIELD(0x20, 19, 1)
159 +#define NANDC_LL_OP_ALE REG_BIT_FIELD(0x20, 18, 1)
160 +#define NANDC_LL_OP_WE REG_BIT_FIELD(0x20, 17, 1)
161 +#define NANDC_LL_OP_RE REG_BIT_FIELD(0x20, 16, 1)
162 +#define NANDC_LL_OP_DATA REG_BIT_FIELD(0x20, 0, 16)
163 +
164 +#define NANDC_MPLANE_ADDR_EXT REG_BIT_FIELD(0x24, 0, 16)
165 +#define NANDC_MPLANE_ADDR REG_BIT_FIELD(0x28, 0, 32)
166 +
167 +#define NANDC_ACC_CTRL_CS(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 32)
168 +#define NANDC_ACC_CTRL_RD_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 31, 1)
169 +#define NANDC_ACC_CTRL_WR_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 30, 1)
170 +#define NANDC_ACC_CTRL_CE_CARE(n) REG_BIT_FIELD(0x50+((n)<<4), 29, 1)
171 +#define NANDC_ACC_CTRL_PGM_RDIN(n) REG_BIT_FIELD(0x50+((n)<<4), 28, 1)
172 +#define NANDC_ACC_CTRL_ERA_ECC_ERR(n) REG_BIT_FIELD(0x50+((n)<<4), 27, 1)
173 +#define NANDC_ACC_CTRL_PGM_PARTIAL(n) REG_BIT_FIELD(0x50+((n)<<4), 26, 1)
174 +#define NANDC_ACC_CTRL_WR_PREEMPT(n) REG_BIT_FIELD(0x50+((n)<<4), 25, 1)
175 +#define NANDC_ACC_CTRL_PG_HIT(n) REG_BIT_FIELD(0x50+((n)<<4), 24, 1)
176 +#define NANDC_ACC_CTRL_PREFETCH(n) REG_BIT_FIELD(0x50+((n)<<4), 23, 1)
177 +#define NANDC_ACC_CTRL_CACHE_MODE(n) REG_BIT_FIELD(0x50+((n)<<4), 22, 1)
178 +#define NANDC_ACC_CTRL_CACHE_LASTPG(n) REG_BIT_FIELD(0x50+((n)<<4), 21, 1)
179 +#define NANDC_ACC_CTRL_ECC_LEVEL(n) REG_BIT_FIELD(0x50+((n)<<4), 16, 5)
180 +#define NANDC_ACC_CTRL_SECTOR_1K(n) REG_BIT_FIELD(0x50+((n)<<4), 7, 1)
181 +#define NANDC_ACC_CTRL_SPARE_SIZE(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 7)
182 +
183 +#define NANDC_CONFIG_CS(n) REG_BIT_FIELD(0x54+((n)<<4), 0, 32)
184 +#define NANDC_CONFIG_LOCK(n) REG_BIT_FIELD(0x54+((n)<<4), 31, 1)
185 +#define NANDC_CONFIG_BLK_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 28, 3)
186 +#define NANDC_CONFIG_CHIP_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 24, 4)
187 +#define NANDC_CONFIG_CHIP_WIDTH(n) REG_BIT_FIELD(0x54+((n)<<4), 23, 1)
188 +#define NANDC_CONFIG_PAGE_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 20, 2)
189 +#define NANDC_CONFIG_FUL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 16, 3)
190 +#define NANDC_CONFIG_COL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 12, 3)
191 +#define NANDC_CONFIG_BLK_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 8, 3)
192 +
193 +#define NANDC_TIMING_1_CS(n) REG_BIT_FIELD(0x58+((n)<<4), 0, 32)
194 +#define NANDC_TIMING_2_CS(n) REG_BIT_FIELD(0x5c+((n)<<4), 0, 32)
195 + /* Individual bits for Timing registers - TBD */
196 +
197 +#define NANDC_CORR_STAT_THRESH_CS(n) REG_BIT_FIELD(0xc0, 6*(n), 6)
198 +
199 +#define NANDC_BLK_WP_END_ADDR REG_BIT_FIELD(0xc8, 0, 32)
200 +
201 +#define NANDC_MPLANE_ERASE_CYC2_OPCODE REG_BIT_FIELD(0xcc, 24, 8)
202 +#define NANDC_MPLANE_READ_STAT_OPCODE REG_BIT_FIELD(0xcc, 16, 8)
203 +#define NANDC_MPLANE_PROG_ODD_OPCODE REG_BIT_FIELD(0xcc, 8, 8)
204 +#define NANDC_MPLANE_PROG_TRL_OPCODE REG_BIT_FIELD(0xcc, 0, 8)
205 +
206 +#define NANDC_MPLANE_PGCACHE_TRL_OPCODE REG_BIT_FIELD(0xd0, 24, 8)
207 +#define NANDC_MPLANE_READ_STAT2_OPCODE REG_BIT_FIELD(0xd0, 16, 8)
208 +#define NANDC_MPLANE_READ_EVEN_OPCODE REG_BIT_FIELD(0xd0, 8, 8)
209 +#define NANDC_MPLANE_READ_ODD__OPCODE REG_BIT_FIELD(0xd0, 0, 8)
210 +
211 +#define NANDC_MPLANE_CTRL_ERASE_CYC2_EN REG_BIT_FIELD(0xd4, 31, 1)
212 +#define NANDC_MPLANE_CTRL_RD_ADDR_SIZE REG_BIT_FIELD(0xd4, 30, 1)
213 +#define NANDC_MPLANE_CTRL_RD_CYC_ADDR REG_BIT_FIELD(0xd4, 29, 1)
214 +#define NANDC_MPLANE_CTRL_RD_COL_ADDR REG_BIT_FIELD(0xd4, 28, 1)
215 +
216 +#define NANDC_UNCORR_ERR_COUNT REG_BIT_FIELD(0xfc, 0, 32)
217 +
218 +#define NANDC_CORR_ERR_COUNT REG_BIT_FIELD(0x100, 0, 32)
219 +
220 +#define NANDC_READ_CORR_BIT_COUNT REG_BIT_FIELD(0x104, 0, 32)
221 +
222 +#define NANDC_BLOCK_LOCK_STATUS REG_BIT_FIELD(0x108, 0, 8)
223 +
224 +#define NANDC_ECC_CORR_ADDR_CS REG_BIT_FIELD(0x10c, 16, 3)
225 +#define NANDC_ECC_CORR_ADDR_EXT REG_BIT_FIELD(0x10c, 0, 16)
226 +
227 +#define NANDC_ECC_CORR_ADDR REG_BIT_FIELD(0x110, 0, 32)
228 +
229 +#define NANDC_ECC_UNC_ADDR_CS REG_BIT_FIELD(0x114, 16, 3)
230 +#define NANDC_ECC_UNC_ADDR_EXT REG_BIT_FIELD(0x114, 0, 16)
231 +
232 +#define NANDC_ECC_UNC_ADDR REG_BIT_FIELD(0x118, 0, 32)
233 +
234 +#define NANDC_READ_ADDR_CS REG_BIT_FIELD(0x11c, 16, 3)
235 +#define NANDC_READ_ADDR_EXT REG_BIT_FIELD(0x11c, 0, 16)
236 +#define NANDC_READ_ADDR REG_BIT_FIELD(0x120, 0, 32)
237 +
238 +#define NANDC_PROG_ADDR_CS REG_BIT_FIELD(0x124, 16, 3)
239 +#define NANDC_PROG_ADDR_EXT REG_BIT_FIELD(0x124, 0, 16)
240 +#define NANDC_PROG_ADDR REG_BIT_FIELD(0x128, 0, 32)
241 +
242 +#define NANDC_CPYBK_ADDR_CS REG_BIT_FIELD(0x12c, 16, 3)
243 +#define NANDC_CPYBK_ADDR_EXT REG_BIT_FIELD(0x12c, 0, 16)
244 +#define NANDC_CPYBK_ADDR REG_BIT_FIELD(0x130, 0, 32)
245 +
246 +#define NANDC_ERASE_ADDR_CS REG_BIT_FIELD(0x134, 16, 3)
247 +#define NANDC_ERASE_ADDR_EXT REG_BIT_FIELD(0x134, 0, 16)
248 +#define NANDC_ERASE_ADDR REG_BIT_FIELD(0x138, 0, 32)
249 +
250 +#define NANDC_INV_READ_ADDR_CS REG_BIT_FIELD(0x13c, 16, 3)
251 +#define NANDC_INV_READ_ADDR_EXT REG_BIT_FIELD(0x13c, 0, 16)
252 +#define NANDC_INV_READ_ADDR REG_BIT_FIELD(0x140, 0, 32)
253 +
254 +#define NANDC_INIT_STAT REG_BIT_FIELD(0x144, 0, 32)
255 +#define NANDC_INIT_ONFI_DONE REG_BIT_FIELD(0x144, 31, 1)
256 +#define NANDC_INIT_DEVID_DONE REG_BIT_FIELD(0x144, 30, 1)
257 +#define NANDC_INIT_SUCCESS REG_BIT_FIELD(0x144, 29, 1)
258 +#define NANDC_INIT_FAIL REG_BIT_FIELD(0x144, 28, 1)
259 +#define NANDC_INIT_BLANK REG_BIT_FIELD(0x144, 27, 1)
260 +#define NANDC_INIT_TIMEOUT REG_BIT_FIELD(0x144, 26, 1)
261 +#define NANDC_INIT_UNC_ERROR REG_BIT_FIELD(0x144, 25, 1)
262 +#define NANDC_INIT_CORR_ERROR REG_BIT_FIELD(0x144, 24, 1)
263 +#define NANDC_INIT_PARAM_RDY REG_BIT_FIELD(0x144, 23, 1)
264 +#define NANDC_INIT_AUTH_FAIL REG_BIT_FIELD(0x144, 22, 1)
265 +
266 +#define NANDC_ONFI_STAT REG_BIT_FIELD(0x148, 0, 32)
267 +#define NANDC_ONFI_DEBUG REG_BIT_FIELD(0x148, 28, 4)
268 +#define NANDC_ONFI_PRESENT REG_BIT_FIELD(0x148, 27, 1)
269 +#define NANDC_ONFI_BADID_PG2 REG_BIT_FIELD(0x148, 5, 1)
270 +#define NANDC_ONFI_BADID_PG1 REG_BIT_FIELD(0x148, 4, 1)
271 +#define NANDC_ONFI_BADID_PG0 REG_BIT_FIELD(0x148, 3, 1)
272 +#define NANDC_ONFI_BADCRC_PG2 REG_BIT_FIELD(0x148, 2, 1)
273 +#define NANDC_ONFI_BADCRC_PG1 REG_BIT_FIELD(0x148, 1, 1)
274 +#define NANDC_ONFI_BADCRC_PG0 REG_BIT_FIELD(0x148, 0, 1)
275 +
276 +#define NANDC_ONFI_DEBUG_DATA REG_BIT_FIELD(0x14c, 0, 32)
277 +
278 +#define NANDC_SEMAPHORE REG_BIT_FIELD(0x150, 0, 8)
279 +
280 +#define NANDC_DEVID_BYTE(b) REG_BIT_FIELD(0x194+((b)&0x4), \
281 + 24-(((b)&3)<<3), 8)
282 +
283 +#define NANDC_LL_RDDATA REG_BIT_FIELD(0x19c, 0, 16)
284 +
285 +#define NANDC_INT_N_REG(n) REG_BIT_FIELD(0xf00|((n)<<2), 0, 1)
286 +#define NANDC_INT_DIREC_READ_MISS REG_BIT_FIELD(0xf00, 0, 1)
287 +#define NANDC_INT_ERASE_DONE REG_BIT_FIELD(0xf04, 0, 1)
288 +#define NANDC_INT_CPYBK_DONE REG_BIT_FIELD(0xf08, 0, 1)
289 +#define NANDC_INT_PROGRAM_DONE REG_BIT_FIELD(0xf0c, 0, 1)
290 +#define NANDC_INT_CONTROLLER_RDY REG_BIT_FIELD(0xf10, 0, 1)
291 +#define NANDC_INT_RDBSY_RDY REG_BIT_FIELD(0xf14, 0, 1)
292 +#define NANDC_INT_ECC_UNCORRECTABLE REG_BIT_FIELD(0xf18, 0, 1)
293 +#define NANDC_INT_ECC_CORRECTABLE REG_BIT_FIELD(0xf1c, 0, 1)
294 +
295 +/*
296 + * The following registers are treated as contiguous IO memory, offset is from
297 + * <reg_base>, and the data is in big-endian byte order
298 + */
299 +#define NANDC_SPARE_AREA_READ_OFF 0x200
300 +#define NANDC_SPARE_AREA_WRITE_OFF 0x280
301 +#define NANDC_CACHE_OFF 0x400
302 +#define NANDC_CACHE_SIZE (128*4)
303 +
304 +struct bcmnand_areg_field {
305 + unsigned int reg;
306 + unsigned int pos;
307 + unsigned int width;
308 +};
309 +
310 +/*
311 + * The following IDM (a.k.a. Slave Wrapper) registers are off <idm_base>:
312 + */
313 +#define IDMREG_BIT_FIELD(r, p, w) ((struct bcmnand_areg_field){(r), (p), (w)})
314 +
315 +#define NANDC_IDM_AXI_BIG_ENDIAN IDMREG_BIT_FIELD(0x408, 28, 1)
316 +#define NANDC_IDM_APB_LITTLE_ENDIAN IDMREG_BIT_FIELD(0x408, 24, 1)
317 +#define NANDC_IDM_TM IDMREG_BIT_FIELD(0x408, 16, 5)
318 +#define NANDC_IDM_IRQ_CORRECABLE_EN IDMREG_BIT_FIELD(0x408, 9, 1)
319 +#define NANDC_IDM_IRQ_UNCORRECABLE_EN IDMREG_BIT_FIELD(0x408, 8, 1)
320 +#define NANDC_IDM_IRQ_RDYBSY_RDY_EN IDMREG_BIT_FIELD(0x408, 7, 1)
321 +#define NANDC_IDM_IRQ_CONTROLLER_RDY_EN IDMREG_BIT_FIELD(0x408, 6, 1)
322 +#define NANDC_IDM_IRQ_PRPOGRAM_COMP_EN IDMREG_BIT_FIELD(0x408, 5, 1)
323 +#define NANDC_IDM_IRQ_COPYBK_COMP_EN IDMREG_BIT_FIELD(0x408, 4, 1)
324 +#define NANDC_IDM_IRQ_ERASE_COMP_EN IDMREG_BIT_FIELD(0x408, 3, 1)
325 +#define NANDC_IDM_IRQ_READ_MISS_EN IDMREG_BIT_FIELD(0x408, 2, 1)
326 +#define NANDC_IDM_IRQ_N_EN(n) IDMREG_BIT_FIELD(0x408, 2+(n), 1)
327 +
328 +#define NANDC_IDM_CLOCK_EN IDMREG_BIT_FIELD(0x408, 0, 1)
329 +
330 +#define NANDC_IDM_IO_ECC_CORR IDMREG_BIT_FIELD(0x500, 3, 1)
331 +#define NANDC_IDM_IO_ECC_UNCORR IDMREG_BIT_FIELD(0x500, 2, 1)
332 +#define NANDC_IDM_IO_RDYBSY IDMREG_BIT_FIELD(0x500, 1, 1)
333 +#define NANDC_IDM_IO_CTRL_RDY IDMREG_BIT_FIELD(0x500, 0, 1)
334 +
335 +#define NANDC_IDM_RESET IDMREG_BIT_FIELD(0x800, 0, 1)
336 + /* Remaining IDM registers do not seem to be useful, skipped */
337 +
338 +/*
339 + * NAND Controller has its own command opcodes
340 + * different from opcodes sent to the actual flash chip
341 + */
342 +#define NANDC_CMD_OPCODE_NULL 0
343 +#define NANDC_CMD_OPCODE_PAGE_READ 1
344 +#define NANDC_CMD_OPCODE_SPARE_READ 2
345 +#define NANDC_CMD_OPCODE_STATUS_READ 3
346 +#define NANDC_CMD_OPCODE_PAGE_PROG 4
347 +#define NANDC_CMD_OPCODE_SPARE_PROG 5
348 +#define NANDC_CMD_OPCODE_DEVID_READ 7
349 +#define NANDC_CMD_OPCODE_BLOCK_ERASE 8
350 +#define NANDC_CMD_OPCODE_FLASH_RESET 9
351 +
352 +/*
353 + * NAND Controller hardware ECC data size
354 + *
355 + * The following table contains the number of bytes needed for
356 + * each of the ECC levels, per "sector", which is either 512 or 1024 bytes.
357 + * The actual layout is as follows:
358 + * The entire spare area is equally divided into as many sections as there
359 + * are sectors per page, and the ECC data is located at the end of each
360 + * of these sections.
361 + * For example, given a 2K per page and 64 bytes spare device, configured for
362 + * sector size 1k and ECC level of 4, the spare area will be divided into 2
363 + * sections 32 bytes each, and the last 14 bytes of 32 in each section will
364 + * be filled with ECC data.
365 + * Note: the name of the algorithm and the number of error bits it can correct
366 + * is of no consequence to this driver, therefore omitted.
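+ * As a cross-check of the example above: with 1024-byte sectors and
+ * ECC level 4 the driver's formula gives ((4 * 14) + 3) >> 2 = 14 ECC
+ * bytes per sector, matching the table entry { 10, 4, 14 } below.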
367 + */
368 +struct bcmnand_ecc_size_s {
369 + unsigned char sector_size_shift;
370 + unsigned char ecc_level;
371 + unsigned char ecc_bytes_per_sec;
372 + unsigned char reserved;
373 +};
374 +
375 +static const struct bcmnand_ecc_size_s bcmnand_ecc_sizes[] = {
376 + { 9, 0, 0 },
377 + { 10, 0, 0 },
378 + { 9, 1, 2 },
379 + { 10, 1, 4 },
380 + { 9, 2, 4 },
381 + { 10, 2, 7 },
382 + { 9, 3, 6 },
383 + { 10, 3, 11 },
384 + { 9, 4, 7 },
385 + { 10, 4, 14 },
386 + { 9, 5, 9 },
387 + { 10, 5, 18 },
388 + { 9, 6, 11 },
389 + { 10, 6, 21 },
390 + { 9, 7, 13 },
391 + { 10, 7, 25 },
392 + { 9, 8, 14 },
393 + { 10, 8, 28 },
394 +
395 + { 9, 9, 16 },
396 + { 9, 10, 18 },
397 + { 9, 11, 20 },
398 + { 9, 12, 21 },
399 +
400 + { 10, 9, 32 },
401 + { 10, 10, 35 },
402 + { 10, 11, 39 },
403 + { 10, 12, 42 },
404 +};
405 +
406 +/*
407 + * Populate the various fields that depend on how
408 + * the hardware ECC data is located in the spare area
409 + *
410 + * For this controller, it is easier to fill in these
411 + * structures at run time.
412 + *
413 + * The bad-block marker is assumed to occupy one byte
414 + * at chip->badblockpos, which must be in the first
415 + * sector of the spare area, namely it is either
416 + * at offset 0 or 5.
417 + * Some chips use both for manufacturer's bad block
418 + * markers, but we ignore that issue here, and assume only
419 + * one byte is always used as the bad-block marker.
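+ * For illustration, with the 2K-page/64-byte-spare example given
+ * earlier (1k sectors, ECC level 4, and assuming the marker at
+ * offset 0), this routine yields ECC bytes at spare offsets 18-31
+ * and 50-63, and free OOB ranges (1,17) and (32,18), i.e.
+ * oobavail = 35.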
420 + */
421 +static int bcmnand_hw_ecc_layout(struct bcmnand_ctrl *ctrl)
422 +{
423 + struct nand_ecclayout *layout;
424 + unsigned int i, j, k;
425 + unsigned int ecc_per_sec, oob_per_sec;
426 + unsigned int bbm_pos = ctrl->nand.badblockpos;
427 +
428 + /* Calculate spare area size per sector */
429 + oob_per_sec = ctrl->mtd.oobsize >> ctrl->sec_per_page_shift;
430 +
431 + /* Try to calculate the amount of ECC bytes per sector with a formula */
432 + if (ctrl->sector_size_shift == 9)
433 + ecc_per_sec = ((ctrl->ecc_level * 14) + 7) >> 3;
434 + else if (ctrl->sector_size_shift == 10)
435 + ecc_per_sec = ((ctrl->ecc_level * 14) + 3) >> 2;
436 + else
437 + ecc_per_sec = oob_per_sec + 1; /* cause an error if not in table */
438 +
439 + /* Now find out the answer according to the table */
440 + for (i = 0; i < ARRAY_SIZE(bcmnand_ecc_sizes); i++) {
441 + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
442 + bcmnand_ecc_sizes[i].sector_size_shift ==
443 + ctrl->sector_size_shift) {
444 + break;
445 + }
446 + }
447 +
448 + /* Table match overrides formula */
449 + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
450 + bcmnand_ecc_sizes[i].sector_size_shift == ctrl->sector_size_shift)
451 + ecc_per_sec = bcmnand_ecc_sizes[i].ecc_bytes_per_sec;
452 +
453 + /* Return an error if calculated ECC leaves no room for OOB */
454 + if ((ctrl->sec_per_page_shift != 0 && ecc_per_sec >= oob_per_sec) ||
455 + (ctrl->sec_per_page_shift == 0 && ecc_per_sec >= (oob_per_sec - 1))) {
456 + pr_err("%s: ECC level %d too high, leaves no room for OOB data\n",
457 + DRV_NAME, ctrl->ecc_level);
458 + return -EINVAL;
459 + }
460 +
461 + /* Fill in the needed fields */
462 + ctrl->nand.ecc.size = ctrl->mtd.writesize >> ctrl->sec_per_page_shift;
463 + ctrl->nand.ecc.bytes = ecc_per_sec;
464 + ctrl->nand.ecc.steps = 1 << ctrl->sec_per_page_shift;
465 + ctrl->nand.ecc.total = ecc_per_sec << ctrl->sec_per_page_shift;
466 + ctrl->nand.ecc.strength = ctrl->ecc_level;
467 +
468 + /* Build an ecc layout data structure */
469 + layout = &ctrl->ecclayout;
470 + memset(layout, 0, sizeof(*layout));
471 +
472 + /* Total number of bytes used by HW ECC */
473 + layout->eccbytes = ecc_per_sec << ctrl->sec_per_page_shift;
474 +
475 + /* Location for each of the HW ECC bytes */
476 + for (i = j = 0, k = 1;
477 + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes;
478 + i++, j++) {
479 + /* switch sector # */
480 + if (j == ecc_per_sec) {
481 + j = 0;
482 + k++;
483 + }
484 + /* save position of each HW-generated ECC byte */
485 + layout->eccpos[i] = (oob_per_sec * k) - ecc_per_sec + j;
486 +
487 + /* Check that HW ECC does not overlap bad-block marker */
488 + if (bbm_pos == layout->eccpos[i]) {
489 + pr_err("%s: ECC level %d too high, HW ECC collides with bad-block marker position\n",
490 + DRV_NAME, ctrl->ecc_level);
491 + return -EINVAL;
492 + }
493 + }
494 +
495 + /* Location of all user-available OOB byte-ranges */
496 + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++) {
497 + struct nand_oobfree *oobfree = &layout->oobfree[i];
498 +
499 + if (i >= (1 << ctrl->sec_per_page_shift))
500 + break;
501 + oobfree->offset = oob_per_sec * i;
502 + oobfree->length = oob_per_sec - ecc_per_sec;
503 +
504 + /* Bad-block marker must be in the first sector spare area */
505 + if (WARN_ON(bbm_pos >= (oobfree->offset + oobfree->length)))
506 + return -EINVAL;
507 +
508 + if (i != 0)
509 + continue;
510 +
511 + /* Remove bad-block marker from available byte range */
512 + if (bbm_pos == oobfree->offset) {
513 + oobfree->offset += 1;
514 + oobfree->length -= 1;
515 + } else if (bbm_pos == (oobfree->offset + oobfree->length - 1)) {
516 + oobfree->length -= 1;
517 + } else {
518 + layout->oobfree[i + 1].offset = bbm_pos + 1;
519 + layout->oobfree[i + 1].length =
520 + oobfree->length - bbm_pos - 1;
521 + oobfree->length = bbm_pos;
522 + i++;
523 + }
524 + }
525 +
526 + layout->oobavail = ((oob_per_sec - ecc_per_sec)
527 + << ctrl->sec_per_page_shift) - 1;
528 +
529 + ctrl->mtd.oobavail = layout->oobavail;
530 + ctrl->nand.ecc.layout = layout;
531 +
532 + /* Output layout for debugging */
533 + pr_debug("%s: Spare area=%d eccbytes %d, ecc bytes located at:\n",
534 + DRV_NAME, ctrl->mtd.oobsize, layout->eccbytes);
535 + for (i = j = 0;
536 + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes; i++)
537 + pr_debug(" %d", layout->eccpos[i]);
538 +
539 + pr_debug("\n%s: Available %d bytes at (off,len):\n", DRV_NAME,
540 + layout->oobavail);
541 + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++)
542 + pr_debug("(%d,%d) ", layout->oobfree[i].offset,
543 + layout->oobfree[i].length);
544 +
545 + pr_debug("\n");
546 +
547 + return 0;
548 +}
549 +
550 +/*
551 + * Register bit-field manipulation routines
552 + */
553 +
554 +static inline unsigned int bcmnand_reg_read(struct bcmnand_ctrl *ctrl,
555 + struct bcmnand_reg_field rbf)
556 +{
557 + u32 val;
558 +
559 + val = bcma_read32(ctrl->core, rbf.reg);
560 + val >>= rbf.pos;
561 + val &= (1 << rbf.width) - 1;
562 +
563 + return val;
564 +}
565 +
566 +static inline void bcmnand_reg_write(struct bcmnand_ctrl *ctrl,
567 + struct bcmnand_reg_field rbf,
568 + unsigned newval)
569 +{
570 + u32 val, msk;
571 +
572 + msk = (1 << rbf.width) - 1;
573 + msk <<= rbf.pos;
574 + newval <<= rbf.pos;
575 + newval &= msk;
576 +
577 + val = bcma_read32(ctrl->core, rbf.reg);
578 + val &= ~msk;
579 + val |= newval;
580 + bcma_write32(ctrl->core, rbf.reg, val);
581 +}
582 +
583 +static inline unsigned int bcmnand_reg_aread(struct bcmnand_ctrl *ctrl,
584 + struct bcmnand_areg_field rbf)
585 +{
586 + u32 val;
587 +
588 + val = bcma_aread32(ctrl->core, rbf.reg);
589 + val >>= rbf.pos;
590 + val &= (1 << rbf.width) - 1;
591 +
592 + return val;
593 +}
594 +
595 +static inline void bcmnand_reg_awrite(struct bcmnand_ctrl *ctrl,
596 + struct bcmnand_areg_field rbf,
597 + unsigned int newval)
598 +{
599 + u32 val, msk;
600 +
601 + msk = (1 << rbf.width) - 1;
602 + msk <<= rbf.pos;
603 + newval <<= rbf.pos;
604 + newval &= msk;
605 +
606 + val = bcma_aread32(ctrl->core, rbf.reg);
607 + val &= ~msk;
608 + val |= newval;
609 + bcma_awrite32(ctrl->core, rbf.reg, val);
610 +}
611 +
612 +/*
613 + * NAND Interface - dev_ready
614 + *
615 + * Return 1 iff device is ready, 0 otherwise
616 + */
617 +static int bcmnand_dev_ready(struct mtd_info *mtd)
618 +{
619 + struct nand_chip *chip = mtd->priv;
620 + struct bcmnand_ctrl *ctrl = chip->priv;
621 +
622 + return bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY);
623 +}
624 +
625 +/*
626 + * Interrupt service routines
627 + */
628 +static irqreturn_t bcmnand_isr(int irq, void *dev_id)
629 +{
630 + struct bcmnand_ctrl *ctrl = dev_id;
631 + int irq_off;
632 +
633 + irq_off = irq - ctrl->core->irq;
634 + WARN_ON(irq_off < 0 || irq_off >= NANDC_IRQ_NUM);
635 +
636 + if (!bcmnand_reg_read(ctrl, NANDC_INT_N_REG(irq_off)))
637 + return IRQ_NONE;
638 +
639 + /* Acknowledge interrupt */
640 + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
641 +
642 + /* Wake up task */
643 + complete(&ctrl->op_completion);
644 +
645 + return IRQ_HANDLED;
646 +}
647 +
648 +static int bcmnand_wait_interrupt(struct bcmnand_ctrl *ctrl,
649 + unsigned int irq_off,
650 + unsigned int timeout_usec)
651 +{
652 + long timeout_jiffies;
653 + int ret = 0;
654 +
655 + reinit_completion(&ctrl->op_completion);
656 +
657 + /* Acknowledge interrupt */
658 + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
659 +
660 + /* Enable IRQ to wait on */
661 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 1);
662 +
663 + timeout_jiffies = 1 + usecs_to_jiffies(timeout_usec);
664 +
665 + if (irq_off != NANDC_IRQ_CONTROLLER_RDY ||
666 + 0 == bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY)) {
667 +
668 + timeout_jiffies = wait_for_completion_interruptible_timeout(
669 + &ctrl->op_completion, timeout_jiffies);
670 +
671 + if (timeout_jiffies < 0)
672 + ret = timeout_jiffies;
673 + if (timeout_jiffies == 0)
674 + ret = -ETIME;
675 + }
676 +
677 + /* Disable IRQ, we're done waiting */
678 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
679 +
680 + if (bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY))
681 + ret = 0;
682 +
683 + return ret;
684 +}
685 +
686 +/*
687 + * wait for command completion
688 + */
689 +static int bcmnand_wait_cmd(struct bcmnand_ctrl *ctrl, unsigned int timeout_usec)
690 +{
691 + unsigned int retries;
692 +
693 + if (bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
694 + return 0;
695 +
696 + /* If the timeout is long, wait for interrupt */
697 + if (timeout_usec >= jiffies_to_usecs(1) >> 4)
698 + return bcmnand_wait_interrupt(
699 + ctrl, NANDC_IRQ_CONTROLLER_RDY, timeout_usec);
700 +
701 + /* Wait for completion of the prior command */
702 + retries = (timeout_usec >> 3) + 1;
703 +
704 + while (retries-- &&
705 + 0 == bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY)) {
706 + cpu_relax();
707 + udelay(6);
708 + }
709 +
710 + if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
711 + return -ETIME;
712 +
713 + return 0;
714 +}
715 +
716 +
717 +/*
718 + * NAND Interface - waitfunc
719 + */
720 +static int bcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
721 +{
722 + struct bcmnand_ctrl *ctrl = chip->priv;
723 + unsigned int to;
724 + int ret;
725 +
726 + /* figure out timeout based on what command is on */
727 + switch (ctrl->last_cmd) {
728 + default:
729 + case NAND_CMD_ERASE1:
730 + case NAND_CMD_ERASE2:
731 + to = 1 << 16;
732 + break;
733 + case NAND_CMD_STATUS:
734 + case NAND_CMD_RESET:
735 + to = 256;
736 + break;
737 + case NAND_CMD_READID:
738 + to = 1024;
739 + break;
740 + case NAND_CMD_READ1:
741 + case NAND_CMD_READ0:
742 + to = 2048;
743 + break;
744 + case NAND_CMD_PAGEPROG:
745 + to = 4096;
746 + break;
747 + case NAND_CMD_READOOB:
748 + to = 512;
749 + break;
750 + }
751 +
752 + /* deliver deferred error code if any */
753 + ret = ctrl->cmd_ret;
754 + if (ret < 0)
755 + ctrl->cmd_ret = 0;
756 + else
757 + ret = bcmnand_wait_cmd(ctrl, to);
758 +
759 + /* Timeout */
760 + if (ret < 0)
761 + return NAND_STATUS_FAIL;
762 +
763 + ret = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
764 +
765 + return ret;
766 +}
767 +
768 +/*
769 + * NAND Interface - read_oob
770 + */
771 +static int bcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
772 + int page)
773 +{
774 + struct bcmnand_ctrl *ctrl = chip->priv;
775 + unsigned int n = ctrl->chip_num;
776 + void __iomem *ctrl_spare;
777 + unsigned int spare_per_sec, sector;
778 + u64 nand_addr;
779 +
780 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
781 +
782 + /* Set the page address for the following commands */
783 + nand_addr = ((u64)page << chip->page_shift);
784 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
785 +
786 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
787 +
788 + /* Disable ECC validation for spare area reads */
789 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), 0);
790 +
791 + /* Loop all sectors in page */
792 + for (sector = 0; sector < (1<<ctrl->sec_per_page_shift); sector++) {
793 + unsigned int col;
794 +
795 + col = (sector << ctrl->sector_size_shift);
796 +
797 + /* Issue command to read partial page */
798 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
799 +
800 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
801 + NANDC_CMD_OPCODE_SPARE_READ);
802 +
803 + /* Wait for the command to complete */
804 + if (bcmnand_wait_cmd(ctrl, (sector == 0) ? 10000 : 100))
805 + return -EIO;
806 +
807 + if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_SPARE_VALID))
808 + return -EIO;
809 +
810 + /* Set controller to Little Endian mode for copying */
811 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
812 +
813 + memcpy(chip->oob_poi + sector * spare_per_sec,
814 + ctrl_spare,
815 + spare_per_sec);
816 +
817 + /* Return to Big Endian mode for commands etc */
818 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
819 + }
820 +
821 + return 0;
822 +}
823 +
824 +/*
825 + * NAND Interface - write_oob
826 + */
827 +static int bcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
828 + int page)
829 +{
830 + struct bcmnand_ctrl *ctrl = chip->priv;
831 + unsigned int n = ctrl->chip_num;
832 + void __iomem *ctrl_spare;
833 + unsigned int spare_per_sec, sector, num_sec;
834 + u64 nand_addr;
835 + int to, status = 0;
836 +
837 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
838 +
839 + /* Disable ECC generation for spare area writes */
840 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), 0);
841 +
842 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
843 +
844 + /* Set the page address for the following commands */
845 + nand_addr = ((u64)page << chip->page_shift);
846 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
847 +
848 + /* Must allow partial programming to change spare area only */
849 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 1);
850 +
851 + num_sec = 1 << ctrl->sec_per_page_shift;
852 + /* Loop all sectors in page */
853 + for (sector = 0; sector < num_sec; sector++) {
854 + unsigned int col;
855 +
856 + /* Spare area accessed by the data sector offset */
857 + col = (sector << ctrl->sector_size_shift);
858 +
859 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
860 +
861 + /* Set controller to Little Endian mode for copying */
862 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
863 +
864 + memcpy(ctrl_spare,
865 + chip->oob_poi + sector * spare_per_sec,
866 + spare_per_sec);
867 +
868 + /* Return to Big Endian mode for commands etc */
869 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
870 +
871 + /* Push spare bytes into internal buffer, last goes to flash */
872 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
873 + NANDC_CMD_OPCODE_SPARE_PROG);
874 +
875 + if (sector == (num_sec - 1))
876 + to = 1 << 16;
877 + else
878 + to = 1 << 10;
879 +
880 + if (bcmnand_wait_cmd(ctrl, to))
881 + return -EIO;
882 + }
883 +
884 + /* Restore partial programming inhibition */
885 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 0);
886 +
887 + status = bcmnand_waitfunc(mtd, chip);
888 + return status & NAND_STATUS_FAIL ? -EIO : 0;
889 +}
890 +
891 +/*
892 + * verify that a buffer is all erased
893 + */
894 +static bool bcmnand_buf_erased(const void *buf, unsigned int len)
895 +{
896 + unsigned int i;
897 + const u32 *p = buf;
898 +
899 + for (i = 0; i < (len >> 2); i++) {
900 + if (p[i] != 0xffffffff)
901 + return false;
902 + }
903 + return true;
904 +}
905 +
906 +/*
907 + * read a page, with or without ECC checking
908 + */
909 +static int bcmnand_read_page_do(struct mtd_info *mtd, struct nand_chip *chip,
910 + uint8_t *buf, int page, bool ecc)
911 +{
912 + struct bcmnand_ctrl *ctrl = chip->priv;
913 + unsigned int n = ctrl->chip_num;
914 + void __iomem *ctrl_cache;
915 + void __iomem *ctrl_spare;
916 + unsigned int data_bytes;
917 + unsigned int spare_per_sec;
918 + unsigned int sector, to = 1 << 16;
919 + u32 err_soft_reg, err_hard_reg;
920 + unsigned int hard_err_count = 0;
921 + int ret;
922 + u64 nand_addr;
923 +
924 + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
925 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
926 +
927 + /* Reset ECC error stats */
928 + err_hard_reg = bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT);
929 + err_soft_reg = bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
930 +
931 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
932 +
933 + /* Set the page address for the following commands */
934 + nand_addr = ((u64)page << chip->page_shift);
935 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
936 +
937 + /* Enable ECC validation for ecc page reads */
938 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), ecc);
939 +
940 + /* Loop all sectors in page */
941 + for (sector = 0; sector < (1 << ctrl->sec_per_page_shift); sector++) {
942 + data_bytes = 0;
943 +
944 + /* Copy partial sectors sized by cache reg */
945 + while (data_bytes < (1<<ctrl->sector_size_shift)) {
946 + unsigned int col;
947 +
948 + col = data_bytes + (sector << ctrl->sector_size_shift);
949 +
950 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
951 + nand_addr + col);
952 +
953 + /* Issue command to read partial page */
954 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
955 + NANDC_CMD_OPCODE_PAGE_READ);
956 +
957 + /* Wait for the command to complete */
958 + ret = bcmnand_wait_cmd(ctrl, to);
959 + if (ret < 0)
960 + return ret;
961 +
962 + /* Set controller to Little Endian mode for copying */
963 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
964 +
965 + if (data_bytes == 0) {
966 + memcpy(chip->oob_poi + sector * spare_per_sec,
967 + ctrl_spare, spare_per_sec);
968 + }
969 +
970 + memcpy(buf + col, ctrl_cache, NANDC_CACHE_SIZE);
971 + data_bytes += NANDC_CACHE_SIZE;
972 +
973 + /* Return to Big Endian mode for commands etc */
974 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
975 +
976 + /* Next iterations should go fast */
977 + to = 1 << 10;
978 +
979 + /* capture hard errors for each partial */
980 + if (err_hard_reg != bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT)) {
981 + int era = bcmnand_reg_read(ctrl, NANDC_INT_STAT_ERASED);
982 +
983 + if (!era &&
984 + !bcmnand_buf_erased(buf + col, NANDC_CACHE_SIZE))
985 + hard_err_count++;
986 +
987 + err_hard_reg = bcmnand_reg_read(ctrl,
988 + NANDC_UNCORR_ERR_COUNT);
989 + }
990 + }
991 + }
992 +
993 + if (!ecc)
994 + return 0;
995 +
996 + /* Report hard ECC errors */
997 + if (hard_err_count)
998 + mtd->ecc_stats.failed++;
999 +
1000 + /* Get ECC soft error stats */
1001 + mtd->ecc_stats.corrected += err_soft_reg -
1002 + bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
1003 +
1004 + return 0;
1005 +}
1006 +
1007 +/*
1008 + * NAND Interface - read_page_ecc
1009 + */
1010 +static int bcmnand_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1011 + uint8_t *buf, int oob_required, int page)
1012 +{
1013 + return bcmnand_read_page_do(mtd, chip, buf, page, true);
1014 +}
1015 +
1016 +/*
1017 + * NAND Interface - read_page_raw
1018 + */
1019 +static int bcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1020 + uint8_t *buf, int oob_required, int page)
1021 +{
1022 + return bcmnand_read_page_do(mtd, chip, buf, page, false);
1023 +}
1024 +
1025 +/*
1026 + * do page write, with or without ECC generation enabled
1027 + */
1028 +static int bcmnand_write_page_do(struct mtd_info *mtd, struct nand_chip *chip,
1029 + const uint8_t *buf, bool ecc)
1030 +{
1031 + struct bcmnand_ctrl *ctrl = chip->priv;
1032 + unsigned int n = ctrl->chip_num;
1033 + void __iomem *ctrl_cache;
1034 + void __iomem *ctrl_spare;
1035 + unsigned int spare_per_sec, sector, num_sec;
1036 + unsigned int data_bytes, spare_bytes;
1037 + int i, to;
1038 + uint8_t *tmp_poi;
1039 + u32 nand_addr;
1040 +
1041 + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
1042 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
1043 +
1044 + /* Get start-of-page address */
1045 + nand_addr = bcmnand_reg_read(ctrl, NANDC_CMD_ADDRESS);
1046 +
1047 + tmp_poi = kmalloc(mtd->oobsize, GFP_KERNEL);
1048 + if (!tmp_poi)
1049 + return -ENOMEM;
1050 +
1051 + /* Retrieve pre-existing OOB values */
1052 + memcpy(tmp_poi, chip->oob_poi, mtd->oobsize);
1053 + ctrl->cmd_ret = bcmnand_read_oob(mtd, chip,
1054 + nand_addr >> chip->page_shift);
1055 + if (ctrl->cmd_ret < 0) {
1056 + kfree(tmp_poi);
1057 + return ctrl->cmd_ret;
1058 + }
1059 +
1060 + /* Apply new OOB data bytes just like they would end up on the chip */
1061 + for (i = 0; i < mtd->oobsize; i++)
1062 + chip->oob_poi[i] &= tmp_poi[i];
1063 + kfree(tmp_poi);
1064 +
1065 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
1066 +
1067 + /* Enable ECC generation for ecc page write, if requested */
1068 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), ecc);
1069 +
1070 + spare_bytes = 0;
1071 + num_sec = 1 << ctrl->sec_per_page_shift;
1072 +
1073 + /* Loop all sectors in page */
1074 + for (sector = 0; sector < num_sec; sector++) {
1075 + data_bytes = 0;
1076 +
1077 + /* Copy partial sectors sized by cache reg */
1078 + while (data_bytes < (1<<ctrl->sector_size_shift)) {
1079 + unsigned int col;
1080 +
1081 + col = data_bytes +
1082 + (sector << ctrl->sector_size_shift);
1083 +
1084 + /* Set address of 512-byte sub-page */
1085 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
1086 + nand_addr + col);
1087 +
1088 + /* Set controller to Little Endian mode for copying */
1089 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN,
1090 + 1);
1091 +
1092 + /* Set spare area is written at each sector start */
1093 + if (data_bytes == 0) {
1094 + memcpy(ctrl_spare,
1095 + chip->oob_poi + spare_bytes,
1096 + spare_per_sec);
1097 + spare_bytes += spare_per_sec;
1098 + }
1099 +
1100 + /* Copy sub-page data */
1101 + memcpy(ctrl_cache, buf + col, NANDC_CACHE_SIZE);
1102 + data_bytes += NANDC_CACHE_SIZE;
1103 +
1104 + /* Return to Big Endian mode for commands etc */
1105 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1106 +
1107 + /* Push data into internal cache */
1108 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1109 + NANDC_CMD_OPCODE_PAGE_PROG);
1110 +
1111 + /* Wait for the command to complete */
1112 + if (sector == (num_sec - 1))
1113 + to = 1 << 16;
1114 + else
1115 + to = 1 << 10;
1116 + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1117 + if (ctrl->cmd_ret < 0)
1118 + return ctrl->cmd_ret;
1119 + }
1120 + }
1121 + return 0;
1122 +}
1123 +
1124 +/*
1125 + * NAND Interface - write_page_ecc
1126 + */
1127 +static int bcmnand_write_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1128 + const uint8_t *buf, int oob_required)
1129 +{
1130 + return bcmnand_write_page_do(mtd, chip, buf, true);
1131 +}
1132 +
1133 +/*
1134 + * NAND Interface - write_page_raw
1135 + */
1136 +static int bcmnand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1137 + const uint8_t *buf, int oob_required)
1138 +{
1139 + return bcmnand_write_page_do(mtd, chip, buf, false);
1140 +}
1141 +
1142 +/*
1143 + * MTD Interface - read_byte
1144 + *
1145 + * This function emulates a simple controller's behavior
1146 + * for just a few relevant commands
1147 + */
1148 +static uint8_t bcmnand_read_byte(struct mtd_info *mtd)
1149 +{
1150 + struct nand_chip *nand = mtd->priv;
1151 + struct bcmnand_ctrl *ctrl = nand->priv;
1152 + uint8_t b = ~0;
1153 +
1154 + switch (ctrl->last_cmd) {
1155 + case NAND_CMD_READID:
1156 + if (ctrl->id_byte_index < 8) {
1157 + b = bcmnand_reg_read(ctrl, NANDC_DEVID_BYTE(
1158 + ctrl->id_byte_index));
1159 + ctrl->id_byte_index++;
1160 + }
1161 + break;
1162 + case NAND_CMD_READOOB:
1163 + if (ctrl->oob_index < mtd->oobsize)
1164 + b = nand->oob_poi[ctrl->oob_index++];
1165 + break;
1166 + case NAND_CMD_STATUS:
1167 + b = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
1168 + break;
1169 + default:
1170 + pr_err("%s: got unknown command: 0x%x in %s\n", DRV_NAME,
1171 + ctrl->last_cmd, __func__);
1172 + }
1173 + return b;
1174 +}
1175 +
1176 +/*
1177 + * MTD Interface - read_word
1178 + *
1179 + * Cannot be tested without an x16 chip, but the SoC does not support an x16 i/f.
1180 + */
1181 +static u16 bcmnand_read_word(struct mtd_info *mtd)
1182 +{
1183 + u16 w = ~0;
1184 +
1185 + w = bcmnand_read_byte(mtd);
1186 + barrier();
1187 + w |= bcmnand_read_byte(mtd) << 8;
1188 +
1189 + return w;
1190 +}
1191 +
1192 +/*
1193 + * MTD Interface - select a chip from an array
1194 + */
1195 +static void bcmnand_select_chip(struct mtd_info *mtd, int chip)
1196 +{
1197 + struct nand_chip *nand = mtd->priv;
1198 + struct bcmnand_ctrl *ctrl = nand->priv;
1199 +
1200 + ctrl->chip_num = chip;
1201 + bcmnand_reg_write(ctrl, NANDC_CMD_CS_SEL, chip);
1202 +}
1203 +
1204 +/*
1205 + * NAND Interface - emulate low-level NAND commands
1206 + *
1207 + * Only a few low-level commands are really needed by generic NAND,
1208 + * and they do not call for CMD_LL operations the controller can support.
1209 + */
1210 +static void bcmnand_cmdfunc(struct mtd_info *mtd, unsigned int command,
1211 + int column, int page_addr)
1212 +{
1213 + struct nand_chip *nand = mtd->priv;
1214 + struct bcmnand_ctrl *ctrl = nand->priv;
1215 + u64 nand_addr;
1216 + unsigned int to = 1;
1217 +
1218 + ctrl->last_cmd = command;
1219 +
1220 + /* Set address for some commands */
1221 + switch (command) {
1222 + case NAND_CMD_ERASE1:
1223 + column = 0;
1224 + /*FALLTHROUGH*/
1225 + case NAND_CMD_SEQIN:
1226 + case NAND_CMD_READ0:
1227 + case NAND_CMD_READ1:
1228 + WARN_ON(column >= mtd->writesize);
1229 + nand_addr = (u64) column |
1230 + ((u64)page_addr << nand->page_shift);
1231 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
1232 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr);
1233 + break;
1234 + case NAND_CMD_ERASE2:
1235 + case NAND_CMD_RESET:
1236 + case NAND_CMD_READID:
1237 + case NAND_CMD_READOOB:
1238 + case NAND_CMD_PAGEPROG:
1239 + default:
1240 + /* Do nothing, address not used */
1241 + break;
1242 + }
1243 +
1244 + /* Issue appropriate command to controller */
1245 + switch (command) {
1246 + case NAND_CMD_SEQIN:
1247 + /* Only need to load command address, done */
1248 + return;
1249 +
1250 + case NAND_CMD_RESET:
1251 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1252 + NANDC_CMD_OPCODE_FLASH_RESET);
1253 + to = 1 << 8;
1254 + break;
1255 +
1256 + case NAND_CMD_READID:
1257 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1258 + NANDC_CMD_OPCODE_DEVID_READ);
1259 + ctrl->id_byte_index = 0;
1260 + to = 1 << 8;
1261 + break;
1262 +
1263 + case NAND_CMD_READ0:
1264 + case NAND_CMD_READ1:
1265 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1266 + NANDC_CMD_OPCODE_PAGE_READ);
1267 + to = 1 << 15;
1268 + break;
1269 + case NAND_CMD_STATUS:
1270 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1271 + NANDC_CMD_OPCODE_STATUS_READ);
1272 + to = 1 << 8;
1273 + break;
1274 + case NAND_CMD_ERASE1:
1275 + return;
1276 +
1277 + case NAND_CMD_ERASE2:
1278 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1279 + NANDC_CMD_OPCODE_BLOCK_ERASE);
1280 + to = 1 << 18;
1281 + break;
1282 +
1283 + case NAND_CMD_PAGEPROG:
1284 + /* Cmd already set from write_page */
1285 + return;
1286 +
1287 + case NAND_CMD_READOOB:
1288 + /* Emulate simple interface */
1289 + bcmnand_read_oob(mtd, nand, page_addr);
1290 + ctrl->oob_index = 0;
1291 + return;
1292 +
1293 + default:
1294 + pr_err("%s: got unknown command: 0x%x in %s\n", DRV_NAME,
1295 + ctrl->last_cmd, __func__);
1296 + }
1297 +
1298 + /* Wait for command to complete */
1299 + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1300 +
1301 +}
1302 +
1303 +static int bcmnand_scan(struct mtd_info *mtd)
1304 +{
1305 + struct nand_chip *nand = mtd->priv;
1306 + struct bcmnand_ctrl *ctrl = nand->priv;
1307 + bool sector_1k = false;
1308 + unsigned int chip_num = 0;
1309 + int ecc_level = 0;
1310 + int ret;
1311 +
1312 + ret = nand_scan_ident(mtd, NANDC_MAX_CHIPS, NULL);
1313 + if (ret)
1314 + return ret;
1315 +
1316 + /* Get configuration from first chip */
1317 + sector_1k = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_SECTOR_1K(0));
1318 + ecc_level = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0));
1319 + mtd->writesize_shift = nand->page_shift;
1320 +
1321 + ctrl->ecc_level = ecc_level;
1322 + ctrl->sector_size_shift = sector_1k ? 10 : 9;
1323 +
1324 + /* Configure spare area, tweak as needed */
1325 + do {
1326 + ctrl->sec_per_page_shift =
1327 + mtd->writesize_shift - ctrl->sector_size_shift;
1328 +
1329 + /* will return -EINVAL if OOB space exhausted */
1330 + ret = bcmnand_hw_ecc_layout(ctrl);
1331 +
1332 + /* First try to bump sector size to 1k, then decrease level */
1333 + if (ret && nand->page_shift > 9 && ctrl->sector_size_shift < 10)
1334 + ctrl->sector_size_shift = 10;
1335 + else if (ret)
1336 + ctrl->ecc_level--;
1337 +
1338 + } while (ret && ctrl->ecc_level > 0);
1339 +
1340 + if (WARN_ON(ctrl->ecc_level == 0))
1341 + return -ENOENT;
1342 +
1343 + if ((ctrl->sector_size_shift > 9) != (sector_1k == 1)) {
1344 + pr_info("%s: sector size adjusted to 1k\n", DRV_NAME);
1345 + sector_1k = 1;
1346 + }
1347 +
1348 + if (ecc_level != ctrl->ecc_level) {
1349 + pr_info("%s: ECC level adjusted from %u to %u\n",
1350 + DRV_NAME, ecc_level, ctrl->ecc_level);
1351 + ecc_level = ctrl->ecc_level;
1352 + }
1353 +
1354 + /* handle the hardware chip config registers */
1355 + for (chip_num = 0; chip_num < nand->numchips; chip_num++) {
1356 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_SECTOR_1K(chip_num),
1357 + sector_1k);
1358 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip_num),
1359 + ecc_level);
1360 +
1361 + /* Large pages: no partial page programming */
1362 + if (mtd->writesize > 512) {
1363 + bcmnand_reg_write(ctrl,
1364 + NANDC_ACC_CTRL_PGM_RDIN(chip_num), 0);
1365 + bcmnand_reg_write(ctrl,
1366 + NANDC_ACC_CTRL_PGM_PARTIAL(chip_num), 0);
1367 + }
1368 +
1369 + /* Do not raise ECC error when reading erased pages */
1370 + /* This bit has only partial effect, driver needs to help */
1371 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ERA_ECC_ERR(chip_num),
1372 + 0);
1373 +
1374 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PG_HIT(chip_num), 0);
1375 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PREFETCH(chip_num), 0);
1376 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_MODE(chip_num), 0);
1377 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_LASTPG(chip_num),
1378 + 0);
1379 +
1380 + /* TBD: consolidate or at least verify the s/w and h/w geometries agree */
1381 + }
1382 +
1383 + /* Allow writing on device */
1384 + if (!(nand->options & NAND_ROM))
1385 + bcmnand_reg_write(ctrl, NANDC_CS_NAND_WP, 0);
1386 +
1387 + pr_debug("%s: layout.oobavail=%d\n", DRV_NAME,
1388 + nand->ecc.layout->oobavail);
1389 +
1390 + ret = nand_scan_tail(mtd);
1391 +
1392 + if (nand->badblockbits == 0)
1393 + nand->badblockbits = 8;
1394 + if (WARN_ON((1 << nand->page_shift) != mtd->writesize))
1395 + return -EIO;
1396 +
1397 + /* Spit out some key chip parameters as detected by nand_base */
1398 + pr_debug("%s: erasesize=%d writesize=%d oobsize=%d page_shift=%d badblockpos=%d badblockbits=%d\n",
1399 + DRV_NAME, mtd->erasesize, mtd->writesize, mtd->oobsize,
1400 + nand->page_shift, nand->badblockpos, nand->badblockbits);
1401 +
1402 + return ret;
1403 +}
1404 +
1405 +/*
1406 + * main initialization function
1407 + */
1408 +static int bcmnand_ctrl_init(struct bcmnand_ctrl *ctrl)
1409 +{
1410 + unsigned int chip;
1411 + struct nand_chip *nand;
1412 + struct mtd_info *mtd;
1413 + unsigned int n = 0;
1414 + int ret;
1415 +
1416 + /* Software variables init */
1417 + nand = &ctrl->nand;
1418 + mtd = &ctrl->mtd;
1419 +
1420 + init_completion(&ctrl->op_completion);
1421 +
1422 + mtd->priv = nand;
1423 + mtd->owner = THIS_MODULE;
1424 + mtd->name = DRV_NAME;
1425 +
1426 + nand->priv = ctrl;
1427 +
1428 + nand->chip_delay = 5; /* not used */
1429 + nand->IO_ADDR_R = nand->IO_ADDR_W = (void *)~0L;
1430 +
1431 + if (bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_WIDTH(n)))
1432 + nand->options |= NAND_BUSWIDTH_16;
1433 + nand->options |= NAND_SKIP_BBTSCAN; /* Don't need BBTs */
1434 +
1435 + nand->options |= NAND_NO_SUBPAGE_WRITE; /* Subpages unsupported */
1436 +
1437 + nand->dev_ready = bcmnand_dev_ready;
1438 + nand->read_byte = bcmnand_read_byte;
1439 + nand->read_word = bcmnand_read_word;
1440 + nand->select_chip = bcmnand_select_chip;
1441 + nand->cmdfunc = bcmnand_cmdfunc;
1442 + nand->waitfunc = bcmnand_waitfunc;
1443 +
1444 + nand->ecc.mode = NAND_ECC_HW;
1445 + nand->ecc.read_page_raw = bcmnand_read_page_raw;
1446 + nand->ecc.write_page_raw = bcmnand_write_page_raw;
1447 + nand->ecc.read_page = bcmnand_read_page_ecc;
1448 + nand->ecc.write_page = bcmnand_write_page_ecc;
1449 + nand->ecc.read_oob = bcmnand_read_oob;
1450 + nand->ecc.write_oob = bcmnand_write_oob;
1451 +
1452 + /* Set AUTO_CONFIG bit - try to auto-detect chips */
1453 + bcmnand_reg_write(ctrl, NANDC_CS_AUTO_CONFIG, 1);
1454 +
1455 + usleep_range(1000, 1500);
1456 +
1457 + /* Print out current chip config */
1458 + for (chip = 0; chip < NANDC_MAX_CHIPS; chip++) {
1459 + pr_debug("%s: chip[%d]: size=%#x block=%#x page=%#x ecc_level=%#x\n",
1460 + DRV_NAME, chip,
1461 + bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_SIZE(chip)),
1462 + bcmnand_reg_read(ctrl, NANDC_CONFIG_BLK_SIZE(chip)),
1463 + bcmnand_reg_read(ctrl, NANDC_CONFIG_PAGE_SIZE(chip)),
1464 + bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip)));
1465 + }
1466 +
1467 + pr_debug("%s: NAND controller ready=%d\n", DRV_NAME,
1468 + bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY));
1469 +
1470 + ret = bcmnand_scan(mtd);
1471 + if (ret) {
1472 + pr_err("%s: scanning the nand flash chip failed with %i\n",
1473 + DRV_NAME, ret);
1474 + return ret;
1475 + }
1476 +
1477 + return 0;
1478 +}
1479 +
1480 +static int __init bcmnand_idm_init(struct bcmnand_ctrl *ctrl)
1481 +{
1482 + int irq_off;
1483 + unsigned int retries = 0x1000;
1484 +
1485 + if (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET))
1486 + pr_err("%s: stuck in reset\n", DRV_NAME);
1487 +
1488 + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 1);
1489 + if (!bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1490 + pr_err("%s: asserting reset failed\n", DRV_NAME);
1491 + return -EIO;
1492 + }
1493 +
1494 + while (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1495 + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 0);
1496 + cpu_relax();
1497 + usleep_range(100, 150);
1498 + if (!(retries--)) {
1499 + pr_err("%s: did not come back from reset\n",
1500 + DRV_NAME);
1501 + return -ETIMEDOUT;
1502 + }
1503 + }
1504 +
1505 + bcmnand_reg_awrite(ctrl, NANDC_IDM_CLOCK_EN, 1);
1506 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1507 + udelay(10);
1508 +
1509 + pr_info("%s: NAND Controller rev %d.%d\n", DRV_NAME,
1510 + bcmnand_reg_read(ctrl, NANDC_REV_MAJOR),
1511 + bcmnand_reg_read(ctrl, NANDC_REV_MINOR));
1512 +
1513 + usleep_range(250, 350);
1514 +
1515 + /* Disable all IRQs */
1516 + for (irq_off = 0; irq_off < NANDC_IRQ_NUM; irq_off++)
1517 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
1518 +
1519 + return 0;
1520 +}
1521 +
1522 +static const char * const part_probes[] = { "bcm47xxpart", "cmdlinepart", NULL };
1523 +
1524 +/*
1525 + * Top-level init function
1526 + */
1527 +static int bcmnand_probe(struct bcma_device *core)
1528 +{
1529 + struct device *dev = &core->dev;
1530 + struct device_node *np = dev->of_node;
1531 + struct bcmnand_ctrl *ctrl;
1532 + int res, i, irq;
1533 +
1534 + if (!np) {
1535 + pr_err("%s: no device tree node found\n", DRV_NAME);
1536 + return -ENOENT;
1537 + }
1538 +
1539 + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
1540 + if (!ctrl)
1541 + return -ENOMEM;
1542 +
1543 + bcma_set_drvdata(core, ctrl);
1544 +
1545 + ctrl->mtd.dev.parent = &core->dev;
1546 + ctrl->core = core;
1547 +
1548 + /* Acquire all interrupt lines */
1549 + for (i = 0; i < of_irq_count(np); i++) {
1550 + irq = irq_of_parse_and_map(np, i);
1551 + res = devm_request_irq(dev, irq, bcmnand_isr, 0, DRV_NAME, ctrl);
1552 + if (res < 0) {
1553 + pr_err("%s: problem requesting irq: %i (idx: %i)\n",
1554 + DRV_NAME, irq, i);
1555 + return res;
1556 + }
1557 + }
1558 +
1559 + res = bcmnand_idm_init(ctrl);
1560 + if (res)
1561 + return res;
1562 +
1563 + res = bcmnand_ctrl_init(ctrl);
1564 + if (res)
1565 + return res;
1566 +
1567 + res = mtd_device_parse_register(&ctrl->mtd, part_probes, NULL, NULL, 0);
1568 + if (res) {
1569 + pr_err("%s: Failed to register MTD device: %d\n", DRV_NAME, res);
1570 + return res;
1571 + }
1572 + return 0;
1573 +}
1574 +
1575 +static void bcmnand_remove(struct bcma_device *core)
1576 +{
1577 + struct bcmnand_ctrl *ctrl = bcma_get_drvdata(core);
1578 +
1579 + mtd_device_unregister(&ctrl->mtd);
1580 +}
1581 +
1582 +static const struct bcma_device_id bcmnand_bcma_tbl[] = {
1583 + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_NAND, BCMA_ANY_REV, BCMA_ANY_CLASS),
1584 + BCMA_CORETABLE_END
1585 +};
1586 +MODULE_DEVICE_TABLE(bcma, bcmnand_bcma_tbl);
1587 +
1588 +static struct bcma_driver bcmnand_bcma_driver = {
1589 + .name = KBUILD_MODNAME,
1590 + .id_table = bcmnand_bcma_tbl,
1591 + .probe = bcmnand_probe,
1592 + .remove = bcmnand_remove,
1593 +};
1594 +
1595 +static int __init bcmnand_init(void)
1596 +{
1597 + int err;
1598 +
1599 + err = bcma_driver_register(&bcmnand_bcma_driver);
1600 + if (err)
1601 + return err;
1602 + pr_info("%s: Broadcom NAND Controller driver loaded\n", DRV_NAME);
1603 +
1604 + return 0;
1605 +}
1606 +
1607 +static void __exit bcmnand_exit(void)
1608 +{
1609 + bcma_driver_unregister(&bcmnand_bcma_driver);
1610 +}
1611 +
1612 +module_init(bcmnand_init)
1613 +module_exit(bcmnand_exit)
1614 +
1615 +MODULE_LICENSE("GPL");
1616 +MODULE_DESCRIPTION(DRV_DESC);