bcm53xx: update the NAND driver
[openwrt/svn-archive/archive.git] / target / linux / bcm53xx / patches-3.14 / 420-mtd-bcm5301x_nand.patch
1 --- a/drivers/mtd/nand/Kconfig
2 +++ b/drivers/mtd/nand/Kconfig
3 @@ -510,4 +510,10 @@ config MTD_NAND_XWAY
4 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
5 to the External Bus Unit (EBU).
6
7 +config MTD_NAND_BCM
8 + tristate "Support for NAND on some Broadcom SoC"
9 + help
10 + This driver is currently used for the NAND flash controller on the
11 + Broadcom BCM5301X (NorthStar) SoCs.
12 +
13 endif # MTD_NAND
14 --- a/drivers/mtd/nand/Makefile
15 +++ b/drivers/mtd/nand/Makefile
16 @@ -49,5 +49,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
17 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
18 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
19 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
20 +obj-$(CONFIG_MTD_NAND_BCM) += bcm_nand.o
21
22 nand-objs := nand_base.o nand_bbt.o
23 --- /dev/null
24 +++ b/drivers/mtd/nand/bcm_nand.c
25 @@ -0,0 +1,1580 @@
26 +/*
27 + * Northstar NAND controller driver
28 + *
29 + * (c) Broadcom, Inc. 2012 All Rights Reserved.
30 + * Copyright 2014 Hauke Mehrtens <hauke@hauke-m.de>
31 + *
32 + * Licensed under the GNU/GPL. See COPYING for details.
33 + *
34 + * This module interfaces the NAND controller and hardware ECC capabilities
35 + * to the generic NAND chip support in the NAND library.
36 + *
37 + * Notes:
38 + * This driver depends on generic NAND driver, but works at the
39 + * page level for operations.
40 + *
41 + * When a page is written, the ECC calculated also protects the OOB
42 + * bytes not taken by ECC, and so the OOB must be combined with any
43 + * OOB data that preceded the page-write operation in order for the
44 + * ECC to be calculated correctly.
45 + * Also, when the page is erased, but OOB data is not, HW ECC will
46 + * indicate an error, because it checks OOB too, which calls for some
47 + * help from the software in this driver.
48 + *
49 + * TBD:
50 + * Block locking/unlocking support, OTP support
51 + */
52 +
53 +
54 +#include <linux/kernel.h>
55 +#include <linux/module.h>
56 +#include <linux/io.h>
57 +#include <linux/ioport.h>
58 +#include <linux/interrupt.h>
59 +#include <linux/delay.h>
60 +#include <linux/err.h>
61 +#include <linux/slab.h>
62 +#include <linux/bcma/bcma.h>
63 +#include <linux/of_irq.h>
64 +
65 +#include <linux/mtd/mtd.h>
66 +#include <linux/mtd/nand.h>
67 +
68 +#define NANDC_MAX_CHIPS 2 /* Only 2 CSn supported in NorthStar */
69 +
70 +/*
71 + * Driver private control structure
72 + */
73 +struct bcmnand_ctrl {
74 + struct mtd_info mtd;
75 + struct nand_chip nand;
76 + struct bcma_device *core;
77 +
78 + struct completion op_completion;
79 +
80 + struct nand_ecclayout ecclayout;
81 + int cmd_ret; /* saved error code */
82 + unsigned char oob_index;
83 + unsigned char id_byte_index;
84 + unsigned char chip_num;
85 + unsigned char last_cmd;
86 + unsigned char ecc_level;
87 + unsigned char sector_size_shift;
88 + unsigned char sec_per_page_shift;
89 +};
90 +
91 +
92 +/*
93 + * IRQ numbers - offset from first irq in nandc_irq resource
94 + */
95 +#define NANDC_IRQ_RD_MISS 0
96 +#define NANDC_IRQ_ERASE_COMPLETE 1
97 +#define NANDC_IRQ_COPYBACK_COMPLETE 2
98 +#define NANDC_IRQ_PROGRAM_COMPLETE 3
99 +#define NANDC_IRQ_CONTROLLER_RDY 4
100 +#define NANDC_IRQ_RDBSY_RDY 5
101 +#define NANDC_IRQ_ECC_UNCORRECTABLE 6
102 +#define NANDC_IRQ_ECC_CORRECTABLE 7
103 +#define NANDC_IRQ_NUM 8
104 +
105 +struct bcmnand_reg_field {
106 + unsigned int reg;
107 + unsigned int pos;
108 + unsigned int width;
109 +};
110 +
111 +/*
112 + * REGISTERS
113 + *
114 + * Individual bit-fields of registers are specified here
115 + * for clarity, and the rest of the code will access each field
116 + * as if it was its own register.
117 + *
118 + * Following registers are off <reg_base>:
119 + */
120 +#define REG_BIT_FIELD(r, p, w) ((struct bcmnand_reg_field){(r), (p), (w)})
121 +
122 +#define NANDC_8KB_PAGE_SUPPORT REG_BIT_FIELD(0x0, 31, 1)
123 +#define NANDC_REV_MAJOR REG_BIT_FIELD(0x0, 8, 8)
124 +#define NANDC_REV_MINOR REG_BIT_FIELD(0x0, 0, 8)
125 +
126 +#define NANDC_CMD_START_OPCODE REG_BIT_FIELD(0x4, 24, 5)
127 +
128 +#define NANDC_CMD_CS_SEL REG_BIT_FIELD(0x8, 16, 3)
129 +#define NANDC_CMD_EXT_ADDR REG_BIT_FIELD(0x8, 0, 16)
130 +
131 +#define NANDC_CMD_ADDRESS REG_BIT_FIELD(0xc, 0, 32)
132 +#define NANDC_CMD_END_ADDRESS REG_BIT_FIELD(0x10, 0, 32)
133 +
134 +#define NANDC_INT_STATUS REG_BIT_FIELD(0x14, 0, 32)
135 +#define NANDC_INT_STAT_CTLR_RDY REG_BIT_FIELD(0x14, 31, 1)
136 +#define NANDC_INT_STAT_FLASH_RDY REG_BIT_FIELD(0x14, 30, 1)
137 +#define NANDC_INT_STAT_CACHE_VALID REG_BIT_FIELD(0x14, 29, 1)
138 +#define NANDC_INT_STAT_SPARE_VALID REG_BIT_FIELD(0x14, 28, 1)
139 +#define NANDC_INT_STAT_ERASED REG_BIT_FIELD(0x14, 27, 1)
140 +#define NANDC_INT_STAT_PLANE_RDY REG_BIT_FIELD(0x14, 26, 1)
141 +#define NANDC_INT_STAT_FLASH_STATUS REG_BIT_FIELD(0x14, 0, 8)
142 +
143 +#define NANDC_CS_LOCK REG_BIT_FIELD(0x18, 31, 1)
144 +#define NANDC_CS_AUTO_CONFIG REG_BIT_FIELD(0x18, 30, 1)
145 +#define NANDC_CS_NAND_WP REG_BIT_FIELD(0x18, 29, 1)
146 +#define NANDC_CS_BLK0_WP REG_BIT_FIELD(0x18, 28, 1)
147 +#define NANDC_CS_SW_USING_CS(n) REG_BIT_FIELD(0x18, 8+(n), 1)
148 +#define NANDC_CS_MAP_SEL_CS(n) REG_BIT_FIELD(0x18, 0+(n), 1)
149 +
150 +#define NANDC_XOR_ADDR_BLK0_ONLY REG_BIT_FIELD(0x1c, 31, 1)
151 +#define NANDC_XOR_ADDR_CS(n) REG_BIT_FIELD(0x1c, 0+(n), 1)
152 +
153 +#define NANDC_LL_OP_RET_IDLE REG_BIT_FIELD(0x20, 31, 1)
154 +#define NANDC_LL_OP_CLE REG_BIT_FIELD(0x20, 19, 1)
155 +#define NANDC_LL_OP_ALE REG_BIT_FIELD(0x20, 18, 1)
156 +#define NANDC_LL_OP_WE REG_BIT_FIELD(0x20, 17, 1)
157 +#define NANDC_LL_OP_RE REG_BIT_FIELD(0x20, 16, 1)
158 +#define NANDC_LL_OP_DATA REG_BIT_FIELD(0x20, 0, 16)
159 +
160 +#define NANDC_MPLANE_ADDR_EXT REG_BIT_FIELD(0x24, 0, 16)
161 +#define NANDC_MPLANE_ADDR REG_BIT_FIELD(0x28, 0, 32)
162 +
163 +#define NANDC_ACC_CTRL_CS(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 32)
164 +#define NANDC_ACC_CTRL_RD_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 31, 1)
165 +#define NANDC_ACC_CTRL_WR_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 30, 1)
166 +#define NANDC_ACC_CTRL_CE_CARE(n) REG_BIT_FIELD(0x50+((n)<<4), 29, 1)
167 +#define NANDC_ACC_CTRL_PGM_RDIN(n) REG_BIT_FIELD(0x50+((n)<<4), 28, 1)
168 +#define NANDC_ACC_CTRL_ERA_ECC_ERR(n) REG_BIT_FIELD(0x50+((n)<<4), 27, 1)
169 +#define NANDC_ACC_CTRL_PGM_PARTIAL(n) REG_BIT_FIELD(0x50+((n)<<4), 26, 1)
170 +#define NANDC_ACC_CTRL_WR_PREEMPT(n) REG_BIT_FIELD(0x50+((n)<<4), 25, 1)
171 +#define NANDC_ACC_CTRL_PG_HIT(n) REG_BIT_FIELD(0x50+((n)<<4), 24, 1)
172 +#define NANDC_ACC_CTRL_PREFETCH(n) REG_BIT_FIELD(0x50+((n)<<4), 23, 1)
173 +#define NANDC_ACC_CTRL_CACHE_MODE(n) REG_BIT_FIELD(0x50+((n)<<4), 22, 1)
174 +#define NANDC_ACC_CTRL_CACHE_LASTPG(n) REG_BIT_FIELD(0x50+((n)<<4), 21, 1)
175 +#define NANDC_ACC_CTRL_ECC_LEVEL(n) REG_BIT_FIELD(0x50+((n)<<4), 16, 5)
176 +#define NANDC_ACC_CTRL_SECTOR_1K(n) REG_BIT_FIELD(0x50+((n)<<4), 7, 1)
177 +#define NANDC_ACC_CTRL_SPARE_SIZE(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 7)
178 +
179 +#define NANDC_CONFIG_CS(n) REG_BIT_FIELD(0x54+((n)<<4), 0, 32)
180 +#define NANDC_CONFIG_LOCK(n) REG_BIT_FIELD(0x54+((n)<<4), 31, 1)
181 +#define NANDC_CONFIG_BLK_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 28, 3)
182 +#define NANDC_CONFIG_CHIP_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 24, 4)
183 +#define NANDC_CONFIG_CHIP_WIDTH(n) REG_BIT_FIELD(0x54+((n)<<4), 23, 1)
184 +#define NANDC_CONFIG_PAGE_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 20, 2)
185 +#define NANDC_CONFIG_FUL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 16, 3)
186 +#define NANDC_CONFIG_COL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 12, 3)
187 +#define NANDC_CONFIG_BLK_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 8, 3)
188 +
189 +#define NANDC_TIMING_1_CS(n) REG_BIT_FIELD(0x58+((n)<<4), 0, 32)
190 +#define NANDC_TIMING_2_CS(n) REG_BIT_FIELD(0x5c+((n)<<4), 0, 32)
191 + /* Individual bits for Timing registers - TBD */
192 +
193 +#define NANDC_CORR_STAT_THRESH_CS(n) REG_BIT_FIELD(0xc0, 6*(n), 6)
194 +
195 +#define NANDC_BLK_WP_END_ADDR REG_BIT_FIELD(0xc8, 0, 32)
196 +
197 +#define NANDC_MPLANE_ERASE_CYC2_OPCODE REG_BIT_FIELD(0xcc, 24, 8)
198 +#define NANDC_MPLANE_READ_STAT_OPCODE REG_BIT_FIELD(0xcc, 16, 8)
199 +#define NANDC_MPLANE_PROG_ODD_OPCODE REG_BIT_FIELD(0xcc, 8, 8)
200 +#define NANDC_MPLANE_PROG_TRL_OPCODE REG_BIT_FIELD(0xcc, 0, 8)
201 +
202 +#define NANDC_MPLANE_PGCACHE_TRL_OPCODE REG_BIT_FIELD(0xd0, 24, 8)
203 +#define NANDC_MPLANE_READ_STAT2_OPCODE REG_BIT_FIELD(0xd0, 16, 8)
204 +#define NANDC_MPLANE_READ_EVEN_OPCODE REG_BIT_FIELD(0xd0, 8, 8)
205 +#define NANDC_MPLANE_READ_ODD__OPCODE REG_BIT_FIELD(0xd0, 0, 8)
206 +
207 +#define NANDC_MPLANE_CTRL_ERASE_CYC2_EN REG_BIT_FIELD(0xd4, 31, 1)
208 +#define NANDC_MPLANE_CTRL_RD_ADDR_SIZE REG_BIT_FIELD(0xd4, 30, 1)
209 +#define NANDC_MPLANE_CTRL_RD_CYC_ADDR REG_BIT_FIELD(0xd4, 29, 1)
210 +#define NANDC_MPLANE_CTRL_RD_COL_ADDR REG_BIT_FIELD(0xd4, 28, 1)
211 +
212 +#define NANDC_UNCORR_ERR_COUNT REG_BIT_FIELD(0xfc, 0, 32)
213 +
214 +#define NANDC_CORR_ERR_COUNT REG_BIT_FIELD(0x100, 0, 32)
215 +
216 +#define NANDC_READ_CORR_BIT_COUNT REG_BIT_FIELD(0x104, 0, 32)
217 +
218 +#define NANDC_BLOCK_LOCK_STATUS REG_BIT_FIELD(0x108, 0, 8)
219 +
220 +#define NANDC_ECC_CORR_ADDR_CS REG_BIT_FIELD(0x10c, 16, 3)
221 +#define NANDC_ECC_CORR_ADDR_EXT REG_BIT_FIELD(0x10c, 0, 16)
222 +
223 +#define NANDC_ECC_CORR_ADDR REG_BIT_FIELD(0x110, 0, 32)
224 +
225 +#define NANDC_ECC_UNC_ADDR_CS REG_BIT_FIELD(0x114, 16, 3)
226 +#define NANDC_ECC_UNC_ADDR_EXT REG_BIT_FIELD(0x114, 0, 16)
227 +
228 +#define NANDC_ECC_UNC_ADDR REG_BIT_FIELD(0x118, 0, 32)
229 +
230 +#define NANDC_READ_ADDR_CS REG_BIT_FIELD(0x11c, 16, 3)
231 +#define NANDC_READ_ADDR_EXT REG_BIT_FIELD(0x11c, 0, 16)
232 +#define NANDC_READ_ADDR REG_BIT_FIELD(0x120, 0, 32)
233 +
234 +#define NANDC_PROG_ADDR_CS REG_BIT_FIELD(0x124, 16, 3)
235 +#define NANDC_PROG_ADDR_EXT REG_BIT_FIELD(0x124, 0, 16)
236 +#define NANDC_PROG_ADDR REG_BIT_FIELD(0x128, 0, 32)
237 +
238 +#define NANDC_CPYBK_ADDR_CS REG_BIT_FIELD(0x12c, 16, 3)
239 +#define NANDC_CPYBK_ADDR_EXT REG_BIT_FIELD(0x12c, 0, 16)
240 +#define NANDC_CPYBK_ADDR REG_BIT_FIELD(0x130, 0, 32)
241 +
242 +#define NANDC_ERASE_ADDR_CS REG_BIT_FIELD(0x134, 16, 3)
243 +#define NANDC_ERASE_ADDR_EXT REG_BIT_FIELD(0x134, 0, 16)
244 +#define NANDC_ERASE_ADDR REG_BIT_FIELD(0x138, 0, 32)
245 +
246 +#define NANDC_INV_READ_ADDR_CS REG_BIT_FIELD(0x13c, 16, 3)
247 +#define NANDC_INV_READ_ADDR_EXT REG_BIT_FIELD(0x13c, 0, 16)
248 +#define NANDC_INV_READ_ADDR REG_BIT_FIELD(0x140, 0, 32)
249 +
250 +#define NANDC_INIT_STAT REG_BIT_FIELD(0x144, 0, 32)
251 +#define NANDC_INIT_ONFI_DONE REG_BIT_FIELD(0x144, 31, 1)
252 +#define NANDC_INIT_DEVID_DONE REG_BIT_FIELD(0x144, 30, 1)
253 +#define NANDC_INIT_SUCCESS REG_BIT_FIELD(0x144, 29, 1)
254 +#define NANDC_INIT_FAIL REG_BIT_FIELD(0x144, 28, 1)
255 +#define NANDC_INIT_BLANK REG_BIT_FIELD(0x144, 27, 1)
256 +#define NANDC_INIT_TIMEOUT REG_BIT_FIELD(0x144, 26, 1)
257 +#define NANDC_INIT_UNC_ERROR REG_BIT_FIELD(0x144, 25, 1)
258 +#define NANDC_INIT_CORR_ERROR REG_BIT_FIELD(0x144, 24, 1)
259 +#define NANDC_INIT_PARAM_RDY REG_BIT_FIELD(0x144, 23, 1)
260 +#define NANDC_INIT_AUTH_FAIL REG_BIT_FIELD(0x144, 22, 1)
261 +
262 +#define NANDC_ONFI_STAT REG_BIT_FIELD(0x148, 0, 32)
263 +#define NANDC_ONFI_DEBUG REG_BIT_FIELD(0x148, 28, 4)
264 +#define NANDC_ONFI_PRESENT REG_BIT_FIELD(0x148, 27, 1)
265 +#define NANDC_ONFI_BADID_PG2 REG_BIT_FIELD(0x148, 5, 1)
266 +#define NANDC_ONFI_BADID_PG1 REG_BIT_FIELD(0x148, 4, 1)
267 +#define NANDC_ONFI_BADID_PG0 REG_BIT_FIELD(0x148, 3, 1)
268 +#define NANDC_ONFI_BADCRC_PG2 REG_BIT_FIELD(0x148, 2, 1)
269 +#define NANDC_ONFI_BADCRC_PG1 REG_BIT_FIELD(0x148, 1, 1)
270 +#define NANDC_ONFI_BADCRC_PG0 REG_BIT_FIELD(0x148, 0, 1)
271 +
272 +#define NANDC_ONFI_DEBUG_DATA REG_BIT_FIELD(0x14c, 0, 32)
273 +
274 +#define NANDC_SEMAPHORE REG_BIT_FIELD(0x150, 0, 8)
275 +
276 +#define NANDC_DEVID_BYTE(b) REG_BIT_FIELD(0x194+((b)&0x4), \
277 + 24-(((b)&3)<<3), 8)
278 +
279 +#define NANDC_LL_RDDATA REG_BIT_FIELD(0x19c, 0, 16)
280 +
281 +#define NANDC_INT_N_REG(n) REG_BIT_FIELD(0xf00|((n)<<2), 0, 1)
282 +#define NANDC_INT_DIREC_READ_MISS REG_BIT_FIELD(0xf00, 0, 1)
283 +#define NANDC_INT_ERASE_DONE REG_BIT_FIELD(0xf04, 0, 1)
284 +#define NANDC_INT_CPYBK_DONE REG_BIT_FIELD(0xf08, 0, 1)
285 +#define NANDC_INT_PROGRAM_DONE REG_BIT_FIELD(0xf0c, 0, 1)
286 +#define NANDC_INT_CONTROLLER_RDY REG_BIT_FIELD(0xf10, 0, 1)
287 +#define NANDC_INT_RDBSY_RDY REG_BIT_FIELD(0xf14, 0, 1)
288 +#define NANDC_INT_ECC_UNCORRECTABLE REG_BIT_FIELD(0xf18, 0, 1)
289 +#define NANDC_INT_ECC_CORRECTABLE REG_BIT_FIELD(0xf1c, 0, 1)
290 +
291 +/*
292 + * Following registers are treated as contiguous IO memory, offset is from
293 + * <reg_base>, and the data is in big-endian byte order
294 + */
295 +#define NANDC_SPARE_AREA_READ_OFF 0x200
296 +#define NANDC_SPARE_AREA_WRITE_OFF 0x280
297 +#define NANDC_CACHE_OFF 0x400
298 +#define NANDC_CACHE_SIZE (128*4)
299 +
300 +struct bcmnand_areg_field {
301 + unsigned int reg;
302 + unsigned int pos;
303 + unsigned int width;
304 +};
305 +
306 +/*
307 + * Following IDM (a.k.a. Slave Wrapper) registers are off <idm_base>:
308 + */
309 +#define IDMREG_BIT_FIELD(r, p, w) ((struct bcmnand_areg_field){(r), (p), (w)})
310 +
311 +#define NANDC_IDM_AXI_BIG_ENDIAN IDMREG_BIT_FIELD(0x408, 28, 1)
312 +#define NANDC_IDM_APB_LITTLE_ENDIAN IDMREG_BIT_FIELD(0x408, 24, 1)
313 +#define NANDC_IDM_TM IDMREG_BIT_FIELD(0x408, 16, 5)
314 +#define NANDC_IDM_IRQ_CORRECABLE_EN IDMREG_BIT_FIELD(0x408, 9, 1)
315 +#define NANDC_IDM_IRQ_UNCORRECABLE_EN IDMREG_BIT_FIELD(0x408, 8, 1)
316 +#define NANDC_IDM_IRQ_RDYBSY_RDY_EN IDMREG_BIT_FIELD(0x408, 7, 1)
317 +#define NANDC_IDM_IRQ_CONTROLLER_RDY_EN IDMREG_BIT_FIELD(0x408, 6, 1)
318 +#define NANDC_IDM_IRQ_PRPOGRAM_COMP_EN IDMREG_BIT_FIELD(0x408, 5, 1)
319 +#define NANDC_IDM_IRQ_COPYBK_COMP_EN IDMREG_BIT_FIELD(0x408, 4, 1)
320 +#define NANDC_IDM_IRQ_ERASE_COMP_EN IDMREG_BIT_FIELD(0x408, 3, 1)
321 +#define NANDC_IDM_IRQ_READ_MISS_EN IDMREG_BIT_FIELD(0x408, 2, 1)
322 +#define NANDC_IDM_IRQ_N_EN(n) IDMREG_BIT_FIELD(0x408, 2+(n), 1)
323 +
324 +#define NANDC_IDM_CLOCK_EN IDMREG_BIT_FIELD(0x408, 0, 1)
325 +
326 +#define NANDC_IDM_IO_ECC_CORR IDMREG_BIT_FIELD(0x500, 3, 1)
327 +#define NANDC_IDM_IO_ECC_UNCORR IDMREG_BIT_FIELD(0x500, 2, 1)
328 +#define NANDC_IDM_IO_RDYBSY IDMREG_BIT_FIELD(0x500, 1, 1)
329 +#define NANDC_IDM_IO_CTRL_RDY IDMREG_BIT_FIELD(0x500, 0, 1)
330 +
331 +#define NANDC_IDM_RESET IDMREG_BIT_FIELD(0x800, 0, 1)
332 + /* Remaining IDM registers do not seem to be useful, skipped */
333 +
334 +/*
335 + * NAND Controller has its own command opcodes
336 + * different from opcodes sent to the actual flash chip
337 + */
338 +#define NANDC_CMD_OPCODE_NULL 0
339 +#define NANDC_CMD_OPCODE_PAGE_READ 1
340 +#define NANDC_CMD_OPCODE_SPARE_READ 2
341 +#define NANDC_CMD_OPCODE_STATUS_READ 3
342 +#define NANDC_CMD_OPCODE_PAGE_PROG 4
343 +#define NANDC_CMD_OPCODE_SPARE_PROG 5
344 +#define NANDC_CMD_OPCODE_DEVID_READ 7
345 +#define NANDC_CMD_OPCODE_BLOCK_ERASE 8
346 +#define NANDC_CMD_OPCODE_FLASH_RESET 9
347 +
348 +/*
349 + * NAND Controller hardware ECC data size
350 + *
351 + * The following table contains the number of bytes needed for
352 + * each of the ECC levels, per "sector", which is either 512 or 1024 bytes.
353 + * The actual layout is as follows:
354 + * The entire spare area is equally divided into as many sections as there
355 + * are sectors per page, and the ECC data is located at the end of each
356 + * of these sections.
357 + * For example, given a 2K per page and 64 bytes spare device, configured for
358 + * sector size 1k and ECC level of 4, the spare area will be divided into 2
359 + * sections 32 bytes each, and the last 14 bytes of 32 in each section will
360 + * be filled with ECC data.
361 + * Note: the name of the algorithm and the number of error bits it can correct
362 + * is of no consequence to this driver, therefore omitted.
363 + */
364 +struct bcmnand_ecc_size_s {
365 + unsigned char sector_size_shift;
366 + unsigned char ecc_level;
367 + unsigned char ecc_bytes_per_sec;
368 + unsigned char reserved;
369 +};
370 +
371 +static const struct bcmnand_ecc_size_s bcmnand_ecc_sizes[] = {
372 + { 9, 0, 0 },
373 + { 10, 0, 0 },
374 + { 9, 1, 2 },
375 + { 10, 1, 4 },
376 + { 9, 2, 4 },
377 + { 10, 2, 7 },
378 + { 9, 3, 6 },
379 + { 10, 3, 11 },
380 + { 9, 4, 7 },
381 + { 10, 4, 14 },
382 + { 9, 5, 9 },
383 + { 10, 5, 18 },
384 + { 9, 6, 11 },
385 + { 10, 6, 21 },
386 + { 9, 7, 13 },
387 + { 10, 7, 25 },
388 + { 9, 8, 14 },
389 + { 10, 8, 28 },
390 +
391 + { 9, 9, 16 },
392 + { 9, 10, 18 },
393 + { 9, 11, 20 },
394 + { 9, 12, 21 },
395 +
396 + { 10, 9, 32 },
397 + { 10, 10, 35 },
398 + { 10, 11, 39 },
399 + { 10, 12, 42 },
400 +};
401 +
402 +/*
403 + * Populate the various fields that depend on how
404 + * the hardware ECC data is located in the spare area
405 + *
406 + * For this controller, it is easier to fill-in these
407 + * structures at run time.
408 + *
409 + * The bad-block marker is assumed to occupy one byte
410 + * at chip->badblockpos, which must be in the first
411 + * sector of the spare area, namely it is either
412 + * at offset 0 or 5.
413 + * Some chips use both for manufacturer's bad block
414 + * markers, but we ignore that issue here, and assume only
415 + * one byte is used as bad-block marker always.
416 + */
417 +static int bcmnand_hw_ecc_layout(struct bcmnand_ctrl *ctrl)
418 +{
419 + struct nand_ecclayout *layout;
420 + struct device *dev = &ctrl->core->dev;
421 + unsigned int i, j, k;
422 + unsigned int ecc_per_sec, oob_per_sec;
423 + unsigned int bbm_pos = ctrl->nand.badblockpos;
424 +
425 + /* Calculate spare area per sector size */
426 + oob_per_sec = ctrl->mtd.oobsize >> ctrl->sec_per_page_shift;
427 +
428 + /* Try to calculate the amount of ECC bytes per sector with a formula */
429 + if (ctrl->sector_size_shift == 9)
430 + ecc_per_sec = ((ctrl->ecc_level * 14) + 7) >> 3;
431 + else if (ctrl->sector_size_shift == 10)
432 + ecc_per_sec = ((ctrl->ecc_level * 14) + 3) >> 2;
433 + else
434 + ecc_per_sec = oob_per_sec + 1; /* cause an error if not in table */
435 +
436 + /* Now find out the answer according to the table */
437 + for (i = 0; i < ARRAY_SIZE(bcmnand_ecc_sizes); i++) {
438 + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
439 + bcmnand_ecc_sizes[i].sector_size_shift ==
440 + ctrl->sector_size_shift) {
441 + break;
442 + }
443 + }
444 +
445 + /* Table match overrides formula */
446 + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
447 + bcmnand_ecc_sizes[i].sector_size_shift == ctrl->sector_size_shift)
448 + ecc_per_sec = bcmnand_ecc_sizes[i].ecc_bytes_per_sec;
449 +
450 + /* Return an error if calculated ECC leaves no room for OOB */
451 + if ((ctrl->sec_per_page_shift != 0 && ecc_per_sec >= oob_per_sec) ||
452 + (ctrl->sec_per_page_shift == 0 && ecc_per_sec >= (oob_per_sec - 1))) {
453 + dev_err(dev, "ECC level %d too high, leaves no room for OOB data\n",
454 + ctrl->ecc_level);
455 + return -EINVAL;
456 + }
457 +
458 + /* Fill in the needed fields */
459 + ctrl->nand.ecc.size = ctrl->mtd.writesize >> ctrl->sec_per_page_shift;
460 + ctrl->nand.ecc.bytes = ecc_per_sec;
461 + ctrl->nand.ecc.steps = 1 << ctrl->sec_per_page_shift;
462 + ctrl->nand.ecc.total = ecc_per_sec << ctrl->sec_per_page_shift;
463 + ctrl->nand.ecc.strength = ctrl->ecc_level;
464 +
465 + /* Build an ecc layout data structure */
466 + layout = &ctrl->ecclayout;
467 + memset(layout, 0, sizeof(*layout));
468 +
469 + /* Total number of bytes used by HW ECC */
470 + layout->eccbytes = ecc_per_sec << ctrl->sec_per_page_shift;
471 +
472 + /* Location for each of the HW ECC bytes */
473 + for (i = j = 0, k = 1;
474 + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes;
475 + i++, j++) {
476 + /* switch sector # */
477 + if (j == ecc_per_sec) {
478 + j = 0;
479 + k++;
480 + }
481 + /* save position of each HW-generated ECC byte */
482 + layout->eccpos[i] = (oob_per_sec * k) - ecc_per_sec + j;
483 +
484 + /* Check that HW ECC does not overlap bad-block marker */
485 + if (bbm_pos == layout->eccpos[i]) {
486 + dev_err(dev, "ECC level %d too high, HW ECC collides with bad-block marker position\n",
487 + ctrl->ecc_level);
488 + return -EINVAL;
489 + }
490 + }
491 +
492 + /* Location of all user-available OOB byte-ranges */
493 + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++) {
494 + struct nand_oobfree *oobfree = &layout->oobfree[i];
495 +
496 + if (i >= (1 << ctrl->sec_per_page_shift))
497 + break;
498 + oobfree->offset = oob_per_sec * i;
499 + oobfree->length = oob_per_sec - ecc_per_sec;
500 +
501 + /* Bad-block marker must be in the first sector spare area */
502 + if (WARN_ON(bbm_pos >= (oobfree->offset + oobfree->length)))
503 + return -EINVAL;
504 +
505 + if (i != 0)
506 + continue;
507 +
508 + /* Remove bad-block marker from available byte range */
509 + if (bbm_pos == oobfree->offset) {
510 + oobfree->offset += 1;
511 + oobfree->length -= 1;
512 + } else if (bbm_pos == (oobfree->offset + oobfree->length - 1)) {
513 + oobfree->length -= 1;
514 + } else {
515 + layout->oobfree[i + 1].offset = bbm_pos + 1;
516 + layout->oobfree[i + 1].length =
517 + oobfree->length - bbm_pos - 1;
518 + oobfree->length = bbm_pos;
519 + i++;
520 + }
521 + }
522 +
523 + layout->oobavail = ((oob_per_sec - ecc_per_sec)
524 + << ctrl->sec_per_page_shift) - 1;
525 +
526 + ctrl->mtd.oobavail = layout->oobavail;
527 + ctrl->nand.ecc.layout = layout;
528 +
529 + /* Output layout for debugging */
530 + dev_dbg(dev, "Spare area=%d eccbytes %d, ecc bytes located at:\n",
531 + ctrl->mtd.oobsize, layout->eccbytes);
532 + for (i = j = 0;
533 + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes; i++)
534 + pr_debug(" %d", layout->eccpos[i]);
535 + pr_debug("\n");
536 +
537 + dev_dbg(dev, "Available %d bytes at (off,len):\n", layout->oobavail);
538 + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++)
539 + pr_debug("(%d,%d) ", layout->oobfree[i].offset,
540 + layout->oobfree[i].length);
541 + pr_debug("\n");
542 +
543 + return 0;
544 +}
545 +
546 +/*
547 + * Register bit-field manipulation routines
548 + */
549 +
550 +static inline unsigned int bcmnand_reg_read(struct bcmnand_ctrl *ctrl,
551 + struct bcmnand_reg_field rbf)
552 +{
553 + u32 val;
554 +
555 + val = bcma_read32(ctrl->core, rbf.reg);
556 + val >>= rbf.pos;
557 + val &= (1 << rbf.width) - 1;
558 +
559 + return val;
560 +}
561 +
562 +static inline void bcmnand_reg_write(struct bcmnand_ctrl *ctrl,
563 + struct bcmnand_reg_field rbf,
564 + unsigned newval)
565 +{
566 + u32 val, msk;
567 +
568 + msk = (1 << rbf.width) - 1;
569 + msk <<= rbf.pos;
570 + newval <<= rbf.pos;
571 + newval &= msk;
572 +
573 + val = bcma_read32(ctrl->core, rbf.reg);
574 + val &= ~msk;
575 + val |= newval;
576 + bcma_write32(ctrl->core, rbf.reg, val);
577 +}
578 +
579 +static inline unsigned int bcmnand_reg_aread(struct bcmnand_ctrl *ctrl,
580 + struct bcmnand_areg_field rbf)
581 +{
582 + u32 val;
583 +
584 + val = bcma_aread32(ctrl->core, rbf.reg);
585 + val >>= rbf.pos;
586 + val &= (1 << rbf.width) - 1;
587 +
588 + return val;
589 +}
590 +
591 +static inline void bcmnand_reg_awrite(struct bcmnand_ctrl *ctrl,
592 + struct bcmnand_areg_field rbf,
593 + unsigned int newval)
594 +{
595 + u32 val, msk;
596 +
597 + msk = (1 << rbf.width) - 1;
598 + msk <<= rbf.pos;
599 + newval <<= rbf.pos;
600 + newval &= msk;
601 +
602 + val = bcma_aread32(ctrl->core, rbf.reg);
603 + val &= ~msk;
604 + val |= newval;
605 + bcma_awrite32(ctrl->core, rbf.reg, val);
606 +}
607 +
608 +/*
609 + * NAND Interface - dev_ready
610 + *
611 + * Return 1 iff device is ready, 0 otherwise
612 + */
613 +static int bcmnand_dev_ready(struct mtd_info *mtd)
614 +{
615 + struct nand_chip *chip = mtd->priv;
616 + struct bcmnand_ctrl *ctrl = chip->priv;
617 +
618 + return bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY);
619 +}
620 +
621 +/*
622 + * Interrupt service routines
623 + */
624 +static irqreturn_t bcmnand_isr(int irq, void *dev_id)
625 +{
626 + struct bcmnand_ctrl *ctrl = dev_id;
627 + int irq_off;
628 +
629 + irq_off = irq - ctrl->core->irq;
630 + WARN_ON(irq_off < 0 || irq_off >= NANDC_IRQ_NUM);
631 +
632 + if (!bcmnand_reg_read(ctrl, NANDC_INT_N_REG(irq_off)))
633 + return IRQ_NONE;
634 +
635 + /* Acknowledge interrupt */
636 + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
637 +
638 + /* Wake up task */
639 + complete(&ctrl->op_completion);
640 +
641 + return IRQ_HANDLED;
642 +}
643 +
644 +static int bcmnand_wait_interrupt(struct bcmnand_ctrl *ctrl,
645 + unsigned int irq_off,
646 + unsigned int timeout_usec)
647 +{
648 + long timeout_jiffies;
649 + int ret = 0;
650 +
651 + reinit_completion(&ctrl->op_completion);
652 +
653 + /* Acknowledge interrupt */
654 + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
655 +
656 + /* Enable IRQ to wait on */
657 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 1);
658 +
659 + timeout_jiffies = 1 + usecs_to_jiffies(timeout_usec);
660 +
661 + if (irq_off != NANDC_IRQ_CONTROLLER_RDY ||
662 + 0 == bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY)) {
663 +
664 + timeout_jiffies = wait_for_completion_interruptible_timeout(
665 + &ctrl->op_completion, timeout_jiffies);
666 +
667 + if (timeout_jiffies < 0)
668 + ret = timeout_jiffies;
669 + if (timeout_jiffies == 0)
670 + ret = -ETIME;
671 + }
672 +
673 + /* Disable IRQ, we're done waiting */
674 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
675 +
676 + if (bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY))
677 + ret = 0;
678 +
679 + return ret;
680 +}
681 +
682 +/*
683 + * wait for command completion
684 + */
685 +static int bcmnand_wait_cmd(struct bcmnand_ctrl *ctrl, unsigned int timeout_usec)
686 +{
687 + unsigned int retries;
688 +
689 + if (bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
690 + return 0;
691 +
692 + /* If the timeout is long, wait for interrupt */
693 + if (timeout_usec >= jiffies_to_usecs(1) >> 4)
694 + return bcmnand_wait_interrupt(
695 + ctrl, NANDC_IRQ_CONTROLLER_RDY, timeout_usec);
696 +
697 + /* Wait for completion of the prior command */
698 + retries = (timeout_usec >> 3) + 1;
699 +
700 + while (retries-- &&
701 + 0 == bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY)) {
702 + cpu_relax();
703 + udelay(6);
704 + }
705 +
706 + if (retries == 0)
707 + return -ETIME;
708 +
709 + return 0;
710 +}
711 +
712 +
713 +/*
714 + * NAND Interface - waitfunc
715 + */
716 +static int bcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
717 +{
718 + struct bcmnand_ctrl *ctrl = chip->priv;
719 + unsigned int to;
720 + int ret;
721 +
722 + /* figure out timeout based on what command is on */
723 + switch (ctrl->last_cmd) {
724 + default:
725 + case NAND_CMD_ERASE1:
726 + case NAND_CMD_ERASE2:
727 + to = 1 << 16;
728 + break;
729 + case NAND_CMD_STATUS:
730 + case NAND_CMD_RESET:
731 + to = 256;
732 + break;
733 + case NAND_CMD_READID:
734 + to = 1024;
735 + break;
736 + case NAND_CMD_READ1:
737 + case NAND_CMD_READ0:
738 + to = 2048;
739 + break;
740 + case NAND_CMD_PAGEPROG:
741 + to = 4096;
742 + break;
743 + case NAND_CMD_READOOB:
744 + to = 512;
745 + break;
746 + }
747 +
748 + /* deliver deferred error code if any */
749 + ret = ctrl->cmd_ret;
750 + if (ret < 0)
751 + ctrl->cmd_ret = 0;
752 + else
753 + ret = bcmnand_wait_cmd(ctrl, to);
754 +
755 + /* Timeout */
756 + if (ret < 0)
757 + return NAND_STATUS_FAIL;
758 +
759 + ret = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
760 +
761 + return ret;
762 +}
763 +
764 +/*
765 + * NAND Interface - read_oob
766 + */
767 +static int bcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
768 + int page)
769 +{
770 + struct bcmnand_ctrl *ctrl = chip->priv;
771 + unsigned int n = ctrl->chip_num;
772 + void __iomem *ctrl_spare;
773 + unsigned int spare_per_sec, sector;
774 + u64 nand_addr;
775 +
776 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
777 +
778 + /* Set the page address for the following commands */
779 + nand_addr = ((u64)page << chip->page_shift);
780 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
781 +
782 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
783 +
784 + /* Disable ECC validation for spare area reads */
785 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), 0);
786 +
787 + /* Loop all sectors in page */
788 + for (sector = 0; sector < (1<<ctrl->sec_per_page_shift); sector++) {
789 + unsigned int col;
790 +
791 + col = (sector << ctrl->sector_size_shift);
792 +
793 + /* Issue command to read partial page */
794 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
795 +
796 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
797 + NANDC_CMD_OPCODE_SPARE_READ);
798 +
799 + /* Wait for the command to complete */
800 + if (bcmnand_wait_cmd(ctrl, (sector == 0) ? 10000 : 100))
801 + return -EIO;
802 +
803 + if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_SPARE_VALID))
804 + return -EIO;
805 +
806 + /* Set controller to Little Endian mode for copying */
807 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
808 +
809 + memcpy(chip->oob_poi + sector * spare_per_sec,
810 + ctrl_spare, spare_per_sec);
811 +
812 + /* Return to Big Endian mode for commands etc */
813 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
814 + }
815 +
816 + return 0;
817 +}
818 +
819 +/*
820 + * NAND Interface - write_oob
821 + */
822 +static int bcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
823 + int page)
824 +{
825 + struct bcmnand_ctrl *ctrl = chip->priv;
826 + unsigned int n = ctrl->chip_num;
827 + void __iomem *ctrl_spare;
828 + unsigned int spare_per_sec, sector, num_sec;
829 + u64 nand_addr;
830 + int to, status = 0;
831 +
832 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
833 +
834 + /* Disable ECC generation for spare area writes */
835 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), 0);
836 +
837 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
838 +
839 + /* Set the page address for the following commands */
840 + nand_addr = ((u64)page << chip->page_shift);
841 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
842 +
843 + /* Must allow partial programming to change spare area only */
844 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 1);
845 +
846 + num_sec = 1 << ctrl->sec_per_page_shift;
847 + /* Loop all sectors in page */
848 + for (sector = 0; sector < num_sec; sector++) {
849 + unsigned int col;
850 +
851 + /* Spare area accessed by the data sector offset */
852 + col = (sector << ctrl->sector_size_shift);
853 +
854 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
855 +
856 + /* Set controller to Little Endian mode for copying */
857 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
858 +
859 + memcpy(ctrl_spare, chip->oob_poi + sector * spare_per_sec,
860 + spare_per_sec);
861 +
862 + /* Return to Big Endian mode for commands etc */
863 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
864 +
865 + /* Push spare bytes into internal buffer, last goes to flash */
866 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
867 + NANDC_CMD_OPCODE_SPARE_PROG);
868 +
869 + if (sector == (num_sec - 1))
870 + to = 1 << 16;
871 + else
872 + to = 1 << 10;
873 +
874 + if (bcmnand_wait_cmd(ctrl, to))
875 + return -EIO;
876 + }
877 +
878 + /* Restore partial programming inhibition */
879 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 0);
880 +
881 + status = bcmnand_waitfunc(mtd, chip);
882 + return status & NAND_STATUS_FAIL ? -EIO : 0;
883 +}
884 +
885 +/*
886 + * verify that a buffer is all erased
887 + */
888 +static bool bcmnand_buf_erased(const void *buf, unsigned int len)
889 +{
890 + unsigned int i;
891 + const u32 *p = buf;
892 +
893 + for (i = 0; i < (len >> 2); i++) {
894 + if (p[i] != 0xffffffff)
895 + return false;
896 + }
897 + return true;
898 +}
899 +
900 +/*
901 + * read a page, with or without ECC checking
902 + */
903 +static int bcmnand_read_page_do(struct mtd_info *mtd, struct nand_chip *chip,
904 + uint8_t *buf, int page, bool ecc)
905 +{
906 + struct bcmnand_ctrl *ctrl = chip->priv;
907 + unsigned int n = ctrl->chip_num;
908 + void __iomem *ctrl_cache;
909 + void __iomem *ctrl_spare;
910 + unsigned int data_bytes;
911 + unsigned int spare_per_sec;
912 + unsigned int sector, to = 1 << 16;
913 + u32 err_soft_reg, err_hard_reg;
914 + unsigned int hard_err_count = 0;
915 + int ret;
916 + u64 nand_addr;
917 +
918 + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
919 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
920 +
921 + /* Reset ECC error stats */
922 + err_hard_reg = bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT);
923 + err_soft_reg = bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
924 +
925 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
926 +
927 + /* Set the page address for the following commands */
928 + nand_addr = ((u64)page << chip->page_shift);
929 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
930 +
931 + /* Enable ECC validation for ecc page reads */
932 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), ecc);
933 +
934 + /* Loop all sectors in page */
935 + for (sector = 0; sector < (1 << ctrl->sec_per_page_shift); sector++) {
936 + data_bytes = 0;
937 +
938 + /* Copy partial sectors sized by cache reg */
939 + while (data_bytes < (1<<ctrl->sector_size_shift)) {
940 + unsigned int col;
941 +
942 + col = data_bytes + (sector << ctrl->sector_size_shift);
943 +
944 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
945 + nand_addr + col);
946 +
947 + /* Issue command to read partial page */
948 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
949 + NANDC_CMD_OPCODE_PAGE_READ);
950 +
951 + /* Wait for the command to complete */
952 + ret = bcmnand_wait_cmd(ctrl, to);
953 + if (ret < 0)
954 + return ret;
955 +
956 + /* Set controller to Little Endian mode for copying */
957 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
958 +
959 + if (data_bytes == 0) {
960 + memcpy(chip->oob_poi + sector * spare_per_sec,
961 + ctrl_spare, spare_per_sec);
962 + }
963 +
964 + memcpy(buf + col, ctrl_cache, NANDC_CACHE_SIZE);
965 + data_bytes += NANDC_CACHE_SIZE;
966 +
967 + /* Return to Big Endian mode for commands etc */
968 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
969 +
970 + /* Next iterations should go fast */
971 + to = 1 << 10;
972 +
973 + /* capture hard errors for each partial */
974 + if (err_hard_reg != bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT)) {
975 + int era = bcmnand_reg_read(ctrl, NANDC_INT_STAT_ERASED);
976 +
977 + if (!era &&
978 + !bcmnand_buf_erased(buf + col, NANDC_CACHE_SIZE))
979 + hard_err_count++;
980 +
981 + err_hard_reg = bcmnand_reg_read(ctrl,
982 + NANDC_UNCORR_ERR_COUNT);
983 + }
984 + }
985 + }
986 +
987 + if (!ecc)
988 + return 0;
989 +
990 + /* Report hard ECC errors */
991 + if (hard_err_count)
992 + mtd->ecc_stats.failed++;
993 +
994 + /* Get ECC soft error stats */
995 + mtd->ecc_stats.corrected += err_soft_reg -
996 + bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
997 +
998 + return 0;
999 +}
1000 +
1001 +/*
1002 + * NAND Interface - read_page_ecc
1003 + */
1004 +static int bcmnand_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1005 + uint8_t *buf, int oob_required, int page)
1006 +{
1007 + return bcmnand_read_page_do(mtd, chip, buf, page, true);
1008 +}
1009 +
1010 +/*
1011 + * NAND Interface - read_page_raw
1012 + */
1013 +static int bcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1014 + uint8_t *buf, int oob_required, int page)
1015 +{
1016 +	return bcmnand_read_page_do(mtd, chip, buf, page, false);
1017 +}
1018 +
1019 +/*
1020 + * do page write, with or without ECC generation enabled
1021 + */
1022 +static int bcmnand_write_page_do(struct mtd_info *mtd, struct nand_chip *chip,
1023 + const uint8_t *buf, bool ecc)
1024 +{
1025 + struct bcmnand_ctrl *ctrl = chip->priv;
1026 + unsigned int n = ctrl->chip_num;
1027 + void __iomem *ctrl_cache;
1028 + void __iomem *ctrl_spare;
1029 + unsigned int spare_per_sec, sector, num_sec;
1030 + unsigned int data_bytes, spare_bytes;
1031 + int i, to;
1032 + uint8_t *tmp_poi;
1033 + u32 nand_addr;
1034 +
1035 + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
1036 + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
1037 +
1038 + /* Get start-of-page address */
1039 + nand_addr = bcmnand_reg_read(ctrl, NANDC_CMD_ADDRESS);
1040 +
1041 + tmp_poi = kmalloc(mtd->oobsize, GFP_KERNEL);
1042 + if (!tmp_poi)
1043 + return -ENOMEM;
1044 +
1045 +	/* Retrieve pre-existing OOB values */
1046 + memcpy(tmp_poi, chip->oob_poi, mtd->oobsize);
1047 + ctrl->cmd_ret = bcmnand_read_oob(mtd, chip,
1048 + nand_addr >> chip->page_shift);
1049 + if (ctrl->cmd_ret < 0) {
1050 + kfree(tmp_poi);
1051 + return ctrl->cmd_ret;
1052 + }
1053 +
1054 + /* Apply new OOB data bytes just like they would end up on the chip */
1055 + for (i = 0; i < mtd->oobsize; i++)
1056 + chip->oob_poi[i] &= tmp_poi[i];
1057 + kfree(tmp_poi);
1058 +
1059 + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
1060 +
1061 + /* Enable ECC generation for ecc page write, if requested */
1062 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), ecc);
1063 +
1064 + spare_bytes = 0;
1065 + num_sec = 1 << ctrl->sec_per_page_shift;
1066 +
1067 + /* Loop all sectors in page */
1068 + for (sector = 0; sector < num_sec; sector++) {
1069 + data_bytes = 0;
1070 +
1071 + /* Copy partial sectors sized by cache reg */
1072 + while (data_bytes < (1<<ctrl->sector_size_shift)) {
1073 + unsigned int col;
1074 +
1075 + col = data_bytes +
1076 + (sector << ctrl->sector_size_shift);
1077 +
1078 + /* Set address of 512-byte sub-page */
1079 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
1080 + nand_addr + col);
1081 +
1082 + /* Set controller to Little Endian mode for copying */
1083 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN,
1084 + 1);
1085 +
1086 + /* Set spare area is written at each sector start */
1087 + if (data_bytes == 0) {
1088 + memcpy(ctrl_spare,
1089 + chip->oob_poi + spare_bytes,
1090 + spare_per_sec);
1091 + spare_bytes += spare_per_sec;
1092 + }
1093 +
1094 + /* Copy sub-page data */
1095 + memcpy(ctrl_cache, buf + col, NANDC_CACHE_SIZE);
1096 + data_bytes += NANDC_CACHE_SIZE;
1097 +
1098 + /* Return to Big Endian mode for commands etc */
1099 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1100 +
1101 + /* Push data into internal cache */
1102 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1103 + NANDC_CMD_OPCODE_PAGE_PROG);
1104 +
1105 + /* Wait for the command to complete */
1106 + if (sector == (num_sec - 1))
1107 + to = 1 << 16;
1108 + else
1109 + to = 1 << 10;
1110 + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1111 + if (ctrl->cmd_ret < 0)
1112 + return ctrl->cmd_ret;
1113 + }
1114 + }
1115 + return 0;
1116 +}
1117 +
1118 +/*
1119 + * NAND Interface = write_page_ecc
1120 + */
1121 +static int bcmnand_write_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1122 + const uint8_t *buf, int oob_required)
1123 +{
1124 + return bcmnand_write_page_do(mtd, chip, buf, true);
1125 +}
1126 +
1127 +/*
1128 + * NAND Interface = write_page_raw
1129 + */
1130 +static int bcmnand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1131 + const uint8_t *buf, int oob_required)
1132 +{
1133 + return bcmnand_write_page_do(mtd, chip, buf, false);
1134 +}
1135 +
1136 +/*
1137 + * MTD Interface - read_byte
1138 + *
1139 + * This function emulates simple controllers behavior
1140 + * for just a few relevant commands
1141 + */
1142 +static uint8_t bcmnand_read_byte(struct mtd_info *mtd)
1143 +{
1144 + struct nand_chip *nand = mtd->priv;
1145 + struct bcmnand_ctrl *ctrl = nand->priv;
1146 + struct device *dev = &ctrl->core->dev;
1147 + uint8_t b = ~0;
1148 +
1149 + switch (ctrl->last_cmd) {
1150 + case NAND_CMD_READID:
1151 + if (ctrl->id_byte_index < 8) {
1152 + b = bcmnand_reg_read(ctrl, NANDC_DEVID_BYTE(
1153 + ctrl->id_byte_index));
1154 + ctrl->id_byte_index++;
1155 + }
1156 + break;
1157 + case NAND_CMD_READOOB:
1158 + if (ctrl->oob_index < mtd->oobsize)
1159 + b = nand->oob_poi[ctrl->oob_index++];
1160 + break;
1161 + case NAND_CMD_STATUS:
1162 + b = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
1163 + break;
1164 + default:
1165 +		dev_err(dev, "got unknown command: 0x%x in read_byte\n",
1166 + ctrl->last_cmd);
1167 + }
1168 + return b;
1169 +}
1170 +
1171 +/*
1172 + * MTD Interface - read_word
1173 + *
1174 + * Can not be tested without x16 chip, but the SoC does not support x16 i/f.
1175 + */
1176 +static u16 bcmnand_read_word(struct mtd_info *mtd)
1177 +{
1178 + u16 w = ~0;
1179 +
1180 + w = bcmnand_read_byte(mtd);
1181 + barrier();
1182 + w |= bcmnand_read_byte(mtd) << 8;
1183 +
1184 + return w;
1185 +}
1186 +
1187 +/*
1188 + * MTD Interface - select a chip from an array
1189 + */
1190 +static void bcmnand_select_chip(struct mtd_info *mtd, int chip)
1191 +{
1192 + struct nand_chip *nand = mtd->priv;
1193 + struct bcmnand_ctrl *ctrl = nand->priv;
1194 +
1195 + ctrl->chip_num = chip;
1196 + bcmnand_reg_write(ctrl, NANDC_CMD_CS_SEL, chip);
1197 +}
1198 +
1199 +/*
1200 + * NAND Interface - emulate low-level NAND commands
1201 + *
1202 + * Only a few low-level commands are really needed by generic NAND,
1203 + * and they do not call for CMD_LL operations the controller can support.
1204 + */
1205 +static void bcmnand_cmdfunc(struct mtd_info *mtd, unsigned int command,
1206 + int column, int page_addr)
1207 +{
1208 + struct nand_chip *nand = mtd->priv;
1209 + struct bcmnand_ctrl *ctrl = nand->priv;
1210 + struct device *dev = &ctrl->core->dev;
1211 + u64 nand_addr;
1212 + unsigned int to = 1;
1213 +
1214 + ctrl->last_cmd = command;
1215 +
1216 + /* Set address for some commands */
1217 + switch (command) {
1218 + case NAND_CMD_ERASE1:
1219 + column = 0;
1220 + /*FALLTHROUGH*/
1221 + case NAND_CMD_SEQIN:
1222 + case NAND_CMD_READ0:
1223 + case NAND_CMD_READ1:
1224 + WARN_ON(column >= mtd->writesize);
1225 + nand_addr = (u64) column |
1226 + ((u64)page_addr << nand->page_shift);
1227 + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
1228 + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr);
1229 + break;
1230 + case NAND_CMD_ERASE2:
1231 + case NAND_CMD_RESET:
1232 + case NAND_CMD_READID:
1233 + case NAND_CMD_READOOB:
1234 + case NAND_CMD_PAGEPROG:
1235 + default:
1236 + /* Do nothing, address not used */
1237 + break;
1238 + }
1239 +
1240 + /* Issue appropriate command to controller */
1241 + switch (command) {
1242 + case NAND_CMD_SEQIN:
1243 + /* Only need to load command address, done */
1244 + return;
1245 +
1246 + case NAND_CMD_RESET:
1247 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1248 + NANDC_CMD_OPCODE_FLASH_RESET);
1249 + to = 1 << 8;
1250 + break;
1251 +
1252 + case NAND_CMD_READID:
1253 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1254 + NANDC_CMD_OPCODE_DEVID_READ);
1255 + ctrl->id_byte_index = 0;
1256 + to = 1 << 8;
1257 + break;
1258 +
1259 + case NAND_CMD_READ0:
1260 + case NAND_CMD_READ1:
1261 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1262 + NANDC_CMD_OPCODE_PAGE_READ);
1263 + to = 1 << 15;
1264 + break;
1265 + case NAND_CMD_STATUS:
1266 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1267 + NANDC_CMD_OPCODE_STATUS_READ);
1268 + to = 1 << 8;
1269 + break;
1270 + case NAND_CMD_ERASE1:
1271 + return;
1272 +
1273 + case NAND_CMD_ERASE2:
1274 + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1275 + NANDC_CMD_OPCODE_BLOCK_ERASE);
1276 + to = 1 << 18;
1277 + break;
1278 +
1279 + case NAND_CMD_PAGEPROG:
1280 + /* Cmd already set from write_page */
1281 + return;
1282 +
1283 + case NAND_CMD_READOOB:
1284 + /* Emulate simple interface */
1285 + bcmnand_read_oob(mtd, nand, page_addr);
1286 + ctrl->oob_index = 0;
1287 + return;
1288 +
1289 + default:
1290 +		dev_err(dev, "got unknown command: 0x%x in cmdfunc\n",
1291 + ctrl->last_cmd);
1292 + }
1293 +
1294 + /* Wait for command to complete */
1295 + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1296 +
1297 +}
1298 +
1299 +static int bcmnand_scan(struct mtd_info *mtd)
1300 +{
1301 + struct nand_chip *nand = mtd->priv;
1302 + struct bcmnand_ctrl *ctrl = nand->priv;
1303 + struct device *dev = &ctrl->core->dev;
1304 + bool sector_1k = false;
1305 + unsigned int chip_num = 0;
1306 + int ecc_level = 0;
1307 + int ret;
1308 +
1309 + ret = nand_scan_ident(mtd, NANDC_MAX_CHIPS, NULL);
1310 + if (ret)
1311 + return ret;
1312 +
1313 + /* Get configuration from first chip */
1314 + sector_1k = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_SECTOR_1K(0));
1315 + ecc_level = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0));
1316 + mtd->writesize_shift = nand->page_shift;
1317 +
1318 + ctrl->ecc_level = ecc_level;
1319 + ctrl->sector_size_shift = sector_1k ? 10 : 9;
1320 +
1321 + /* Configure spare area, tweak as needed */
1322 + do {
1323 + ctrl->sec_per_page_shift =
1324 + mtd->writesize_shift - ctrl->sector_size_shift;
1325 +
1326 + /* will return -EINVAL if OOB space exhausted */
1327 + ret = bcmnand_hw_ecc_layout(ctrl);
1328 +
1329 + /* First try to bump sector size to 1k, then decrease level */
1330 + if (ret && nand->page_shift > 9 && ctrl->sector_size_shift < 10)
1331 + ctrl->sector_size_shift = 10;
1332 + else if (ret)
1333 + ctrl->ecc_level--;
1334 +
1335 + } while (ret && ctrl->ecc_level > 0);
1336 +
1337 + if (WARN_ON(ctrl->ecc_level == 0))
1338 + return -ENOENT;
1339 +
1340 + if ((ctrl->sector_size_shift > 9) != (sector_1k == 1)) {
1341 + dev_info(dev, "sector size adjusted to 1k\n");
1342 + sector_1k = 1;
1343 + }
1344 +
1345 + if (ecc_level != ctrl->ecc_level) {
1346 + dev_info(dev, "ECC level adjusted from %u to %u\n",
1347 + ecc_level, ctrl->ecc_level);
1348 + ecc_level = ctrl->ecc_level;
1349 + }
1350 +
1351 + /* handle the hardware chip config registers */
1352 + for (chip_num = 0; chip_num < nand->numchips; chip_num++) {
1353 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_SECTOR_1K(chip_num),
1354 + sector_1k);
1355 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip_num),
1356 + ecc_level);
1357 +
1358 + /* Large pages: no partial page programming */
1359 + if (mtd->writesize > 512) {
1360 + bcmnand_reg_write(ctrl,
1361 + NANDC_ACC_CTRL_PGM_RDIN(chip_num), 0);
1362 + bcmnand_reg_write(ctrl,
1363 + NANDC_ACC_CTRL_PGM_PARTIAL(chip_num), 0);
1364 + }
1365 +
1366 + /* Do not raise ECC error when reading erased pages */
1367 + /* This bit has only partial effect, driver needs to help */
1368 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ERA_ECC_ERR(chip_num),
1369 + 0);
1370 +
1371 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PG_HIT(chip_num), 0);
1372 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PREFETCH(chip_num), 0);
1373 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_MODE(chip_num), 0);
1374 + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_LASTPG(chip_num),
1375 + 0);
1376 +
1377 + /* TBD: consolidate or at least verify the s/w and h/w geometries agree */
1378 + }
1379 +
1380 + /* Allow writing on device */
1381 + if (!(nand->options & NAND_ROM))
1382 + bcmnand_reg_write(ctrl, NANDC_CS_NAND_WP, 0);
1383 +
1384 + dev_dbg(dev, "layout.oobavail=%d\n", nand->ecc.layout->oobavail);
1385 +
1386 + ret = nand_scan_tail(mtd);
1387 +
1388 + if (nand->badblockbits == 0)
1389 + nand->badblockbits = 8;
1390 + if (WARN_ON((1 << nand->page_shift) != mtd->writesize))
1391 + return -EIO;
1392 +
1393 + /* Spit out some key chip parameters as detected by nand_base */
1394 + dev_dbg(dev, "erasesize=%d writesize=%d oobsize=%d page_shift=%d badblockpos=%d badblockbits=%d\n",
1395 + mtd->erasesize, mtd->writesize, mtd->oobsize,
1396 + nand->page_shift, nand->badblockpos, nand->badblockbits);
1397 +
1398 + return ret;
1399 +}
1400 +
1401 +/*
1402 + * main initialization function
1403 + */
1404 +static int bcmnand_ctrl_init(struct bcmnand_ctrl *ctrl)
1405 +{
1406 + unsigned int chip;
1407 + struct nand_chip *nand;
1408 + struct mtd_info *mtd;
1409 + struct device *dev = &ctrl->core->dev;
1410 + int ret;
1411 +
1412 + /* Software variables init */
1413 + nand = &ctrl->nand;
1414 + mtd = &ctrl->mtd;
1415 +
1416 + init_completion(&ctrl->op_completion);
1417 +
1418 + mtd->priv = nand;
1419 + mtd->owner = THIS_MODULE;
1420 + mtd->name = KBUILD_MODNAME;
1421 +
1422 + nand->priv = ctrl;
1423 +
1424 + nand->chip_delay = 5; /* not used */
1425 + nand->IO_ADDR_R = nand->IO_ADDR_W = (void *)~0L;
1426 +
1427 + if (bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_WIDTH(0)))
1428 + nand->options |= NAND_BUSWIDTH_16;
1429 + nand->options |= NAND_SKIP_BBTSCAN; /* Dont need BBTs */
1430 +
1431 + nand->options |= NAND_NO_SUBPAGE_WRITE; /* Subpages unsupported */
1432 +
1433 + nand->dev_ready = bcmnand_dev_ready;
1434 + nand->read_byte = bcmnand_read_byte;
1435 + nand->read_word = bcmnand_read_word;
1436 + nand->select_chip = bcmnand_select_chip;
1437 + nand->cmdfunc = bcmnand_cmdfunc;
1438 + nand->waitfunc = bcmnand_waitfunc;
1439 +
1440 + nand->ecc.mode = NAND_ECC_HW;
1441 + nand->ecc.read_page_raw = bcmnand_read_page_raw;
1442 + nand->ecc.write_page_raw = bcmnand_write_page_raw;
1443 + nand->ecc.read_page = bcmnand_read_page_ecc;
1444 + nand->ecc.write_page = bcmnand_write_page_ecc;
1445 + nand->ecc.read_oob = bcmnand_read_oob;
1446 + nand->ecc.write_oob = bcmnand_write_oob;
1447 +
1448 + /* Set AUTO_CNFIG bit - try to auto-detect chips */
1449 + bcmnand_reg_write(ctrl, NANDC_CS_AUTO_CONFIG, 1);
1450 +
1451 + usleep_range(1000, 1500);
1452 +
1453 + /* Print out current chip config */
1454 + for (chip = 0; chip < NANDC_MAX_CHIPS; chip++) {
1455 + dev_dbg(dev, "chip[%d]: size=%#x block=%#x page=%#x ecc_level=%#x\n",
1456 + chip,
1457 + bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_SIZE(chip)),
1458 + bcmnand_reg_read(ctrl, NANDC_CONFIG_BLK_SIZE(chip)),
1459 + bcmnand_reg_read(ctrl, NANDC_CONFIG_PAGE_SIZE(chip)),
1460 + bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip)));
1461 + }
1462 +
1463 +	dev_dbg(dev, "NAND controller ready=%d\n",
1464 + bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY));
1465 +
1466 + ret = bcmnand_scan(mtd);
1467 + if (ret) {
1468 + dev_err(dev, "scanning the nand flash chip failed with %i\n",
1469 + ret);
1470 + return ret;
1471 + }
1472 +
1473 + return 0;
1474 +}
1475 +
1476 +static int bcmnand_idm_init(struct bcmnand_ctrl *ctrl)
1477 +{
1478 + int irq_off;
1479 + unsigned int retries = 0x1000;
1480 + struct device *dev = &ctrl->core->dev;
1481 +
1482 + if (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET))
1483 + dev_info(dev, "stuck in reset\n");
1484 +
1485 + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 1);
1486 + if (!bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1487 +		dev_err(dev, "reset failed\n");
1488 + return -EIO;
1489 + }
1490 +
1491 + while (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
1492 + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 0);
1493 + cpu_relax();
1494 + usleep_range(100, 150);
1495 + if (!(retries--)) {
1496 +			dev_err(dev, "did not come back from reset\n");
1497 + return -ETIMEDOUT;
1498 + }
1499 + }
1500 +
1501 + bcmnand_reg_awrite(ctrl, NANDC_IDM_CLOCK_EN, 1);
1502 + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1503 + udelay(10);
1504 +
1505 + dev_info(dev, "NAND Controller rev %d.%d\n",
1506 + bcmnand_reg_read(ctrl, NANDC_REV_MAJOR),
1507 + bcmnand_reg_read(ctrl, NANDC_REV_MINOR));
1508 +
1509 + usleep_range(250, 350);
1510 +
1511 + /* Disable all IRQs */
1512 + for (irq_off = 0; irq_off < NANDC_IRQ_NUM; irq_off++)
1513 + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
1514 +
1515 + return 0;
1516 +}
1517 +
1518 +static const char * const part_probes[] = { "bcm47xxpart", "cmdlinepart", NULL };
1519 +
1520 +/*
1521 + * Top-level init function
1522 + */
1523 +static int bcmnand_probe(struct bcma_device *core)
1524 +{
1525 + struct device *dev = &core->dev;
1526 + struct bcmnand_ctrl *ctrl;
1527 + int res, i, irq;
1528 +
1529 + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
1530 + if (!ctrl)
1531 + return -ENOMEM;
1532 +
1533 + bcma_set_drvdata(core, ctrl);
1534 +
1535 + ctrl->mtd.dev.parent = &core->dev;
1536 + ctrl->core = core;
1537 +
1538 + /* Acquire all interrupt lines */
1539 + for (i = 0; i < NANDC_IRQ_NUM; i++) {
1540 + irq = bcma_core_irq(core, i);
1541 + if (!irq) {
1542 + dev_err(dev, "IRQ idx %i not available\n", i);
1543 + return -ENOENT;
1544 + }
1545 + res = devm_request_irq(dev, irq, bcmnand_isr, 0,
1546 + KBUILD_MODNAME, ctrl);
1547 + if (res < 0) {
1548 + dev_err(dev, "problem requesting irq: %i (idx: %i)\n",
1549 + irq, i);
1550 + return res;
1551 + }
1552 + }
1553 +
1554 + res = bcmnand_idm_init(ctrl);
1555 + if (res)
1556 + return res;
1557 +
1558 + res = bcmnand_ctrl_init(ctrl);
1559 + if (res)
1560 + return res;
1561 +
1562 + res = mtd_device_parse_register(&ctrl->mtd, part_probes, NULL, NULL, 0);
1563 + if (res) {
1564 + dev_err(dev, "Failed to register MTD device: %d\n", res);
1565 + return res;
1566 + }
1567 + return 0;
1568 +}
1569 +
1570 +static void bcmnand_remove(struct bcma_device *core)
1571 +{
1572 + struct bcmnand_ctrl *ctrl = bcma_get_drvdata(core);
1573 +
1574 + mtd_device_unregister(&ctrl->mtd);
1575 +}
1576 +
1577 +static const struct bcma_device_id bcmnand_bcma_tbl[] = {
1578 + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_NAND, BCMA_ANY_REV, BCMA_ANY_CLASS),
1579 + BCMA_CORETABLE_END
1580 +};
1581 +MODULE_DEVICE_TABLE(bcma, bcmnand_bcma_tbl);
1582 +
1583 +static struct bcma_driver bcmnand_bcma_driver = {
1584 + .name = KBUILD_MODNAME,
1585 + .id_table = bcmnand_bcma_tbl,
1586 + .probe = bcmnand_probe,
1587 + .remove = bcmnand_remove,
1588 +};
1589 +
1590 +static int __init bcmnand_init(void)
1591 +{
1592 + return bcma_driver_register(&bcmnand_bcma_driver);
1593 +}
1594 +
1595 +static void __exit bcmnand_exit(void)
1596 +{
1597 + bcma_driver_unregister(&bcmnand_bcma_driver);
1598 +}
1599 +
1600 +module_init(bcmnand_init)
1601 +module_exit(bcmnand_exit)
1602 +
1603 +MODULE_LICENSE("GPL");
1604 +MODULE_AUTHOR("Hauke Mehrtens");
1605 +MODULE_DESCRIPTION("Northstar on-chip NAND Flash Controller driver");