octeon: Add MMC support for EdgeRouter ER8
[openwrt/svn-archive/archive.git] / target / linux / octeon / patches-3.18 / 150-mmc-octeon-add-host-driver-for-octeon-mmc-controller.patch
1 diff --git a/Documentation/devicetree/bindings/mmc/octeon-mmc.txt b/Documentation/devicetree/bindings/mmc/octeon-mmc.txt
2 new file mode 100644
3 index 0000000..40dd7f1
4 --- /dev/null
5 +++ b/Documentation/devicetree/bindings/mmc/octeon-mmc.txt
6 @@ -0,0 +1,69 @@
7 +* OCTEON SD/MMC Host Controller
8 +
9 +This controller is present on some members of the Cavium OCTEON SoC
10 +family, providing an interface for eMMC, MMC and SD devices. There is a
11 +single controller that may have several "slots" connected. These
12 +slots appear as children of the main controller node.
13 +The DMA engine is an integral part of the controller block.
14 +
15 +Required properties:
16 +- compatible : Should be "cavium,octeon-6130-mmc" or "cavium,octeon-7890-mmc"
17 +- reg : Two entries:
18 + 1) The base address of the MMC controller register bank.
19 + 2) The base address of the MMC DMA engine register bank.
20 +- interrupts :
21 + For "cavium,octeon-6130-mmc": two entries:
22 + 1) The MMC controller interrupt line.
23 + 2) The MMC DMA engine interrupt line.
24 + For "cavium,octeon-7890-mmc": nine entries:
25 + 1) The next block transfer of a multiblock transfer has completed (BUF_DONE)
26 + 2) Operation completed successfully (CMD_DONE).
27 + 3) DMA transfer completed successfully (DMA_DONE).
28 + 4) Operation encountered an error (CMD_ERR).
29 + 5) DMA transfer encountered an error (DMA_ERR).
30 + 6) Switch operation completed successfully (SWITCH_DONE).
31 + 7) Switch operation encountered an error (SWITCH_ERR).
32 + 8) Internal DMA engine request completion interrupt (DONE).
33 + 9) Internal DMA FIFO underflow (FIFO).
34 +- #address-cells : Must be <1>
35 +- #size-cells : Must be <0>
36 +
37 +Required properties of child nodes:
38 +- compatible : Should be "cavium,octeon-6130-mmc-slot".
39 +- reg : The slot number.
40 +
41 +Optional properties of child nodes:
42 +- cd-gpios : Specify GPIOs for card detection
43 +- wp-gpios : Specify GPIOs for write protection
44 +- power-gpios : Specify GPIOs for power control
45 +- cavium,bus-max-width : The number of data lines present in the slot.
46 + Default is 8.
47 +- spi-max-frequency : The maximum operating frequency of the slot.
48 + Default is 52000000.
49 +- cavium,cmd-clk-skew : the amount of delay (in ps) past the clock edge
50 + to sample the command pin.
51 +- cavium,dat-clk-skew : the amount of delay (in ps) past the clock edge
52 + to sample the data pin.
53 +
54 +Example:
55 + mmc@1180000002000 {
56 + compatible = "cavium,octeon-6130-mmc";
57 + reg = <0x11800 0x00002000 0x0 0x100>,
58 + <0x11800 0x00000168 0x0 0x20>;
59 + #address-cells = <1>;
60 + #size-cells = <0>;
61 + /* EMM irq, DMA irq */
62 + interrupts = <1 19>, <0 63>;
63 +
64 + /* The board only has a single MMC slot */
65 + mmc-slot@0 {
66 + compatible = "cavium,octeon-6130-mmc-slot";
67 + reg = <0>;
68 + spi-max-frequency = <20000000>;
69 + /* bus width can be 1, 4 or 8 */
70 + cavium,bus-max-width = <8>;
71 + cd-gpios = <&gpio 9 0>;
72 + wp-gpios = <&gpio 10 0>;
73 + power-gpios = <&gpio 8 0>;
74 + };
75 + };
76 diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
77 index 2d6fbdd..5077dfa 100644
78 --- a/drivers/mmc/host/Kconfig
79 +++ b/drivers/mmc/host/Kconfig
80 @@ -338,6 +338,16 @@
81
82 If unsure, say N.
83
84 +config MMC_OCTEON
85 + tristate "Cavium OCTEON Multimedia Card Interface support"
86 + depends on CAVIUM_OCTEON_SOC
87 + help
88 + This selects Cavium OCTEON Multimedia card Interface.
89 + If you have an OCTEON board with a Multimedia Card slot,
90 + say Y or M here.
91 +
92 + If unsure, say N.
93 +
94 config MMC_TIFM_SD
95 tristate "TI Flash Media MMC/SD Interface support"
96 depends on PCI
97 diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
98 index f7b0a77..448bbc9 100644
99 --- a/drivers/mmc/host/Makefile
100 +++ b/drivers/mmc/host/Makefile
101 @@ -19,6 +19,7 @@ obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
102 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
103 obj-$(CONFIG_MMC_WBSD) += wbsd.o
104 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
105 +obj-$(CONFIG_MMC_OCTEON) += octeon_mmc.o
106 obj-$(CONFIG_MMC_OMAP) += omap.o
107 obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
108 obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
109 diff --git a/drivers/mmc/host/octeon_mmc.c b/drivers/mmc/host/octeon_mmc.c
110 new file mode 100644
111 index 0000000..baba5a0
112 --- /dev/null
113 +++ b/drivers/mmc/host/octeon_mmc.c
114 @@ -0,0 +1,1518 @@
115 +/*
116 + * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
117 + *
118 + * This file is subject to the terms and conditions of the GNU General Public
119 + * License. See the file "COPYING" in the main directory of this archive
120 + * for more details.
121 + *
122 + * Copyright (C) 2012-2014 Cavium Inc.
123 + */
124 +
125 +#include <linux/platform_device.h>
126 +#include <linux/of_platform.h>
127 +#include <linux/scatterlist.h>
128 +#include <linux/interrupt.h>
129 +#include <linux/of_gpio.h>
130 +#include <linux/blkdev.h>
131 +#include <linux/device.h>
132 +#include <linux/module.h>
133 +#include <linux/delay.h>
134 +#include <linux/init.h>
135 +#include <linux/clk.h>
136 +#include <linux/err.h>
137 +#include <linux/io.h>
138 +#include <linux/of.h>
139 +
140 +#include <linux/mmc/card.h>
141 +#include <linux/mmc/host.h>
142 +#include <linux/mmc/mmc.h>
143 +#include <linux/mmc/sd.h>
144 +#include <net/irda/parameters.h>
145 +
146 +#include <asm/byteorder.h>
147 +#include <asm/octeon/octeon.h>
148 +#include <asm/octeon/cvmx-mio-defs.h>
149 +
150 +#define DRV_NAME "octeon_mmc"
151 +
152 +#define OCTEON_MAX_MMC 4
153 +
154 +#define OCT_MIO_NDF_DMA_CFG 0x00
155 +#define OCT_MIO_EMM_DMA_ADR 0x08
156 +
157 +#define OCT_MIO_EMM_CFG 0x00
158 +#define OCT_MIO_EMM_SWITCH 0x48
159 +#define OCT_MIO_EMM_DMA 0x50
160 +#define OCT_MIO_EMM_CMD 0x58
161 +#define OCT_MIO_EMM_RSP_STS 0x60
162 +#define OCT_MIO_EMM_RSP_LO 0x68
163 +#define OCT_MIO_EMM_RSP_HI 0x70
164 +#define OCT_MIO_EMM_INT 0x78
165 +#define OCT_MIO_EMM_INT_EN 0x80
166 +#define OCT_MIO_EMM_WDOG 0x88
167 +#define OCT_MIO_EMM_SAMPLE 0x90
168 +#define OCT_MIO_EMM_STS_MASK 0x98
169 +#define OCT_MIO_EMM_RCA 0xa0
170 +#define OCT_MIO_EMM_BUF_IDX 0xe0
171 +#define OCT_MIO_EMM_BUF_DAT 0xe8
172 +
173 +#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
174 +
175 +struct octeon_mmc_host {
176 + u64 base;
177 + u64 ndf_base;
178 + u64 emm_cfg;
179 + u64 n_minus_one; /* OCTEON II workaround location */
180 + int last_slot;
181 +
182 + struct semaphore mmc_serializer;
183 + struct mmc_request *current_req;
184 + unsigned int linear_buf_size;
185 + void *linear_buf;
186 + struct sg_mapping_iter smi;
187 + int sg_idx;
188 + bool dma_active;
189 +
190 + struct platform_device *pdev;
191 + int global_pwr_gpio;
192 + bool global_pwr_gpio_low;
193 + bool dma_err_pending;
194 + bool need_bootbus_lock;
195 + bool big_dma_addr;
196 + bool need_irq_handler_lock;
197 + spinlock_t irq_handler_lock;
198 +
199 + struct octeon_mmc_slot *slot[OCTEON_MAX_MMC];
200 +};
201 +
202 +struct octeon_mmc_slot {
203 + struct mmc_host *mmc; /* slot-level mmc_core object */
204 + struct octeon_mmc_host *host; /* common hw for all 4 slots */
205 +
206 + unsigned int clock;
207 + unsigned int sclock;
208 +
209 + u64 cached_switch;
210 + u64 cached_rca;
211 +
212 + unsigned int cmd_cnt; /* sample delay */
213 + unsigned int dat_cnt; /* sample delay */
214 +
215 + int bus_width;
216 + int bus_id;
217 + int ro_gpio;
218 + int cd_gpio;
219 + int pwr_gpio;
220 + bool cd_gpio_low;
221 + bool ro_gpio_low;
222 + bool pwr_gpio_low;
223 +};
224 +
225 +static int bb_size = 1 << 16;
226 +module_param(bb_size, int, S_IRUGO);
227 +MODULE_PARM_DESC(bb_size,
228 + "Size of DMA linearizing buffer (max transfer size).");
229 +
230 +static int ddr = 2;
231 +module_param(ddr, int, S_IRUGO);
232 +MODULE_PARM_DESC(ddr,
233 + "enable DoubleDataRate clocking: 0=no, 1=always, 2=at spi-max-frequency/2");
234 +
235 +#if 0
236 +#define octeon_mmc_dbg trace_printk
237 +#else
238 +static inline void octeon_mmc_dbg(const char *s, ...) { }
239 +#endif
240 +
241 +static void octeon_mmc_acquire_bus(struct octeon_mmc_host *host)
242 +{
243 + if (host->need_bootbus_lock) {
244 + down(&octeon_bootbus_sem);
245 + /* On cn70XX switch the mmc unit onto the bus. */
246 + if (OCTEON_IS_MODEL(OCTEON_CN70XX))
247 + cvmx_write_csr(CVMX_MIO_BOOT_CTL, 0);
248 + } else {
249 + down(&host->mmc_serializer);
250 + }
251 +}
252 +
253 +static void octeon_mmc_release_bus(struct octeon_mmc_host *host)
254 +{
255 + if (host->need_bootbus_lock)
256 + up(&octeon_bootbus_sem);
257 + else
258 + up(&host->mmc_serializer);
259 +}
260 +
261 +struct octeon_mmc_cr_type {
262 + u8 ctype;
263 + u8 rtype;
264 +};
265 +
266 +/*
267 + * The OCTEON MMC host hardware assumes that all commands have fixed
268 + * command and response types. These are correct if MMC devices are
269 + * being used. However, non-MMC devices like SD use command and
270 + * response types that are unexpected by the host hardware.
271 + *
272 + * The command and response types can be overridden by supplying an
273 + * XOR value that is applied to the type. We calculate the XOR value
274 + * from the values in this table and the flags passed from the MMC
275 + * core.
276 + */
277 +static struct octeon_mmc_cr_type octeon_mmc_cr_types[] = {
278 + {0, 0}, /* CMD0 */
279 + {0, 3}, /* CMD1 */
280 + {0, 2}, /* CMD2 */
281 + {0, 1}, /* CMD3 */
282 + {0, 0}, /* CMD4 */
283 + {0, 1}, /* CMD5 */
284 + {0, 1}, /* CMD6 */
285 + {0, 1}, /* CMD7 */
286 + {1, 1}, /* CMD8 */
287 + {0, 2}, /* CMD9 */
288 + {0, 2}, /* CMD10 */
289 + {1, 1}, /* CMD11 */
290 + {0, 1}, /* CMD12 */
291 + {0, 1}, /* CMD13 */
292 + {1, 1}, /* CMD14 */
293 + {0, 0}, /* CMD15 */
294 + {0, 1}, /* CMD16 */
295 + {1, 1}, /* CMD17 */
296 + {1, 1}, /* CMD18 */
297 + {3, 1}, /* CMD19 */
298 + {2, 1}, /* CMD20 */
299 + {0, 0}, /* CMD21 */
300 + {0, 0}, /* CMD22 */
301 + {0, 1}, /* CMD23 */
302 + {2, 1}, /* CMD24 */
303 + {2, 1}, /* CMD25 */
304 + {2, 1}, /* CMD26 */
305 + {2, 1}, /* CMD27 */
306 + {0, 1}, /* CMD28 */
307 + {0, 1}, /* CMD29 */
308 + {1, 1}, /* CMD30 */
309 + {1, 1}, /* CMD31 */
310 + {0, 0}, /* CMD32 */
311 + {0, 0}, /* CMD33 */
312 + {0, 0}, /* CMD34 */
313 + {0, 1}, /* CMD35 */
314 + {0, 1}, /* CMD36 */
315 + {0, 0}, /* CMD37 */
316 + {0, 1}, /* CMD38 */
317 + {0, 4}, /* CMD39 */
318 + {0, 5}, /* CMD40 */
319 + {0, 0}, /* CMD41 */
320 + {2, 1}, /* CMD42 */
321 + {0, 0}, /* CMD43 */
322 + {0, 0}, /* CMD44 */
323 + {0, 0}, /* CMD45 */
324 + {0, 0}, /* CMD46 */
325 + {0, 0}, /* CMD47 */
326 + {0, 0}, /* CMD48 */
327 + {0, 0}, /* CMD49 */
328 + {0, 0}, /* CMD50 */
329 + {0, 0}, /* CMD51 */
330 + {0, 0}, /* CMD52 */
331 + {0, 0}, /* CMD53 */
332 + {0, 0}, /* CMD54 */
333 + {0, 1}, /* CMD55 */
334 + {0xff, 0xff}, /* CMD56 */
335 + {0, 0}, /* CMD57 */
336 + {0, 0}, /* CMD58 */
337 + {0, 0}, /* CMD59 */
338 + {0, 0}, /* CMD60 */
339 + {0, 0}, /* CMD61 */
340 + {0, 0}, /* CMD62 */
341 + {0, 0} /* CMD63 */
342 +};
343 +
344 +struct octeon_mmc_cr_mods {
345 + u8 ctype_xor;
346 + u8 rtype_xor;
347 +};
348 +
349 +/*
350 + * The functions below are used for the EMMC-17978 workaround.
351 + *
352 + * Due to an imperfection in the design of the MMC bus hardware,
353 + * the 2nd to last cache block of a DMA read must be locked into the L2 Cache.
354 + * Otherwise, data corruption may occur.
355 + */
356 +
357 +static inline void *phys_to_ptr(u64 address)
358 +{
359 + return (void *)(address | (1ull<<63)); /* XKPHYS */
360 +}
361 +
362 +/**
363 + * Lock a single line into L2. The line is zeroed before locking
364 + * to make sure no dram accesses are made.
365 + *
366 + * @addr Physical address to lock
367 + */
368 +static void l2c_lock_line(u64 addr)
369 +{
370 + char *addr_ptr = phys_to_ptr(addr);
371 +
372 + asm volatile (
373 + "cache 31, %[line]" /* Lock the line */
374 + :: [line] "m" (*addr_ptr));
375 +}
376 +
377 +/**
378 + * Locks a memory region in the L2 cache
379 + *
380 + * @start - start address to begin locking
381 + * @len - length in bytes to lock
382 + */
383 +static void l2c_lock_mem_region(u64 start, u64 len)
384 +{
385 + u64 end;
386 +
387 + /* Round start/end to cache line boundaries */
388 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
389 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
390 +
391 + while (start <= end) {
392 + l2c_lock_line(start);
393 + start += CVMX_CACHE_LINE_SIZE;
394 + }
395 + asm volatile("sync");
396 +}
397 +
398 +/**
399 + * Unlock a single line in the L2 cache.
400 + *
401 + * @addr Physical address to unlock
402 + *
403 + * Return Zero on success
404 + */
405 +static void l2c_unlock_line(u64 addr)
406 +{
407 + char *addr_ptr = phys_to_ptr(addr);
408 + asm volatile (
409 + "cache 23, %[line]" /* Unlock the line */
410 + :: [line] "m" (*addr_ptr));
411 +}
412 +
413 +/**
414 + * Unlock a memory region in the L2 cache
415 + *
416 + * @start - start address to unlock
417 + * @len - length to unlock in bytes
418 + */
419 +static void l2c_unlock_mem_region(u64 start, u64 len)
420 +{
421 + u64 end;
422 +
423 + /* Round start/end to cache line boundaries */
424 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
425 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
426 +
427 + while (start <= end) {
428 + l2c_unlock_line(start);
429 + start += CVMX_CACHE_LINE_SIZE;
430 + }
431 +}
432 +
433 +static struct octeon_mmc_cr_mods octeon_mmc_get_cr_mods(struct mmc_command *cmd)
434 +{
435 + struct octeon_mmc_cr_type *cr;
436 + u8 desired_ctype, hardware_ctype;
437 + u8 desired_rtype, hardware_rtype;
438 + struct octeon_mmc_cr_mods r;
439 +
440 + desired_ctype = desired_rtype = 0;
441 +
442 + cr = octeon_mmc_cr_types + (cmd->opcode & 0x3f);
443 + hardware_ctype = cr->ctype;
444 + hardware_rtype = cr->rtype;
445 + if (cmd->opcode == 56) { /* CMD56 GEN_CMD */
446 + hardware_ctype = (cmd->arg & 1) ? 1 : 2;
447 + }
448 +
449 + switch (mmc_cmd_type(cmd)) {
450 + case MMC_CMD_ADTC:
451 + desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
452 + break;
453 + case MMC_CMD_AC:
454 + case MMC_CMD_BC:
455 + case MMC_CMD_BCR:
456 + desired_ctype = 0;
457 + break;
458 + }
459 +
460 + switch (mmc_resp_type(cmd)) {
461 + case MMC_RSP_NONE:
462 + desired_rtype = 0;
463 + break;
464 + case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
465 + case MMC_RSP_R1B:
466 + desired_rtype = 1;
467 + break;
468 + case MMC_RSP_R2:
469 + desired_rtype = 2;
470 + break;
471 + case MMC_RSP_R3: /* MMC_RSP_R4 */
472 + desired_rtype = 3;
473 + break;
474 + }
475 + r.ctype_xor = desired_ctype ^ hardware_ctype;
476 + r.rtype_xor = desired_rtype ^ hardware_rtype;
477 + return r;
478 +}
479 +
480 +static bool octeon_mmc_switch_val_changed(struct octeon_mmc_slot *slot,
481 + u64 new_val)
482 +{
483 + /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
484 + u64 m = 0x3001070fffffffffull;
485 +
486 + return (slot->cached_switch & m) != (new_val & m);
487 +}
488 +
489 +static unsigned int octeon_mmc_timeout_to_wdog(struct octeon_mmc_slot *slot,
490 + unsigned int ns)
491 +{
492 + u64 bt = (u64)slot->clock * (u64)ns;
493 +
494 + return (unsigned int)(bt / 1000000000);
495 +}
496 +
497 +static irqreturn_t octeon_mmc_interrupt(int irq, void *dev_id)
498 +{
499 + struct octeon_mmc_host *host = dev_id;
500 + union cvmx_mio_emm_int emm_int;
501 + struct mmc_request *req;
502 + bool host_done;
503 + union cvmx_mio_emm_rsp_sts rsp_sts;
504 + unsigned long flags = 0;
505 +
506 + if (host->need_irq_handler_lock)
507 + spin_lock_irqsave(&host->irq_handler_lock, flags);
508 + emm_int.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
509 + req = host->current_req;
510 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
511 +
512 + octeon_mmc_dbg("Got interrupt: EMM_INT = 0x%llx\n", emm_int.u64);
513 +
514 + if (!req)
515 + goto out;
516 +
517 + rsp_sts.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
518 + octeon_mmc_dbg("octeon_mmc_interrupt MIO_EMM_RSP_STS 0x%llx\n",
519 + rsp_sts.u64);
520 +
521 + if (host->dma_err_pending) {
522 + host->current_req = NULL;
523 + host->dma_err_pending = false;
524 + req->done(req);
525 + host_done = true;
526 + goto no_req_done;
527 + }
528 +
529 + if (!host->dma_active && emm_int.s.buf_done && req->data) {
530 + unsigned int type = (rsp_sts.u64 >> 7) & 3;
531 +
532 + if (type == 1) {
533 + /* Read */
534 + int dbuf = rsp_sts.s.dbuf;
535 + struct sg_mapping_iter *smi = &host->smi;
536 + unsigned int data_len =
537 + req->data->blksz * req->data->blocks;
538 + unsigned int bytes_xfered;
539 + u64 dat = 0;
540 + int shift = -1;
541 +
542 + /* Auto inc from offset zero */
543 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
544 + (u64)(0x10000 | (dbuf << 6)));
545 +
546 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
547 + if (smi->consumed >= smi->length) {
548 + if (!sg_miter_next(smi))
549 + break;
550 + smi->consumed = 0;
551 + }
552 + if (shift < 0) {
553 + dat = cvmx_read_csr(host->base +
554 + OCT_MIO_EMM_BUF_DAT);
555 + shift = 56;
556 + }
557 +
558 + while (smi->consumed < smi->length &&
559 + shift >= 0) {
560 + ((u8 *)(smi->addr))[smi->consumed] =
561 + (dat >> shift) & 0xff;
562 + bytes_xfered++;
563 + smi->consumed++;
564 + shift -= 8;
565 + }
566 + }
567 + sg_miter_stop(smi);
568 + req->data->bytes_xfered = bytes_xfered;
569 + req->data->error = 0;
570 + } else if (type == 2) {
571 + /* write */
572 + req->data->bytes_xfered = req->data->blksz *
573 + req->data->blocks;
574 + req->data->error = 0;
575 + }
576 + }
577 + host_done = emm_int.s.cmd_done || emm_int.s.dma_done ||
578 + emm_int.s.cmd_err || emm_int.s.dma_err;
579 + if (host_done && req->done) {
580 + if (rsp_sts.s.rsp_bad_sts ||
581 + rsp_sts.s.rsp_crc_err ||
582 + rsp_sts.s.rsp_timeout ||
583 + rsp_sts.s.blk_crc_err ||
584 + rsp_sts.s.blk_timeout ||
585 + rsp_sts.s.dbuf_err) {
586 + req->cmd->error = -EILSEQ;
587 + } else {
588 + req->cmd->error = 0;
589 + }
590 +
591 + if (host->dma_active && req->data) {
592 + req->data->error = 0;
593 + req->data->bytes_xfered = req->data->blocks *
594 + req->data->blksz;
595 + if (!(req->data->flags & MMC_DATA_WRITE) &&
596 + req->data->sg_len > 1) {
597 + size_t r = sg_copy_from_buffer(req->data->sg,
598 + req->data->sg_len, host->linear_buf,
599 + req->data->bytes_xfered);
600 + WARN_ON(r != req->data->bytes_xfered);
601 + }
602 + }
603 + if (rsp_sts.s.rsp_val) {
604 + u64 rsp_hi;
605 + u64 rsp_lo = cvmx_read_csr(
606 + host->base + OCT_MIO_EMM_RSP_LO);
607 +
608 + switch (rsp_sts.s.rsp_type) {
609 + case 1:
610 + case 3:
611 + req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
612 + req->cmd->resp[1] = 0;
613 + req->cmd->resp[2] = 0;
614 + req->cmd->resp[3] = 0;
615 + break;
616 + case 2:
617 + req->cmd->resp[3] = rsp_lo & 0xffffffff;
618 + req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
619 + rsp_hi = cvmx_read_csr(host->base +
620 + OCT_MIO_EMM_RSP_HI);
621 + req->cmd->resp[1] = rsp_hi & 0xffffffff;
622 + req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
623 + break;
624 + default:
625 + octeon_mmc_dbg("octeon_mmc_interrupt unhandled rsp_val %d\n",
626 + rsp_sts.s.rsp_type);
627 + break;
628 + }
629 + octeon_mmc_dbg("octeon_mmc_interrupt resp %08x %08x %08x %08x\n",
630 + req->cmd->resp[0], req->cmd->resp[1],
631 + req->cmd->resp[2], req->cmd->resp[3]);
632 + }
633 + if (emm_int.s.dma_err && rsp_sts.s.dma_pend) {
634 + /* Try to clean up failed DMA */
635 + union cvmx_mio_emm_dma emm_dma;
636 +
637 + emm_dma.u64 =
638 + cvmx_read_csr(host->base + OCT_MIO_EMM_DMA);
639 + emm_dma.s.dma_val = 1;
640 + emm_dma.s.dat_null = 1;
641 + emm_dma.s.bus_id = rsp_sts.s.bus_id;
642 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA,
643 + emm_dma.u64);
644 + host->dma_err_pending = true;
645 + host_done = false;
646 + goto no_req_done;
647 + }
648 +
649 + host->current_req = NULL;
650 + req->done(req);
651 + }
652 +no_req_done:
653 + if (host->n_minus_one) {
654 + l2c_unlock_mem_region(host->n_minus_one, 512);
655 + host->n_minus_one = 0;
656 + }
657 + if (host_done)
658 + octeon_mmc_release_bus(host);
659 +out:
660 + if (host->need_irq_handler_lock)
661 + spin_unlock_irqrestore(&host->irq_handler_lock, flags);
662 + return IRQ_RETVAL(emm_int.u64 != 0);
663 +}
664 +
665 +static void octeon_mmc_switch_to(struct octeon_mmc_slot *slot)
666 +{
667 + struct octeon_mmc_host *host = slot->host;
668 + struct octeon_mmc_slot *old_slot;
669 + union cvmx_mio_emm_switch sw;
670 + union cvmx_mio_emm_sample samp;
671 +
672 + if (slot->bus_id == host->last_slot)
673 + goto out;
674 +
675 + if (host->last_slot >= 0) {
676 + old_slot = host->slot[host->last_slot];
677 + old_slot->cached_switch =
678 + cvmx_read_csr(host->base + OCT_MIO_EMM_SWITCH);
679 + old_slot->cached_rca =
680 + cvmx_read_csr(host->base + OCT_MIO_EMM_RCA);
681 + }
682 + cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, slot->cached_rca);
683 + sw.u64 = slot->cached_switch;
684 + sw.s.bus_id = 0;
685 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
686 + sw.s.bus_id = slot->bus_id;
687 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
688 +
689 + samp.u64 = 0;
690 + samp.s.cmd_cnt = slot->cmd_cnt;
691 + samp.s.dat_cnt = slot->dat_cnt;
692 + cvmx_write_csr(host->base + OCT_MIO_EMM_SAMPLE, samp.u64);
693 +out:
694 + host->last_slot = slot->bus_id;
695 +}
696 +
697 +static void octeon_mmc_dma_request(struct mmc_host *mmc,
698 + struct mmc_request *mrq)
699 +{
700 + struct octeon_mmc_slot *slot;
701 + struct octeon_mmc_host *host;
702 + struct mmc_command *cmd;
703 + struct mmc_data *data;
704 + union cvmx_mio_emm_int emm_int;
705 + union cvmx_mio_emm_dma emm_dma;
706 + union cvmx_mio_ndf_dma_cfg dma_cfg;
707 +
708 + cmd = mrq->cmd;
709 + if (mrq->data == NULL || mrq->data->sg == NULL || !mrq->data->sg_len ||
710 + mrq->stop == NULL || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
711 + dev_err(&mmc->card->dev,
712 + "Error: octeon_mmc_dma_request no data\n");
713 + cmd->error = -EINVAL;
714 + if (mrq->done)
715 + mrq->done(mrq);
716 + return;
717 + }
718 +
719 + slot = mmc_priv(mmc);
720 + host = slot->host;
721 +
722 + /* Only a single user of the bootbus at a time. */
723 + octeon_mmc_acquire_bus(host);
724 +
725 + octeon_mmc_switch_to(slot);
726 +
727 + data = mrq->data;
728 +
729 + if (data->timeout_ns) {
730 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
731 + octeon_mmc_timeout_to_wdog(slot, data->timeout_ns));
732 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
733 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
734 + }
735 +
736 + WARN_ON(host->current_req);
737 + host->current_req = mrq;
738 +
739 + host->sg_idx = 0;
740 +
741 + WARN_ON(data->blksz * data->blocks > host->linear_buf_size);
742 +
743 + if ((data->flags & MMC_DATA_WRITE) && data->sg_len > 1) {
744 + size_t r = sg_copy_to_buffer(data->sg, data->sg_len,
745 + host->linear_buf, data->blksz * data->blocks);
746 + WARN_ON(data->blksz * data->blocks != r);
747 + }
748 +
749 + dma_cfg.u64 = 0;
750 + dma_cfg.s.en = 1;
751 + dma_cfg.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
752 +#ifdef __LITTLE_ENDIAN
753 + dma_cfg.s.endian = 1;
754 +#endif
755 + dma_cfg.s.size = ((data->blksz * data->blocks) / 8) - 1;
756 + if (!host->big_dma_addr) {
757 + if (data->sg_len > 1)
758 + dma_cfg.s.adr = virt_to_phys(host->linear_buf);
759 + else
760 + dma_cfg.s.adr = sg_phys(data->sg);
761 + }
762 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG, dma_cfg.u64);
763 + octeon_mmc_dbg("MIO_NDF_DMA_CFG: %016llx\n",
764 + (unsigned long long)dma_cfg.u64);
765 + if (host->big_dma_addr) {
766 + u64 addr;
767 +
768 + if (data->sg_len > 1)
769 + addr = virt_to_phys(host->linear_buf);
770 + else
771 + addr = sg_phys(data->sg);
772 + cvmx_write_csr(host->ndf_base + OCT_MIO_EMM_DMA_ADR, addr);
773 + octeon_mmc_dbg("MIO_EMM_DMA_ADR: %016llx\n",
774 + (unsigned long long)addr);
775 + }
776 +
777 + emm_dma.u64 = 0;
778 + emm_dma.s.bus_id = slot->bus_id;
779 + emm_dma.s.dma_val = 1;
780 + emm_dma.s.sector = mmc_card_blockaddr(mmc->card) ? 1 : 0;
781 + emm_dma.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
782 + if (mmc_card_mmc(mmc->card) ||
783 + (mmc_card_sd(mmc->card) &&
784 + (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
785 + emm_dma.s.multi = 1;
786 + emm_dma.s.block_cnt = data->blocks;
787 + emm_dma.s.card_addr = cmd->arg;
788 +
789 + emm_int.u64 = 0;
790 + emm_int.s.dma_done = 1;
791 + emm_int.s.cmd_err = 1;
792 + emm_int.s.dma_err = 1;
793 + /* Clear the bit. */
794 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
795 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
796 + host->dma_active = true;
797 +
798 + if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
799 + OCTEON_IS_MODEL(OCTEON_CNF7XXX)) &&
800 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK &&
801 + (data->blksz * data->blocks) > 1024) {
802 + host->n_minus_one = dma_cfg.s.adr +
803 + (data->blksz * data->blocks) - 1024;
804 + l2c_lock_mem_region(host->n_minus_one, 512);
805 + }
806 +
807 + if (mmc->card && mmc_card_sd(mmc->card))
808 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
809 + 0x00b00000ull);
810 + else
811 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
812 + 0xe4f90080ull);
813 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA, emm_dma.u64);
814 + octeon_mmc_dbg("MIO_EMM_DMA: %llx\n", emm_dma.u64);
815 +}
816 +
817 +static void octeon_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
818 +{
819 + struct octeon_mmc_slot *slot;
820 + struct octeon_mmc_host *host;
821 + struct mmc_command *cmd;
822 + union cvmx_mio_emm_int emm_int;
823 + union cvmx_mio_emm_cmd emm_cmd;
824 + struct octeon_mmc_cr_mods mods;
825 +
826 + cmd = mrq->cmd;
827 +
828 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
829 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) {
830 + octeon_mmc_dma_request(mmc, mrq);
831 + return;
832 + }
833 +
834 + mods = octeon_mmc_get_cr_mods(cmd);
835 +
836 + slot = mmc_priv(mmc);
837 + host = slot->host;
838 +
839 + /* Only a single user of the bootbus at a time. */
840 + octeon_mmc_acquire_bus(host);
841 +
842 + octeon_mmc_switch_to(slot);
843 +
844 + WARN_ON(host->current_req);
845 + host->current_req = mrq;
846 +
847 + emm_int.u64 = 0;
848 + emm_int.s.cmd_done = 1;
849 + emm_int.s.cmd_err = 1;
850 + if (cmd->data) {
851 + octeon_mmc_dbg("command has data\n");
852 + if (cmd->data->flags & MMC_DATA_READ) {
853 + sg_miter_start(&host->smi, mrq->data->sg,
854 + mrq->data->sg_len,
855 + SG_MITER_ATOMIC | SG_MITER_TO_SG);
856 + } else {
857 + struct sg_mapping_iter *smi = &host->smi;
858 + unsigned int data_len =
859 + mrq->data->blksz * mrq->data->blocks;
860 + unsigned int bytes_xfered;
861 + u64 dat = 0;
862 + int shift = 56;
863 + /*
864 + * Copy data to the xmit buffer before
865 + * issuing the command
866 + */
867 + sg_miter_start(smi, mrq->data->sg,
868 + mrq->data->sg_len, SG_MITER_FROM_SG);
869 + /* Auto inc from offset zero, dbuf zero */
870 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
871 + 0x10000ull);
872 +
873 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
874 + if (smi->consumed >= smi->length) {
875 + if (!sg_miter_next(smi))
876 + break;
877 + smi->consumed = 0;
878 + }
879 +
880 + while (smi->consumed < smi->length &&
881 + shift >= 0) {
882 +
883 + dat |= (u64)(((u8 *)(smi->addr))
884 + [smi->consumed]) << shift;
885 + bytes_xfered++;
886 + smi->consumed++;
887 + shift -= 8;
888 + }
889 + if (shift < 0) {
890 + cvmx_write_csr(host->base +
891 + OCT_MIO_EMM_BUF_DAT, dat);
892 + shift = 56;
893 + dat = 0;
894 + }
895 + }
896 + sg_miter_stop(smi);
897 + }
898 + if (cmd->data->timeout_ns) {
899 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
900 + octeon_mmc_timeout_to_wdog(slot,
901 + cmd->data->timeout_ns));
902 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
903 + cvmx_read_csr(host->base +
904 + OCT_MIO_EMM_WDOG));
905 + }
906 + } else {
907 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
908 + ((u64)slot->clock * 850ull) / 1000ull);
909 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
910 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
911 + }
912 + /* Clear the bit. */
913 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
914 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
915 + host->dma_active = false;
916 +
917 + emm_cmd.u64 = 0;
918 + emm_cmd.s.cmd_val = 1;
919 + emm_cmd.s.ctype_xor = mods.ctype_xor;
920 + emm_cmd.s.rtype_xor = mods.rtype_xor;
921 + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
922 + emm_cmd.s.offset = 64 -
923 + ((cmd->data->blksz * cmd->data->blocks) / 8);
924 + emm_cmd.s.bus_id = slot->bus_id;
925 + emm_cmd.s.cmd_idx = cmd->opcode;
926 + emm_cmd.s.arg = cmd->arg;
927 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0);
928 + cvmx_write_csr(host->base + OCT_MIO_EMM_CMD, emm_cmd.u64);
929 + octeon_mmc_dbg("MIO_EMM_CMD: %llx\n", emm_cmd.u64);
930 +}
931 +
932 +static void octeon_mmc_reset_bus(struct octeon_mmc_slot *slot, int preserve)
933 +{
934 + union cvmx_mio_emm_cfg emm_cfg;
935 + union cvmx_mio_emm_switch emm_switch;
936 + u64 wdog = 0;
937 +
938 + emm_cfg.u64 = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_CFG);
939 + if (preserve) {
940 + emm_switch.u64 = cvmx_read_csr(slot->host->base +
941 + OCT_MIO_EMM_SWITCH);
942 + wdog = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_WDOG);
943 + }
944 +
945 + /* Restore switch settings */
946 + if (preserve) {
947 + emm_switch.s.switch_exe = 0;
948 + emm_switch.s.switch_err0 = 0;
949 + emm_switch.s.switch_err1 = 0;
950 + emm_switch.s.switch_err2 = 0;
951 + emm_switch.s.bus_id = 0;
952 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
953 + emm_switch.u64);
954 + emm_switch.s.bus_id = slot->bus_id;
955 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
956 + emm_switch.u64);
957 +
958 + slot->cached_switch = emm_switch.u64;
959 +
960 + msleep(10);
961 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_WDOG, wdog);
962 + } else {
963 + slot->cached_switch = 0;
964 + }
965 +}
966 +
967 +static void octeon_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
968 +{
969 + struct octeon_mmc_slot *slot;
970 + struct octeon_mmc_host *host;
971 + int bus_width;
972 + int clock;
973 + bool ddr_clock;
974 + int hs_timing;
975 + int power_class = 10;
976 + int clk_period;
977 + int timeout = 2000;
978 + union cvmx_mio_emm_switch emm_switch;
979 + union cvmx_mio_emm_rsp_sts emm_sts;
980 +
981 + slot = mmc_priv(mmc);
982 + host = slot->host;
983 +
984 + /* Only a single user of the bootbus at a time. */
985 + octeon_mmc_acquire_bus(host);
986 +
987 + octeon_mmc_switch_to(slot);
988 +
989 + octeon_mmc_dbg("Calling set_ios: slot: clk = 0x%x, bus_width = %d\n",
990 + slot->clock, slot->bus_width);
991 + octeon_mmc_dbg("Calling set_ios: ios: clk = 0x%x, vdd = %u, bus_width = %u, power_mode = %u, timing = %u\n",
992 + ios->clock, ios->vdd, ios->bus_width, ios->power_mode,
993 + ios->timing);
994 + octeon_mmc_dbg("Calling set_ios: mmc: caps = 0x%x, bus_width = %d\n",
995 + mmc->caps, mmc->ios.bus_width);
996 +
997 + /*
998 + * Reset the chip on each power off
999 + */
1000 + if (ios->power_mode == MMC_POWER_OFF) {
1001 + octeon_mmc_reset_bus(slot, 1);
1002 + if (slot->pwr_gpio >= 0)
1003 + gpio_set_value_cansleep(slot->pwr_gpio,
1004 + slot->pwr_gpio_low);
1005 + } else {
1006 + if (slot->pwr_gpio >= 0)
1007 + gpio_set_value_cansleep(slot->pwr_gpio,
1008 + !slot->pwr_gpio_low);
1009 + }
1010 +
1011 + switch (ios->bus_width) {
1012 + case MMC_BUS_WIDTH_8:
1013 + bus_width = 2;
1014 + break;
1015 + case MMC_BUS_WIDTH_4:
1016 + bus_width = 1;
1017 + break;
1018 + case MMC_BUS_WIDTH_1:
1019 + bus_width = 0;
1020 + break;
1021 + default:
1022 + octeon_mmc_dbg("unknown bus width %d\n", ios->bus_width);
1023 + bus_width = 0;
1024 + break;
1025 + }
1026 +
1027 + hs_timing = (ios->timing == MMC_TIMING_MMC_HS);
1028 + ddr_clock = (bus_width && ios->timing >= MMC_TIMING_UHS_DDR50);
1029 +
1030 + if (ddr_clock)
1031 + bus_width |= 4;
1032 +
1033 + if (ios->clock) {
1034 + slot->clock = ios->clock;
1035 + slot->bus_width = bus_width;
1036 +
1037 + clock = slot->clock;
1038 +
1039 + if (clock > 52000000)
1040 + clock = 52000000;
1041 +
1042 + clk_period = (octeon_get_io_clock_rate() + clock - 1) /
1043 + (2 * clock);
1044 +
1045 + /* until clock-renengotiate-on-CRC is in */
1046 + if (ddr_clock && ddr > 1)
1047 + clk_period *= 2;
1048 +
1049 + emm_switch.u64 = 0;
1050 + emm_switch.s.hs_timing = hs_timing;
1051 + emm_switch.s.bus_width = bus_width;
1052 + emm_switch.s.power_class = power_class;
1053 + emm_switch.s.clk_hi = clk_period;
1054 + emm_switch.s.clk_lo = clk_period;
1055 +
1056 + if (!octeon_mmc_switch_val_changed(slot, emm_switch.u64)) {
1057 + octeon_mmc_dbg("No change from 0x%llx mio_emm_switch, returning.\n",
1058 + emm_switch.u64);
1059 + goto out;
1060 + }
1061 +
1062 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_wdog\n",
1063 + ((u64)clock * 850ull) / 1000ull);
1064 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
1065 + ((u64)clock * 850ull) / 1000ull);
1066 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_switch\n",
1067 + emm_switch.u64);
1068 +
1069 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1070 + emm_switch.s.bus_id = slot->bus_id;
1071 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1072 + slot->cached_switch = emm_switch.u64;
1073 +
1074 + do {
1075 + emm_sts.u64 =
1076 + cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
1077 + if (!emm_sts.s.switch_val)
1078 + break;
1079 + udelay(100);
1080 + } while (timeout-- > 0);
1081 +
1082 + if (timeout <= 0) {
1083 + octeon_mmc_dbg("switch command timed out, status=0x%llx\n",
1084 + emm_sts.u64);
1085 + goto out;
1086 + }
1087 + }
1088 +out:
1089 + octeon_mmc_release_bus(host);
1090 +}
1091 +
1092 +static int octeon_mmc_get_ro(struct mmc_host *mmc)
1093 +{
1094 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1095 +
1096 + if (slot->ro_gpio >= 0) {
1097 + int pin = gpio_get_value_cansleep(slot->ro_gpio);
1098 +
1099 + if (pin < 0)
1100 + return pin;
1101 + if (slot->ro_gpio_low)
1102 + pin = !pin;
1103 + return pin;
1104 + } else {
1105 + return -ENOSYS;
1106 + }
1107 +}
1108 +
1109 +static int octeon_mmc_get_cd(struct mmc_host *mmc)
1110 +{
1111 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1112 +
1113 + if (slot->cd_gpio >= 0) {
1114 + int pin = gpio_get_value_cansleep(slot->cd_gpio);
1115 +
1116 + if (pin < 0)
1117 + return pin;
1118 + if (slot->cd_gpio_low)
1119 + pin = !pin;
1120 + return pin;
1121 + } else {
1122 + return -ENOSYS;
1123 + }
1124 +}
1125 +
+/* Host operations handed to the MMC core for every slot. */
+static const struct mmc_host_ops octeon_mmc_ops = {
+	.request = octeon_mmc_request,
+	.set_ios = octeon_mmc_set_ios,
+	.get_ro = octeon_mmc_get_ro,
+	.get_cd = octeon_mmc_get_cd,
+};
1132 +
1133 +static void octeon_mmc_set_clock(struct octeon_mmc_slot *slot,
1134 + unsigned int clock)
1135 +{
1136 + struct mmc_host *mmc = slot->mmc;
1137 +
1138 + clock = min(clock, mmc->f_max);
1139 + clock = max(clock, mmc->f_min);
1140 + slot->clock = clock;
1141 +}
1142 +
+/*
+ * Put the controller into a sane initial state for this slot: enable
+ * the slot's bus, program a 400 kHz identification clock, power class
+ * 10, the watchdog, the interrupt status mask and a default RCA.
+ *
+ * NOTE(review): the bus_width argument is not used here; the bus width
+ * appears to be negotiated later via octeon_mmc_set_ios() — confirm.
+ */
+static int octeon_mmc_initlowlevel(struct octeon_mmc_slot *slot,
+				   int bus_width)
+{
+	union cvmx_mio_emm_switch emm_switch;
+	struct octeon_mmc_host *host = slot->host;
+
+	/* Enable this slot's bus in the cached controller config. */
+	host->emm_cfg |= 1ull << slot->bus_id;
+	cvmx_write_csr(slot->host->base + OCT_MIO_EMM_CFG, host->emm_cfg);
+	octeon_mmc_set_clock(slot, 400000);
+
+	/* Program initial clock speed and power */
+	emm_switch.u64 = 0;
+	emm_switch.s.power_class = 10;
+	emm_switch.s.clk_hi = (slot->sclock / slot->clock) / 2;
+	emm_switch.s.clk_lo = (slot->sclock / slot->clock) / 2;
+
+	/* Write twice: first with bus_id clear, then with it set. */
+	cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
+	emm_switch.s.bus_id = slot->bus_id;
+	cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
+	slot->cached_switch = emm_switch.u64;
+
+	/* Watchdog: 850/1000 of a second's worth of card clock cycles. */
+	cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
+		       ((u64)slot->clock * 850ull) / 1000ull);
+	cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0xe4f90080ull);
+	cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, 1);
+	return 0;
+}
1170 +
1171 +static int __init octeon_init_slot(struct octeon_mmc_host *host, int id,
1172 + int bus_width, int max_freq,
1173 + int ro_gpio, int cd_gpio, int pwr_gpio,
1174 + bool ro_low, bool cd_low, bool power_low,
1175 + u32 cmd_skew, u32 dat_skew)
1176 +{
1177 + struct mmc_host *mmc;
1178 + struct octeon_mmc_slot *slot;
1179 + u64 clock_period;
1180 + int ret;
1181 +
1182 + /*
1183 + * Allocate MMC structue
1184 + */
1185 + mmc = mmc_alloc_host(sizeof(struct octeon_mmc_slot), &host->pdev->dev);
1186 + if (!mmc) {
1187 + dev_err(&host->pdev->dev, "alloc host failed\n");
1188 + return -ENOMEM;
1189 + }
1190 +
1191 + slot = mmc_priv(mmc);
1192 + slot->mmc = mmc;
1193 + slot->host = host;
1194 + slot->ro_gpio = ro_gpio;
1195 + slot->cd_gpio = cd_gpio;
1196 + slot->pwr_gpio = pwr_gpio;
1197 + slot->ro_gpio_low = ro_low;
1198 + slot->cd_gpio_low = cd_low;
1199 + slot->pwr_gpio_low = power_low;
1200 +
1201 + if (slot->ro_gpio >= 0) {
1202 + ret = gpio_request(slot->ro_gpio, "mmc_ro");
1203 + if (ret) {
1204 + dev_err(&host->pdev->dev,
1205 + "Could not request mmc_ro GPIO %d\n",
1206 + slot->ro_gpio);
1207 + return ret;
1208 + }
1209 + gpio_direction_input(slot->ro_gpio);
1210 + }
1211 + if (slot->cd_gpio >= 0) {
1212 + ret = gpio_request(slot->cd_gpio, "mmc_card_detect");
1213 + if (ret) {
1214 + if (slot->ro_gpio >= 0)
1215 + gpio_free(slot->ro_gpio);
1216 + dev_err(&host->pdev->dev, "Could not request mmc_card_detect GPIO %d\n",
1217 + slot->cd_gpio);
1218 + return ret;
1219 + }
1220 + gpio_direction_input(slot->cd_gpio);
1221 + }
1222 + if (slot->pwr_gpio >= 0) {
1223 + ret = gpio_request(slot->pwr_gpio, "mmc_power");
1224 + if (ret) {
1225 + dev_err(&host->pdev->dev,
1226 + "Could not request mmc_power GPIO %d\n",
1227 + slot->pwr_gpio);
1228 + if (slot->ro_gpio >= 0)
1229 + gpio_free(slot->ro_gpio);
1230 + if (slot->cd_gpio)
1231 + gpio_free(slot->cd_gpio);
1232 + return ret;
1233 + }
1234 + octeon_mmc_dbg("%s: Shutting off power to slot %d via gpio %d\n",
1235 + DRV_NAME, slot->bus_id, slot->pwr_gpio);
1236 + gpio_direction_output(slot->pwr_gpio,
1237 + slot->pwr_gpio_low);
1238 + }
1239 + /*
1240 + * Set up host parameters.
1241 + */
1242 + mmc->ops = &octeon_mmc_ops;
1243 + mmc->f_min = 400000;
1244 + mmc->f_max = max_freq;
1245 + mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1246 + MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
1247 + MMC_CAP_ERASE;
1248 + mmc->ocr_avail = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 |
1249 + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 |
1250 + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36;
1251 +
1252 + /* post-sdk23 caps */
1253 + mmc->caps |=
1254 + ((mmc->f_max >= 12000000) * MMC_CAP_UHS_SDR12) |
1255 + ((mmc->f_max >= 25000000) * MMC_CAP_UHS_SDR25) |
1256 + ((mmc->f_max >= 50000000) * MMC_CAP_UHS_SDR50) |
1257 + MMC_CAP_CMD23;
1258 +
1259 + if (host->global_pwr_gpio >= 0)
1260 + mmc->caps |= MMC_CAP_POWER_OFF_CARD;
1261 +
1262 + /* "1.8v" capability is actually 1.8-or-3.3v */
1263 + if (ddr)
1264 + mmc->caps |= MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR;
1265 +
1266 + mmc->max_segs = 64;
1267 + mmc->max_seg_size = host->linear_buf_size;
1268 + mmc->max_req_size = host->linear_buf_size;
1269 + mmc->max_blk_size = 512;
1270 + mmc->max_blk_count = mmc->max_req_size / 512;
1271 +
1272 + slot->clock = mmc->f_min;
1273 + slot->sclock = octeon_get_io_clock_rate();
1274 +
1275 + clock_period = 1000000000000ull / slot->sclock; /* period in pS */
1276 + slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1277 + slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1278 +
1279 + slot->bus_width = bus_width;
1280 + slot->bus_id = id;
1281 + slot->cached_rca = 1;
1282 +
1283 + /* Only a single user of the bootbus at a time. */
1284 + octeon_mmc_acquire_bus(host);
1285 + host->slot[id] = slot;
1286 +
1287 + octeon_mmc_switch_to(slot);
1288 + /* Initialize MMC Block. */
1289 + octeon_mmc_initlowlevel(slot, bus_width);
1290 +
1291 + octeon_mmc_release_bus(host);
1292 +
1293 + ret = mmc_add_host(mmc);
1294 + octeon_mmc_dbg("mmc_add_host returned %d\n", ret);
1295 +
1296 + return 0;
1297 +}
1298 +
1299 +static int octeon_mmc_probe(struct platform_device *pdev)
1300 +{
1301 + union cvmx_mio_emm_cfg emm_cfg;
1302 + struct octeon_mmc_host *host;
1303 + struct resource *res;
1304 + void __iomem *base;
1305 + int mmc_irq[9];
1306 + int i;
1307 + int ret = 0;
1308 + struct device_node *node = pdev->dev.of_node;
1309 + bool cn78xx_style;
1310 + u64 t;
1311 + enum of_gpio_flags f;
1312 +
1313 + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
1314 + if (!host)
1315 + return -ENOMEM;
1316 +
1317 + spin_lock_init(&host->irq_handler_lock);
1318 + sema_init(&host->mmc_serializer, 1);
1319 +
1320 + cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-mmc");
1321 + if (cn78xx_style) {
1322 + host->need_bootbus_lock = false;
1323 + host->big_dma_addr = true;
1324 + host->need_irq_handler_lock = true;
1325 + /*
1326 + * First seven are the EMM_INT bits 0..6, then two for
1327 + * the EMM_DMA_INT bits
1328 + */
1329 + for (i = 0; i < 9; i++) {
1330 + mmc_irq[i] = platform_get_irq(pdev, i);
1331 + if (mmc_irq[i] < 0)
1332 + return mmc_irq[i];
1333 + }
1334 + } else {
1335 + host->need_bootbus_lock = true;
1336 + host->big_dma_addr = false;
1337 + host->need_irq_handler_lock = false;
1338 + /* First one is EMM second NDF_DMA */
1339 + for (i = 0; i < 2; i++) {
1340 + mmc_irq[i] = platform_get_irq(pdev, i);
1341 + if (mmc_irq[i] < 0)
1342 + return mmc_irq[i];
1343 + }
1344 + }
1345 + host->last_slot = -1;
1346 +
1347 + if (bb_size < 512 || bb_size >= (1 << 24))
1348 + bb_size = 1 << 16;
1349 + host->linear_buf_size = bb_size;
1350 + host->linear_buf = devm_kzalloc(&pdev->dev, host->linear_buf_size,
1351 + GFP_KERNEL);
1352 +
1353 + if (!host->linear_buf) {
1354 + dev_err(&pdev->dev, "devm_kzalloc failed\n");
1355 + return -ENOMEM;
1356 + }
1357 +
1358 + host->pdev = pdev;
1359 +
1360 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1361 + if (!res) {
1362 + dev_err(&pdev->dev, "Platform resource[0] is missing\n");
1363 + return -ENXIO;
1364 + }
1365 + base = devm_ioremap_resource(&pdev->dev, res);
1366 + if (IS_ERR(base))
1367 + return PTR_ERR(base);
1368 + host->base = (u64)base;
1369 +
1370 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1371 + if (!res) {
1372 + dev_err(&pdev->dev, "Platform resource[1] is missing\n");
1373 + ret = -EINVAL;
1374 + goto err;
1375 + }
1376 + base = devm_ioremap_resource(&pdev->dev, res);
1377 + if (IS_ERR(base)) {
1378 + ret = PTR_ERR(base);
1379 + goto err;
1380 + }
1381 + host->ndf_base = (u64)base;
1382 + /*
1383 + * Clear out any pending interrupts that may be left over from
1384 + * bootloader.
1385 + */
1386 + t = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
1387 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, t);
1388 + if (cn78xx_style) {
1389 + /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
1390 + for (i = 1; i <= 4; i++) {
1391 + ret = devm_request_irq(&pdev->dev, mmc_irq[i],
1392 + octeon_mmc_interrupt,
1393 + 0, DRV_NAME, host);
1394 + if (ret < 0) {
1395 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1396 + mmc_irq[i]);
1397 + goto err;
1398 + }
1399 + }
1400 + } else {
1401 + ret = devm_request_irq(&pdev->dev, mmc_irq[0],
1402 + octeon_mmc_interrupt, 0, DRV_NAME, host);
1403 + if (ret < 0) {
1404 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1405 + mmc_irq[0]);
1406 + goto err;
1407 + }
1408 + }
1409 +
1410 + ret = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1411 + if (ret == -EPROBE_DEFER)
1412 + goto err;
1413 +
1414 + host->global_pwr_gpio = ret;
1415 + host->global_pwr_gpio_low =
1416 + (host->global_pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1417 +
1418 + if (host->global_pwr_gpio >= 0) {
1419 + ret = gpio_request(host->global_pwr_gpio, "mmc global power");
1420 + if (ret) {
1421 + dev_err(&pdev->dev,
1422 + "Could not request mmc global power gpio %d\n",
1423 + host->global_pwr_gpio);
1424 + goto err;
1425 + }
1426 + dev_dbg(&pdev->dev, "Global power on\n");
1427 + gpio_direction_output(host->global_pwr_gpio,
1428 + !host->global_pwr_gpio_low);
1429 + }
1430 +
1431 + platform_set_drvdata(pdev, host);
1432 +
1433 + for_each_child_of_node(pdev->dev.of_node, node) {
1434 +
1435 + int r;
1436 + u32 slot;
1437 + int ro_gpio, cd_gpio, pwr_gpio;
1438 + bool ro_low, cd_low, pwr_low;
1439 + u32 bus_width, max_freq, cmd_skew, dat_skew;
1440 +
1441 + if (!of_device_is_compatible(node,
1442 + "cavium,octeon-6130-mmc-slot")) {
1443 + pr_warn("Sub node isn't slot: %s\n",
1444 + of_node_full_name(node));
1445 + continue;
1446 + }
1447 +
1448 + if (of_property_read_u32(node, "reg", &slot) != 0) {
1449 + pr_warn("Missing or invalid reg property on %s\n",
1450 + of_node_full_name(node));
1451 + continue;
1452 + }
1453 +
1454 + r = of_property_read_u32(node, "cavium,bus-max-width",
1455 + &bus_width);
1456 + if (r) {
1457 + bus_width = 8;
1458 + pr_info("Bus width not found for slot %d, defaulting to %d\n",
1459 + slot, bus_width);
1460 + } else {
1461 + switch (bus_width) {
1462 + case 1:
1463 + case 4:
1464 + case 8:
1465 + break;
1466 + default:
1467 + pr_warn("Invalid bus width property for slot %d\n",
1468 + slot);
1469 + continue;
1470 + }
1471 + }
1472 +
1473 + r = of_property_read_u32(node, "cavium,cmd-clk-skew",
1474 + &cmd_skew);
1475 + if (r)
1476 + cmd_skew = 0;
1477 +
1478 + r = of_property_read_u32(node, "cavium,dat-clk-skew",
1479 + &dat_skew);
1480 + if (r)
1481 + dat_skew = 0;
1482 +
1483 + r = of_property_read_u32(node, "spi-max-frequency", &max_freq);
1484 + if (r) {
1485 + max_freq = 52000000;
1486 + pr_info("No spi-max-frequency for slot %d, defaulting to %d\n",
1487 + slot, max_freq);
1488 + }
1489 +
1490 + ro_gpio = of_get_named_gpio_flags(node, "wp-gpios", 0, &f);
1491 + ro_low = (ro_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1492 + cd_gpio = of_get_named_gpio_flags(node, "cd-gpios", 0, &f);
1493 + cd_low = (cd_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1494 + pwr_gpio = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1495 + pwr_low = (pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1496 +
1497 + ret = octeon_init_slot(host, slot, bus_width, max_freq,
1498 + ro_gpio, cd_gpio, pwr_gpio,
1499 + ro_low, cd_low, pwr_low,
1500 + cmd_skew, dat_skew);
1501 + octeon_mmc_dbg("init slot %d, ret = %d\n", slot, ret);
1502 + if (ret)
1503 + goto err;
1504 + }
1505 +
1506 + return ret;
1507 +
1508 +err:
1509 + dev_err(&pdev->dev, "Probe failed: %d\n", ret);
1510 +
1511 + /* Disable MMC controller */
1512 + emm_cfg.s.bus_ena = 0;
1513 + cvmx_write_csr(host->base + OCT_MIO_EMM_CFG, emm_cfg.u64);
1514 +
1515 + if (host->global_pwr_gpio >= 0) {
1516 + dev_dbg(&pdev->dev, "Global power off\n");
1517 + gpio_set_value_cansleep(host->global_pwr_gpio,
1518 + host->global_pwr_gpio_low);
1519 + gpio_free(host->global_pwr_gpio);
1520 + }
1521 +
1522 + return ret;
1523 +}
1524 +
1525 +static int octeon_mmc_remove(struct platform_device *pdev)
1526 +{
1527 + union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1528 + struct octeon_mmc_host *host = platform_get_drvdata(pdev);
1529 + struct octeon_mmc_slot *slot;
1530 +
1531 + platform_set_drvdata(pdev, NULL);
1532 +
1533 + if (host) {
1534 + int i;
1535 +
1536 + /* quench all users */
1537 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1538 + slot = host->slot[i];
1539 + if (slot)
1540 + mmc_remove_host(slot->mmc);
1541 + }
1542 +
1543 + /* Reset bus_id */
1544 + ndf_dma_cfg.u64 =
1545 + cvmx_read_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG);
1546 + ndf_dma_cfg.s.en = 0;
1547 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG,
1548 + ndf_dma_cfg.u64);
1549 +
1550 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1551 + struct octeon_mmc_slot *slot;
1552 +
1553 + slot = host->slot[i];
1554 + if (!slot)
1555 + continue;
1556 + /* Free the GPIOs */
1557 + if (slot->ro_gpio >= 0)
1558 + gpio_free(slot->ro_gpio);
1559 + if (slot->cd_gpio >= 0)
1560 + gpio_free(slot->cd_gpio);
1561 + if (slot->pwr_gpio >= 0) {
1562 + gpio_set_value_cansleep(slot->pwr_gpio,
1563 + slot->pwr_gpio_low);
1564 + gpio_free(slot->pwr_gpio);
1565 + }
1566 + }
1567 +
1568 + if (host->global_pwr_gpio >= 0) {
1569 + dev_dbg(&pdev->dev, "Global power off\n");
1570 + gpio_set_value_cansleep(host->global_pwr_gpio,
1571 + host->global_pwr_gpio_low);
1572 + gpio_free(host->global_pwr_gpio);
1573 + }
1574 +
1575 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1576 + slot = host->slot[i];
1577 + if (slot)
1578 + mmc_free_host(slot->mmc);
1579 + }
1580 +
1581 + }
1582 + return 0;
1583 +}
1584 +
1585 +static struct of_device_id octeon_mmc_match[] = {
1586 + {
1587 + .compatible = "cavium,octeon-6130-mmc",
1588 + },
1589 + {
1590 + .compatible = "cavium,octeon-7890-mmc",
1591 + },
1592 + {},
1593 +};
1594 +MODULE_DEVICE_TABLE(of, octeon_mmc_match);
1595 +
+/* Platform driver glue binding probe/remove to the DT match table. */
+static struct platform_driver octeon_mmc_driver = {
+	.probe = octeon_mmc_probe,
+	.remove = octeon_mmc_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = octeon_mmc_match,
+	},
+};
1605 +
1606 +static int __init octeon_mmc_init(void)
1607 +{
1608 + int ret;
1609 +
1610 + octeon_mmc_dbg("calling octeon_mmc_init\n");
1611 +
1612 + ret = platform_driver_register(&octeon_mmc_driver);
1613 + octeon_mmc_dbg("driver probe returned %d\n", ret);
1614 +
1615 + if (ret)
1616 + pr_err("%s: Failed to register driver\n", DRV_NAME);
1617 +
1618 + return ret;
1619 +}
1620 +
1621 +static void __exit octeon_mmc_cleanup(void)
1622 +{
1623 + /* Unregister MMC driver */
1624 + platform_driver_unregister(&octeon_mmc_driver);
1625 +}
1626 +
+/* Module entry/exit registration and metadata. */
+module_init(octeon_mmc_init);
+module_exit(octeon_mmc_cleanup);
+
+MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("low-level driver for Cavium OCTEON MMC/SSD card");
+MODULE_LICENSE("GPL");