62e5ef858a2aaa82832be5947393dd4db2db6fb2
[openwrt/staging/yousong.git] / target / linux / octeon / patches-4.3 / 150-mmc-octeon-add-host-driver-for-octeon-mmc-controller.patch
1 --- /dev/null
2 +++ b/Documentation/devicetree/bindings/mmc/octeon-mmc.txt
3 @@ -0,0 +1,69 @@
4 +* OCTEON SD/MMC Host Controller
5 +
6 +This controller is present on some members of the Cavium OCTEON SoC
7 +family, providing an interface to eMMC, MMC and SD devices. There is a
8 +single controller that may have several "slots" connected. These
9 +slots appear as children of the main controller node.
10 +The DMA engine is an integral part of the controller block.
11 +
12 +Required properties:
13 +- compatible : Should be "cavium,octeon-6130-mmc" or "cavium,octeon-7890-mmc"
14 +- reg : Two entries:
15 + 1) The base address of the MMC controller register bank.
16 + 2) The base address of the MMC DMA engine register bank.
17 +- interrupts :
18 + For "cavium,octeon-6130-mmc": two entries:
19 + 1) The MMC controller interrupt line.
20 + 2) The MMC DMA engine interrupt line.
21 + For "cavium,octeon-7890-mmc": nine entries:
22 + 1) The next block transfer of a multiblock transfer has completed (BUF_DONE)
23 + 2) Operation completed successfully (CMD_DONE).
24 + 3) DMA transfer completed successfully (DMA_DONE).
25 + 4) Operation encountered an error (CMD_ERR).
26 + 5) DMA transfer encountered an error (DMA_ERR).
27 + 6) Switch operation completed successfully (SWITCH_DONE).
28 + 7) Switch operation encountered an error (SWITCH_ERR).
29 + 8) Internal DMA engine request completion interrupt (DONE).
30 + 9) Internal DMA FIFO underflow (FIFO).
31 +- #address-cells : Must be <1>
32 +- #size-cells : Must be <0>
33 +
34 +Required properties of child nodes:
35 +- compatible : Should be "cavium,octeon-6130-mmc-slot".
36 +- reg : The slot number.
37 +
38 +Optional properties of child nodes:
39 +- cd-gpios : Specify GPIOs for card detection
40 +- wp-gpios : Specify GPIOs for write protection
41 +- power-gpios : Specify GPIOs for power control
42 +- cavium,bus-max-width : The number of data lines present in the slot.
43 + Default is 8.
44 +- spi-max-frequency : The maximum operating frequency of the slot.
45 + Default is 52000000.
46 +- cavium,cmd-clk-skew : the amount of delay (in ps) past the clock edge
47 + to sample the command pin.
48 +- cavium,dat-clk-skew : the amount of delay (in ps) past the clock edge
49 + to sample the data pin.
50 +
51 +Example:
52 + mmc@1180000002000 {
53 + compatible = "cavium,octeon-6130-mmc";
54 + reg = <0x11800 0x00002000 0x0 0x100>,
55 + <0x11800 0x00000168 0x0 0x20>;
56 + #address-cells = <1>;
57 + #size-cells = <0>;
58 + /* EMM irq, DMA irq */
59 + interrupts = <1 19>, <0 63>;
60 +
61 + /* The board only has a single MMC slot */
62 + mmc-slot@0 {
63 + compatible = "cavium,octeon-6130-mmc-slot";
64 + reg = <0>;
65 + spi-max-frequency = <20000000>;
66 + /* bus width can be 1, 4 or 8 */
67 + cavium,bus-max-width = <8>;
68 + cd-gpios = <&gpio 9 0>;
69 + wp-gpios = <&gpio 10 0>;
70 + power-gpios = <&gpio 8 0>;
71 + };
72 + };
73 --- a/drivers/mmc/host/Kconfig
74 +++ b/drivers/mmc/host/Kconfig
75 @@ -436,6 +436,16 @@ config MMC_MXS
76
77 If unsure, say N.
78
79 +config MMC_OCTEON
80 + tristate "Cavium OCTEON Multimedia Card Interface support"
81 + depends on CAVIUM_OCTEON_SOC
82 + help
83 + This selects Cavium OCTEON Multimedia Card Interface.
84 + If you have an OCTEON board with a Multimedia Card slot,
85 + say Y or M here.
86 +
87 + If unsure, say N.
88 +
89 config MMC_TIFM_SD
90 tristate "TI Flash Media MMC/SD Interface support"
91 depends on PCI
92 --- a/drivers/mmc/host/Makefile
93 +++ b/drivers/mmc/host/Makefile
94 @@ -20,6 +20,7 @@ obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci
95 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
96 obj-$(CONFIG_MMC_WBSD) += wbsd.o
97 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
98 +obj-$(CONFIG_MMC_OCTEON) += octeon_mmc.o
99 obj-$(CONFIG_MMC_MTK) += mtk-sd.o
100 obj-$(CONFIG_MMC_OMAP) += omap.o
101 obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
102 --- /dev/null
103 +++ b/drivers/mmc/host/octeon_mmc.c
104 @@ -0,0 +1,1518 @@
105 +/*
106 + * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
107 + *
108 + * This file is subject to the terms and conditions of the GNU General Public
109 + * License. See the file "COPYING" in the main directory of this archive
110 + * for more details.
111 + *
112 + * Copyright (C) 2012-2014 Cavium Inc.
113 + */
114 +
115 +#include <linux/platform_device.h>
116 +#include <linux/of_platform.h>
117 +#include <linux/scatterlist.h>
118 +#include <linux/interrupt.h>
119 +#include <linux/of_gpio.h>
120 +#include <linux/blkdev.h>
121 +#include <linux/device.h>
122 +#include <linux/module.h>
123 +#include <linux/delay.h>
124 +#include <linux/init.h>
125 +#include <linux/clk.h>
126 +#include <linux/err.h>
127 +#include <linux/io.h>
128 +#include <linux/of.h>
129 +
130 +#include <linux/mmc/card.h>
131 +#include <linux/mmc/host.h>
132 +#include <linux/mmc/mmc.h>
133 +#include <linux/mmc/sd.h>
134 +#include <net/irda/parameters.h>
135 +
136 +#include <asm/byteorder.h>
137 +#include <asm/octeon/octeon.h>
138 +#include <asm/octeon/cvmx-mio-defs.h>
139 +
140 +#define DRV_NAME "octeon_mmc"
141 +
142 +#define OCTEON_MAX_MMC 4
143 +
144 +#define OCT_MIO_NDF_DMA_CFG 0x00
145 +#define OCT_MIO_EMM_DMA_ADR 0x08
146 +
147 +#define OCT_MIO_EMM_CFG 0x00
148 +#define OCT_MIO_EMM_SWITCH 0x48
149 +#define OCT_MIO_EMM_DMA 0x50
150 +#define OCT_MIO_EMM_CMD 0x58
151 +#define OCT_MIO_EMM_RSP_STS 0x60
152 +#define OCT_MIO_EMM_RSP_LO 0x68
153 +#define OCT_MIO_EMM_RSP_HI 0x70
154 +#define OCT_MIO_EMM_INT 0x78
155 +#define OCT_MIO_EMM_INT_EN 0x80
156 +#define OCT_MIO_EMM_WDOG 0x88
157 +#define OCT_MIO_EMM_SAMPLE 0x90
158 +#define OCT_MIO_EMM_STS_MASK 0x98
159 +#define OCT_MIO_EMM_RCA 0xa0
160 +#define OCT_MIO_EMM_BUF_IDX 0xe0
161 +#define OCT_MIO_EMM_BUF_DAT 0xe8
162 +
163 +#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
164 +
165 +struct octeon_mmc_host {
166 + u64 base;
167 + u64 ndf_base;
168 + u64 emm_cfg;
169 + u64 n_minus_one; /* OCTEON II workaround location */
170 + int last_slot;
171 +
172 + struct semaphore mmc_serializer;
173 + struct mmc_request *current_req;
174 + unsigned int linear_buf_size;
175 + void *linear_buf;
176 + struct sg_mapping_iter smi;
177 + int sg_idx;
178 + bool dma_active;
179 +
180 + struct platform_device *pdev;
181 + int global_pwr_gpio;
182 + bool global_pwr_gpio_low;
183 + bool dma_err_pending;
184 + bool need_bootbus_lock;
185 + bool big_dma_addr;
186 + bool need_irq_handler_lock;
187 + spinlock_t irq_handler_lock;
188 +
189 + struct octeon_mmc_slot *slot[OCTEON_MAX_MMC];
190 +};
191 +
192 +struct octeon_mmc_slot {
193 + struct mmc_host *mmc; /* slot-level mmc_core object */
194 + struct octeon_mmc_host *host; /* common hw for all 4 slots */
195 +
196 + unsigned int clock;
197 + unsigned int sclock;
198 +
199 + u64 cached_switch;
200 + u64 cached_rca;
201 +
202 + unsigned int cmd_cnt; /* sample delay */
203 + unsigned int dat_cnt; /* sample delay */
204 +
205 + int bus_width;
206 + int bus_id;
207 + int ro_gpio;
208 + int cd_gpio;
209 + int pwr_gpio;
210 + bool cd_gpio_low;
211 + bool ro_gpio_low;
212 + bool pwr_gpio_low;
213 +};
214 +
215 +static int bb_size = 1 << 16;
216 +module_param(bb_size, int, S_IRUGO);
217 +MODULE_PARM_DESC(bb_size,
218 + "Size of DMA linearizing buffer (max transfer size).");
219 +
220 +static int ddr = 2;
221 +module_param(ddr, int, S_IRUGO);
222 +MODULE_PARM_DESC(ddr,
223 + "enable DoubleDataRate clocking: 0=no, 1=always, 2=at spi-max-frequency/2");
224 +
225 +#if 0
226 +#define octeon_mmc_dbg trace_printk
227 +#else
228 +static inline void octeon_mmc_dbg(const char *s, ...) { }
229 +#endif
230 +
231 +static void octeon_mmc_acquire_bus(struct octeon_mmc_host *host)
232 +{
233 + if (host->need_bootbus_lock) {
234 + down(&octeon_bootbus_sem);
235 + /* On cn70XX switch the mmc unit onto the bus. */
236 + if (OCTEON_IS_MODEL(OCTEON_CN70XX))
237 + cvmx_write_csr(CVMX_MIO_BOOT_CTL, 0);
238 + } else {
239 + down(&host->mmc_serializer);
240 + }
241 +}
242 +
243 +static void octeon_mmc_release_bus(struct octeon_mmc_host *host)
244 +{
245 + if (host->need_bootbus_lock)
246 + up(&octeon_bootbus_sem);
247 + else
248 + up(&host->mmc_serializer);
249 +}
250 +
251 +struct octeon_mmc_cr_type {
252 + u8 ctype;
253 + u8 rtype;
254 +};
255 +
256 +/*
257 + * The OCTEON MMC host hardware assumes that all commands have fixed
258 + * command and response types. These are correct if MMC devices are
259 + * being used. However, non-MMC devices like SD use command and
260 + * response types that are unexpected by the host hardware.
261 + *
262 + * The command and response types can be overridden by supplying an
263 + * XOR value that is applied to the type. We calculate the XOR value
264 + * from the values in this table and the flags passed from the MMC
265 + * core.
266 + */
267 +static struct octeon_mmc_cr_type octeon_mmc_cr_types[] = {
268 + {0, 0}, /* CMD0 */
269 + {0, 3}, /* CMD1 */
270 + {0, 2}, /* CMD2 */
271 + {0, 1}, /* CMD3 */
272 + {0, 0}, /* CMD4 */
273 + {0, 1}, /* CMD5 */
274 + {0, 1}, /* CMD6 */
275 + {0, 1}, /* CMD7 */
276 + {1, 1}, /* CMD8 */
277 + {0, 2}, /* CMD9 */
278 + {0, 2}, /* CMD10 */
279 + {1, 1}, /* CMD11 */
280 + {0, 1}, /* CMD12 */
281 + {0, 1}, /* CMD13 */
282 + {1, 1}, /* CMD14 */
283 + {0, 0}, /* CMD15 */
284 + {0, 1}, /* CMD16 */
285 + {1, 1}, /* CMD17 */
286 + {1, 1}, /* CMD18 */
287 + {3, 1}, /* CMD19 */
288 + {2, 1}, /* CMD20 */
289 + {0, 0}, /* CMD21 */
290 + {0, 0}, /* CMD22 */
291 + {0, 1}, /* CMD23 */
292 + {2, 1}, /* CMD24 */
293 + {2, 1}, /* CMD25 */
294 + {2, 1}, /* CMD26 */
295 + {2, 1}, /* CMD27 */
296 + {0, 1}, /* CMD28 */
297 + {0, 1}, /* CMD29 */
298 + {1, 1}, /* CMD30 */
299 + {1, 1}, /* CMD31 */
300 + {0, 0}, /* CMD32 */
301 + {0, 0}, /* CMD33 */
302 + {0, 0}, /* CMD34 */
303 + {0, 1}, /* CMD35 */
304 + {0, 1}, /* CMD36 */
305 + {0, 0}, /* CMD37 */
306 + {0, 1}, /* CMD38 */
307 + {0, 4}, /* CMD39 */
308 + {0, 5}, /* CMD40 */
309 + {0, 0}, /* CMD41 */
310 + {2, 1}, /* CMD42 */
311 + {0, 0}, /* CMD43 */
312 + {0, 0}, /* CMD44 */
313 + {0, 0}, /* CMD45 */
314 + {0, 0}, /* CMD46 */
315 + {0, 0}, /* CMD47 */
316 + {0, 0}, /* CMD48 */
317 + {0, 0}, /* CMD49 */
318 + {0, 0}, /* CMD50 */
319 + {0, 0}, /* CMD51 */
320 + {0, 0}, /* CMD52 */
321 + {0, 0}, /* CMD53 */
322 + {0, 0}, /* CMD54 */
323 + {0, 1}, /* CMD55 */
324 + {0xff, 0xff}, /* CMD56 */
325 + {0, 0}, /* CMD57 */
326 + {0, 0}, /* CMD58 */
327 + {0, 0}, /* CMD59 */
328 + {0, 0}, /* CMD60 */
329 + {0, 0}, /* CMD61 */
330 + {0, 0}, /* CMD62 */
331 + {0, 0} /* CMD63 */
332 +};
333 +
334 +struct octeon_mmc_cr_mods {
335 + u8 ctype_xor;
336 + u8 rtype_xor;
337 +};
338 +
339 +/*
340 + * The functions below are used for the EMMC-17978 workaround.
341 + *
342 + * Due to an imperfection in the design of the MMC bus hardware,
343 + * the 2nd to last cache block of a DMA read must be locked into the L2 Cache.
344 + * Otherwise, data corruption may occur.
345 + */
346 +
347 +static inline void *phys_to_ptr(u64 address)
348 +{
349 + return (void *)(address | (1ull<<63)); /* XKPHYS */
350 +}
351 +
352 +/**
353 + * Lock a single line into L2. The line is zeroed before locking
354 + * to make sure no dram accesses are made.
355 + *
356 + * @addr Physical address to lock
357 + */
358 +static void l2c_lock_line(u64 addr)
359 +{
360 + char *addr_ptr = phys_to_ptr(addr);
361 +
362 + asm volatile (
363 + "cache 31, %[line]" /* Lock the line */
364 + :: [line] "m" (*addr_ptr));
365 +}
366 +
367 +/**
368 + * Locks a memory region in the L2 cache
369 + *
370 + * @start - start address to begin locking
371 + * @len - length in bytes to lock
372 + */
373 +static void l2c_lock_mem_region(u64 start, u64 len)
374 +{
375 + u64 end;
376 +
377 + /* Round start/end to cache line boundaries */
378 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
379 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
380 +
381 + while (start <= end) {
382 + l2c_lock_line(start);
383 + start += CVMX_CACHE_LINE_SIZE;
384 + }
385 + asm volatile("sync");
386 +}
387 +
388 +/**
389 + * Unlock a single line in the L2 cache.
390 + *
391 + * @addr Physical address to unlock
392 + *
393 + * No return value.
394 + */
395 +static void l2c_unlock_line(u64 addr)
396 +{
397 + char *addr_ptr = phys_to_ptr(addr);
398 + asm volatile (
399 + "cache 23, %[line]" /* Unlock the line */
400 + :: [line] "m" (*addr_ptr));
401 +}
402 +
403 +/**
404 + * Unlock a memory region in the L2 cache
405 + *
406 + * @start - start address to unlock
407 + * @len - length to unlock in bytes
408 + */
409 +static void l2c_unlock_mem_region(u64 start, u64 len)
410 +{
411 + u64 end;
412 +
413 + /* Round start/end to cache line boundaries */
414 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
415 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
416 +
417 + while (start <= end) {
418 + l2c_unlock_line(start);
419 + start += CVMX_CACHE_LINE_SIZE;
420 + }
421 +}
422 +
423 +static struct octeon_mmc_cr_mods octeon_mmc_get_cr_mods(struct mmc_command *cmd)
424 +{
425 + struct octeon_mmc_cr_type *cr;
426 + u8 desired_ctype, hardware_ctype;
427 + u8 desired_rtype, hardware_rtype;
428 + struct octeon_mmc_cr_mods r;
429 +
430 + desired_ctype = desired_rtype = 0;
431 +
432 + cr = octeon_mmc_cr_types + (cmd->opcode & 0x3f);
433 + hardware_ctype = cr->ctype;
434 + hardware_rtype = cr->rtype;
435 + if (cmd->opcode == 56) { /* CMD56 GEN_CMD */
436 + hardware_ctype = (cmd->arg & 1) ? 1 : 2;
437 + }
438 +
439 + switch (mmc_cmd_type(cmd)) {
440 + case MMC_CMD_ADTC:
441 + desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
442 + break;
443 + case MMC_CMD_AC:
444 + case MMC_CMD_BC:
445 + case MMC_CMD_BCR:
446 + desired_ctype = 0;
447 + break;
448 + }
449 +
450 + switch (mmc_resp_type(cmd)) {
451 + case MMC_RSP_NONE:
452 + desired_rtype = 0;
453 + break;
454 + case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
455 + case MMC_RSP_R1B:
456 + desired_rtype = 1;
457 + break;
458 + case MMC_RSP_R2:
459 + desired_rtype = 2;
460 + break;
461 + case MMC_RSP_R3: /* MMC_RSP_R4 */
462 + desired_rtype = 3;
463 + break;
464 + }
465 + r.ctype_xor = desired_ctype ^ hardware_ctype;
466 + r.rtype_xor = desired_rtype ^ hardware_rtype;
467 + return r;
468 +}
469 +
470 +static bool octeon_mmc_switch_val_changed(struct octeon_mmc_slot *slot,
471 + u64 new_val)
472 +{
473 + /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
474 + u64 m = 0x3001070fffffffffull;
475 +
476 + return (slot->cached_switch & m) != (new_val & m);
477 +}
478 +
479 +static unsigned int octeon_mmc_timeout_to_wdog(struct octeon_mmc_slot *slot,
480 + unsigned int ns)
481 +{
482 + u64 bt = (u64)slot->clock * (u64)ns;
483 +
484 + return (unsigned int)(bt / 1000000000);
485 +}
486 +
487 +static irqreturn_t octeon_mmc_interrupt(int irq, void *dev_id)
488 +{
489 + struct octeon_mmc_host *host = dev_id;
490 + union cvmx_mio_emm_int emm_int;
491 + struct mmc_request *req;
492 + bool host_done;
493 + union cvmx_mio_emm_rsp_sts rsp_sts;
494 + unsigned long flags = 0;
495 +
496 + if (host->need_irq_handler_lock)
497 + spin_lock_irqsave(&host->irq_handler_lock, flags);
498 + emm_int.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
499 + req = host->current_req;
500 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
501 +
502 + octeon_mmc_dbg("Got interrupt: EMM_INT = 0x%llx\n", emm_int.u64);
503 +
504 + if (!req)
505 + goto out;
506 +
507 + rsp_sts.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
508 + octeon_mmc_dbg("octeon_mmc_interrupt MIO_EMM_RSP_STS 0x%llx\n",
509 + rsp_sts.u64);
510 +
511 + if (host->dma_err_pending) {
512 + host->current_req = NULL;
513 + host->dma_err_pending = false;
514 + req->done(req);
515 + host_done = true;
516 + goto no_req_done;
517 + }
518 +
519 + if (!host->dma_active && emm_int.s.buf_done && req->data) {
520 + unsigned int type = (rsp_sts.u64 >> 7) & 3;
521 +
522 + if (type == 1) {
523 + /* Read */
524 + int dbuf = rsp_sts.s.dbuf;
525 + struct sg_mapping_iter *smi = &host->smi;
526 + unsigned int data_len =
527 + req->data->blksz * req->data->blocks;
528 + unsigned int bytes_xfered;
529 + u64 dat = 0;
530 + int shift = -1;
531 +
532 + /* Auto inc from offset zero */
533 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
534 + (u64)(0x10000 | (dbuf << 6)));
535 +
536 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
537 + if (smi->consumed >= smi->length) {
538 + if (!sg_miter_next(smi))
539 + break;
540 + smi->consumed = 0;
541 + }
542 + if (shift < 0) {
543 + dat = cvmx_read_csr(host->base +
544 + OCT_MIO_EMM_BUF_DAT);
545 + shift = 56;
546 + }
547 +
548 + while (smi->consumed < smi->length &&
549 + shift >= 0) {
550 + ((u8 *)(smi->addr))[smi->consumed] =
551 + (dat >> shift) & 0xff;
552 + bytes_xfered++;
553 + smi->consumed++;
554 + shift -= 8;
555 + }
556 + }
557 + sg_miter_stop(smi);
558 + req->data->bytes_xfered = bytes_xfered;
559 + req->data->error = 0;
560 + } else if (type == 2) {
561 + /* write */
562 + req->data->bytes_xfered = req->data->blksz *
563 + req->data->blocks;
564 + req->data->error = 0;
565 + }
566 + }
567 + host_done = emm_int.s.cmd_done || emm_int.s.dma_done ||
568 + emm_int.s.cmd_err || emm_int.s.dma_err;
569 + if (host_done && req->done) {
570 + if (rsp_sts.s.rsp_bad_sts ||
571 + rsp_sts.s.rsp_crc_err ||
572 + rsp_sts.s.rsp_timeout ||
573 + rsp_sts.s.blk_crc_err ||
574 + rsp_sts.s.blk_timeout ||
575 + rsp_sts.s.dbuf_err) {
576 + req->cmd->error = -EILSEQ;
577 + } else {
578 + req->cmd->error = 0;
579 + }
580 +
581 + if (host->dma_active && req->data) {
582 + req->data->error = 0;
583 + req->data->bytes_xfered = req->data->blocks *
584 + req->data->blksz;
585 + if (!(req->data->flags & MMC_DATA_WRITE) &&
586 + req->data->sg_len > 1) {
587 + size_t r = sg_copy_from_buffer(req->data->sg,
588 + req->data->sg_len, host->linear_buf,
589 + req->data->bytes_xfered);
590 + WARN_ON(r != req->data->bytes_xfered);
591 + }
592 + }
593 + if (rsp_sts.s.rsp_val) {
594 + u64 rsp_hi;
595 + u64 rsp_lo = cvmx_read_csr(
596 + host->base + OCT_MIO_EMM_RSP_LO);
597 +
598 + switch (rsp_sts.s.rsp_type) {
599 + case 1:
600 + case 3:
601 + req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
602 + req->cmd->resp[1] = 0;
603 + req->cmd->resp[2] = 0;
604 + req->cmd->resp[3] = 0;
605 + break;
606 + case 2:
607 + req->cmd->resp[3] = rsp_lo & 0xffffffff;
608 + req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
609 + rsp_hi = cvmx_read_csr(host->base +
610 + OCT_MIO_EMM_RSP_HI);
611 + req->cmd->resp[1] = rsp_hi & 0xffffffff;
612 + req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
613 + break;
614 + default:
615 + octeon_mmc_dbg("octeon_mmc_interrupt unhandled rsp_val %d\n",
616 + rsp_sts.s.rsp_type);
617 + break;
618 + }
619 + octeon_mmc_dbg("octeon_mmc_interrupt resp %08x %08x %08x %08x\n",
620 + req->cmd->resp[0], req->cmd->resp[1],
621 + req->cmd->resp[2], req->cmd->resp[3]);
622 + }
623 + if (emm_int.s.dma_err && rsp_sts.s.dma_pend) {
624 + /* Try to clean up failed DMA */
625 + union cvmx_mio_emm_dma emm_dma;
626 +
627 + emm_dma.u64 =
628 + cvmx_read_csr(host->base + OCT_MIO_EMM_DMA);
629 + emm_dma.s.dma_val = 1;
630 + emm_dma.s.dat_null = 1;
631 + emm_dma.s.bus_id = rsp_sts.s.bus_id;
632 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA,
633 + emm_dma.u64);
634 + host->dma_err_pending = true;
635 + host_done = false;
636 + goto no_req_done;
637 + }
638 +
639 + host->current_req = NULL;
640 + req->done(req);
641 + }
642 +no_req_done:
643 + if (host->n_minus_one) {
644 + l2c_unlock_mem_region(host->n_minus_one, 512);
645 + host->n_minus_one = 0;
646 + }
647 + if (host_done)
648 + octeon_mmc_release_bus(host);
649 +out:
650 + if (host->need_irq_handler_lock)
651 + spin_unlock_irqrestore(&host->irq_handler_lock, flags);
652 + return IRQ_RETVAL(emm_int.u64 != 0);
653 +}
654 +
655 +static void octeon_mmc_switch_to(struct octeon_mmc_slot *slot)
656 +{
657 + struct octeon_mmc_host *host = slot->host;
658 + struct octeon_mmc_slot *old_slot;
659 + union cvmx_mio_emm_switch sw;
660 + union cvmx_mio_emm_sample samp;
661 +
662 + if (slot->bus_id == host->last_slot)
663 + goto out;
664 +
665 + if (host->last_slot >= 0) {
666 + old_slot = host->slot[host->last_slot];
667 + old_slot->cached_switch =
668 + cvmx_read_csr(host->base + OCT_MIO_EMM_SWITCH);
669 + old_slot->cached_rca =
670 + cvmx_read_csr(host->base + OCT_MIO_EMM_RCA);
671 + }
672 + cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, slot->cached_rca);
673 + sw.u64 = slot->cached_switch;
674 + sw.s.bus_id = 0;
675 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
676 + sw.s.bus_id = slot->bus_id;
677 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
678 +
679 + samp.u64 = 0;
680 + samp.s.cmd_cnt = slot->cmd_cnt;
681 + samp.s.dat_cnt = slot->dat_cnt;
682 + cvmx_write_csr(host->base + OCT_MIO_EMM_SAMPLE, samp.u64);
683 +out:
684 + host->last_slot = slot->bus_id;
685 +}
686 +
687 +static void octeon_mmc_dma_request(struct mmc_host *mmc,
688 + struct mmc_request *mrq)
689 +{
690 + struct octeon_mmc_slot *slot;
691 + struct octeon_mmc_host *host;
692 + struct mmc_command *cmd;
693 + struct mmc_data *data;
694 + union cvmx_mio_emm_int emm_int;
695 + union cvmx_mio_emm_dma emm_dma;
696 + union cvmx_mio_ndf_dma_cfg dma_cfg;
697 +
698 + cmd = mrq->cmd;
699 + if (mrq->data == NULL || mrq->data->sg == NULL || !mrq->data->sg_len ||
700 + mrq->stop == NULL || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
701 + dev_err(&mmc->card->dev,
702 + "Error: octeon_mmc_dma_request no data\n");
703 + cmd->error = -EINVAL;
704 + if (mrq->done)
705 + mrq->done(mrq);
706 + return;
707 + }
708 +
709 + slot = mmc_priv(mmc);
710 + host = slot->host;
711 +
712 + /* Only a single user of the bootbus at a time. */
713 + octeon_mmc_acquire_bus(host);
714 +
715 + octeon_mmc_switch_to(slot);
716 +
717 + data = mrq->data;
718 +
719 + if (data->timeout_ns) {
720 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
721 + octeon_mmc_timeout_to_wdog(slot, data->timeout_ns));
722 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
723 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
724 + }
725 +
726 + WARN_ON(host->current_req);
727 + host->current_req = mrq;
728 +
729 + host->sg_idx = 0;
730 +
731 + WARN_ON(data->blksz * data->blocks > host->linear_buf_size);
732 +
733 + if ((data->flags & MMC_DATA_WRITE) && data->sg_len > 1) {
734 + size_t r = sg_copy_to_buffer(data->sg, data->sg_len,
735 + host->linear_buf, data->blksz * data->blocks);
736 + WARN_ON(data->blksz * data->blocks != r);
737 + }
738 +
739 + dma_cfg.u64 = 0;
740 + dma_cfg.s.en = 1;
741 + dma_cfg.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
742 +#ifdef __LITTLE_ENDIAN
743 + dma_cfg.s.endian = 1;
744 +#endif
745 + dma_cfg.s.size = ((data->blksz * data->blocks) / 8) - 1;
746 + if (!host->big_dma_addr) {
747 + if (data->sg_len > 1)
748 + dma_cfg.s.adr = virt_to_phys(host->linear_buf);
749 + else
750 + dma_cfg.s.adr = sg_phys(data->sg);
751 + }
752 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG, dma_cfg.u64);
753 + octeon_mmc_dbg("MIO_NDF_DMA_CFG: %016llx\n",
754 + (unsigned long long)dma_cfg.u64);
755 + if (host->big_dma_addr) {
756 + u64 addr;
757 +
758 + if (data->sg_len > 1)
759 + addr = virt_to_phys(host->linear_buf);
760 + else
761 + addr = sg_phys(data->sg);
762 + cvmx_write_csr(host->ndf_base + OCT_MIO_EMM_DMA_ADR, addr);
763 + octeon_mmc_dbg("MIO_EMM_DMA_ADR: %016llx\n",
764 + (unsigned long long)addr);
765 + }
766 +
767 + emm_dma.u64 = 0;
768 + emm_dma.s.bus_id = slot->bus_id;
769 + emm_dma.s.dma_val = 1;
770 + emm_dma.s.sector = mmc_card_blockaddr(mmc->card) ? 1 : 0;
771 + emm_dma.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
772 + if (mmc_card_mmc(mmc->card) ||
773 + (mmc_card_sd(mmc->card) &&
774 + (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
775 + emm_dma.s.multi = 1;
776 + emm_dma.s.block_cnt = data->blocks;
777 + emm_dma.s.card_addr = cmd->arg;
778 +
779 + emm_int.u64 = 0;
780 + emm_int.s.dma_done = 1;
781 + emm_int.s.cmd_err = 1;
782 + emm_int.s.dma_err = 1;
783 + /* Clear the bit. */
784 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
785 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
786 + host->dma_active = true;
787 +
788 + if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
789 + OCTEON_IS_MODEL(OCTEON_CNF7XXX)) &&
790 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK &&
791 + (data->blksz * data->blocks) > 1024) {
792 + host->n_minus_one = dma_cfg.s.adr +
793 + (data->blksz * data->blocks) - 1024;
794 + l2c_lock_mem_region(host->n_minus_one, 512);
795 + }
796 +
797 + if (mmc->card && mmc_card_sd(mmc->card))
798 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
799 + 0x00b00000ull);
800 + else
801 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
802 + 0xe4f90080ull);
803 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA, emm_dma.u64);
804 + octeon_mmc_dbg("MIO_EMM_DMA: %llx\n", emm_dma.u64);
805 +}
806 +
807 +static void octeon_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
808 +{
809 + struct octeon_mmc_slot *slot;
810 + struct octeon_mmc_host *host;
811 + struct mmc_command *cmd;
812 + union cvmx_mio_emm_int emm_int;
813 + union cvmx_mio_emm_cmd emm_cmd;
814 + struct octeon_mmc_cr_mods mods;
815 +
816 + cmd = mrq->cmd;
817 +
818 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
819 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) {
820 + octeon_mmc_dma_request(mmc, mrq);
821 + return;
822 + }
823 +
824 + mods = octeon_mmc_get_cr_mods(cmd);
825 +
826 + slot = mmc_priv(mmc);
827 + host = slot->host;
828 +
829 + /* Only a single user of the bootbus at a time. */
830 + octeon_mmc_acquire_bus(host);
831 +
832 + octeon_mmc_switch_to(slot);
833 +
834 + WARN_ON(host->current_req);
835 + host->current_req = mrq;
836 +
837 + emm_int.u64 = 0;
838 + emm_int.s.cmd_done = 1;
839 + emm_int.s.cmd_err = 1;
840 + if (cmd->data) {
841 + octeon_mmc_dbg("command has data\n");
842 + if (cmd->data->flags & MMC_DATA_READ) {
843 + sg_miter_start(&host->smi, mrq->data->sg,
844 + mrq->data->sg_len,
845 + SG_MITER_ATOMIC | SG_MITER_TO_SG);
846 + } else {
847 + struct sg_mapping_iter *smi = &host->smi;
848 + unsigned int data_len =
849 + mrq->data->blksz * mrq->data->blocks;
850 + unsigned int bytes_xfered;
851 + u64 dat = 0;
852 + int shift = 56;
853 + /*
854 + * Copy data to the xmit buffer before
855 + * issuing the command
856 + */
857 + sg_miter_start(smi, mrq->data->sg,
858 + mrq->data->sg_len, SG_MITER_FROM_SG);
859 + /* Auto inc from offset zero, dbuf zero */
860 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
861 + 0x10000ull);
862 +
863 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
864 + if (smi->consumed >= smi->length) {
865 + if (!sg_miter_next(smi))
866 + break;
867 + smi->consumed = 0;
868 + }
869 +
870 + while (smi->consumed < smi->length &&
871 + shift >= 0) {
872 +
873 + dat |= (u64)(((u8 *)(smi->addr))
874 + [smi->consumed]) << shift;
875 + bytes_xfered++;
876 + smi->consumed++;
877 + shift -= 8;
878 + }
879 + if (shift < 0) {
880 + cvmx_write_csr(host->base +
881 + OCT_MIO_EMM_BUF_DAT, dat);
882 + shift = 56;
883 + dat = 0;
884 + }
885 + }
886 + sg_miter_stop(smi);
887 + }
888 + if (cmd->data->timeout_ns) {
889 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
890 + octeon_mmc_timeout_to_wdog(slot,
891 + cmd->data->timeout_ns));
892 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
893 + cvmx_read_csr(host->base +
894 + OCT_MIO_EMM_WDOG));
895 + }
896 + } else {
897 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
898 + ((u64)slot->clock * 850ull) / 1000ull);
899 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
900 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
901 + }
902 + /* Clear the bit. */
903 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
904 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
905 + host->dma_active = false;
906 +
907 + emm_cmd.u64 = 0;
908 + emm_cmd.s.cmd_val = 1;
909 + emm_cmd.s.ctype_xor = mods.ctype_xor;
910 + emm_cmd.s.rtype_xor = mods.rtype_xor;
911 + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
912 + emm_cmd.s.offset = 64 -
913 + ((cmd->data->blksz * cmd->data->blocks) / 8);
914 + emm_cmd.s.bus_id = slot->bus_id;
915 + emm_cmd.s.cmd_idx = cmd->opcode;
916 + emm_cmd.s.arg = cmd->arg;
917 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0);
918 + cvmx_write_csr(host->base + OCT_MIO_EMM_CMD, emm_cmd.u64);
919 + octeon_mmc_dbg("MIO_EMM_CMD: %llx\n", emm_cmd.u64);
920 +}
921 +
922 +static void octeon_mmc_reset_bus(struct octeon_mmc_slot *slot, int preserve)
923 +{
924 + union cvmx_mio_emm_cfg emm_cfg;
925 + union cvmx_mio_emm_switch emm_switch;
926 + u64 wdog = 0;
927 +
928 + emm_cfg.u64 = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_CFG);
929 + if (preserve) {
930 + emm_switch.u64 = cvmx_read_csr(slot->host->base +
931 + OCT_MIO_EMM_SWITCH);
932 + wdog = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_WDOG);
933 + }
934 +
935 + /* Restore switch settings */
936 + if (preserve) {
937 + emm_switch.s.switch_exe = 0;
938 + emm_switch.s.switch_err0 = 0;
939 + emm_switch.s.switch_err1 = 0;
940 + emm_switch.s.switch_err2 = 0;
941 + emm_switch.s.bus_id = 0;
942 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
943 + emm_switch.u64);
944 + emm_switch.s.bus_id = slot->bus_id;
945 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
946 + emm_switch.u64);
947 +
948 + slot->cached_switch = emm_switch.u64;
949 +
950 + msleep(10);
951 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_WDOG, wdog);
952 + } else {
953 + slot->cached_switch = 0;
954 + }
955 +}
956 +
957 +static void octeon_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
958 +{
959 + struct octeon_mmc_slot *slot;
960 + struct octeon_mmc_host *host;
961 + int bus_width;
962 + int clock;
963 + bool ddr_clock;
964 + int hs_timing;
965 + int power_class = 10;
966 + int clk_period;
967 + int timeout = 2000;
968 + union cvmx_mio_emm_switch emm_switch;
969 + union cvmx_mio_emm_rsp_sts emm_sts;
970 +
971 + slot = mmc_priv(mmc);
972 + host = slot->host;
973 +
974 + /* Only a single user of the bootbus at a time. */
975 + octeon_mmc_acquire_bus(host);
976 +
977 + octeon_mmc_switch_to(slot);
978 +
979 + octeon_mmc_dbg("Calling set_ios: slot: clk = 0x%x, bus_width = %d\n",
980 + slot->clock, slot->bus_width);
981 + octeon_mmc_dbg("Calling set_ios: ios: clk = 0x%x, vdd = %u, bus_width = %u, power_mode = %u, timing = %u\n",
982 + ios->clock, ios->vdd, ios->bus_width, ios->power_mode,
983 + ios->timing);
984 + octeon_mmc_dbg("Calling set_ios: mmc: caps = 0x%x, bus_width = %d\n",
985 + mmc->caps, mmc->ios.bus_width);
986 +
987 + /*
988 + * Reset the chip on each power off
989 + */
990 + if (ios->power_mode == MMC_POWER_OFF) {
991 + octeon_mmc_reset_bus(slot, 1);
992 + if (slot->pwr_gpio >= 0)
993 + gpio_set_value_cansleep(slot->pwr_gpio,
994 + slot->pwr_gpio_low);
995 + } else {
996 + if (slot->pwr_gpio >= 0)
997 + gpio_set_value_cansleep(slot->pwr_gpio,
998 + !slot->pwr_gpio_low);
999 + }
1000 +
1001 + switch (ios->bus_width) {
1002 + case MMC_BUS_WIDTH_8:
1003 + bus_width = 2;
1004 + break;
1005 + case MMC_BUS_WIDTH_4:
1006 + bus_width = 1;
1007 + break;
1008 + case MMC_BUS_WIDTH_1:
1009 + bus_width = 0;
1010 + break;
1011 + default:
1012 + octeon_mmc_dbg("unknown bus width %d\n", ios->bus_width);
1013 + bus_width = 0;
1014 + break;
1015 + }
1016 +
1017 + hs_timing = (ios->timing == MMC_TIMING_MMC_HS);
1018 + ddr_clock = (bus_width && ios->timing >= MMC_TIMING_UHS_DDR50);
1019 +
1020 + if (ddr_clock)
1021 + bus_width |= 4;
1022 +
1023 + if (ios->clock) {
1024 + slot->clock = ios->clock;
1025 + slot->bus_width = bus_width;
1026 +
1027 + clock = slot->clock;
1028 +
1029 + if (clock > 52000000)
1030 + clock = 52000000;
1031 +
1032 + clk_period = (octeon_get_io_clock_rate() + clock - 1) /
1033 + (2 * clock);
1034 +
1035 + /* until clock-renengotiate-on-CRC is in */
1036 + if (ddr_clock && ddr > 1)
1037 + clk_period *= 2;
1038 +
1039 + emm_switch.u64 = 0;
1040 + emm_switch.s.hs_timing = hs_timing;
1041 + emm_switch.s.bus_width = bus_width;
1042 + emm_switch.s.power_class = power_class;
1043 + emm_switch.s.clk_hi = clk_period;
1044 + emm_switch.s.clk_lo = clk_period;
1045 +
1046 + if (!octeon_mmc_switch_val_changed(slot, emm_switch.u64)) {
1047 + octeon_mmc_dbg("No change from 0x%llx mio_emm_switch, returning.\n",
1048 + emm_switch.u64);
1049 + goto out;
1050 + }
1051 +
1052 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_wdog\n",
1053 + ((u64)clock * 850ull) / 1000ull);
1054 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
1055 + ((u64)clock * 850ull) / 1000ull);
1056 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_switch\n",
1057 + emm_switch.u64);
1058 +
1059 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1060 + emm_switch.s.bus_id = slot->bus_id;
1061 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1062 + slot->cached_switch = emm_switch.u64;
1063 +
1064 + do {
1065 + emm_sts.u64 =
1066 + cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
1067 + if (!emm_sts.s.switch_val)
1068 + break;
1069 + udelay(100);
1070 + } while (timeout-- > 0);
1071 +
1072 + if (timeout <= 0) {
1073 + octeon_mmc_dbg("switch command timed out, status=0x%llx\n",
1074 + emm_sts.u64);
1075 + goto out;
1076 + }
1077 + }
1078 +out:
1079 + octeon_mmc_release_bus(host);
1080 +}
1081 +
1082 +static int octeon_mmc_get_ro(struct mmc_host *mmc)
1083 +{
1084 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1085 +
1086 + if (slot->ro_gpio >= 0) {
1087 + int pin = gpio_get_value_cansleep(slot->ro_gpio);
1088 +
1089 + if (pin < 0)
1090 + return pin;
1091 + if (slot->ro_gpio_low)
1092 + pin = !pin;
1093 + return pin;
1094 + } else {
1095 + return -ENOSYS;
1096 + }
1097 +}
1098 +
1099 +static int octeon_mmc_get_cd(struct mmc_host *mmc)
1100 +{
1101 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1102 +
1103 + if (slot->cd_gpio >= 0) {
1104 + int pin = gpio_get_value_cansleep(slot->cd_gpio);
1105 +
1106 + if (pin < 0)
1107 + return pin;
1108 + if (slot->cd_gpio_low)
1109 + pin = !pin;
1110 + return pin;
1111 + } else {
1112 + return -ENOSYS;
1113 + }
1114 +}
1115 +
1116 +static const struct mmc_host_ops octeon_mmc_ops = {
1117 + .request = octeon_mmc_request,
1118 + .set_ios = octeon_mmc_set_ios,
1119 + .get_ro = octeon_mmc_get_ro,
1120 + .get_cd = octeon_mmc_get_cd,
1121 +};
1122 +
1123 +static void octeon_mmc_set_clock(struct octeon_mmc_slot *slot,
1124 + unsigned int clock)
1125 +{
1126 + struct mmc_host *mmc = slot->mmc;
1127 +
1128 + clock = min(clock, mmc->f_max);
1129 + clock = max(clock, mmc->f_min);
1130 + slot->clock = clock;
1131 +}
1132 +
1133 +static int octeon_mmc_initlowlevel(struct octeon_mmc_slot *slot,
1134 + int bus_width)
1135 +{
1136 + union cvmx_mio_emm_switch emm_switch;
1137 + struct octeon_mmc_host *host = slot->host;
1138 +
1139 + host->emm_cfg |= 1ull << slot->bus_id;
1140 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_CFG, host->emm_cfg);
1141 + octeon_mmc_set_clock(slot, 400000);
1142 +
1143 + /* Program initial clock speed and power */
1144 + emm_switch.u64 = 0;
1145 + emm_switch.s.power_class = 10;
1146 + emm_switch.s.clk_hi = (slot->sclock / slot->clock) / 2;
1147 + emm_switch.s.clk_lo = (slot->sclock / slot->clock) / 2;
1148 +
1149 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1150 + emm_switch.s.bus_id = slot->bus_id;
1151 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1152 + slot->cached_switch = emm_switch.u64;
1153 +
1154 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
1155 + ((u64)slot->clock * 850ull) / 1000ull);
1156 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0xe4f90080ull);
1157 + cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, 1);
1158 + return 0;
1159 +}
1160 +
1161 +static int octeon_init_slot(struct octeon_mmc_host *host, int id,
1162 + int bus_width, int max_freq,
1163 + int ro_gpio, int cd_gpio, int pwr_gpio,
1164 + bool ro_low, bool cd_low, bool power_low,
1165 + u32 cmd_skew, u32 dat_skew)
1166 +{
1167 + struct mmc_host *mmc;
1168 + struct octeon_mmc_slot *slot;
1169 + u64 clock_period;
1170 + int ret;
1171 +
1172 + /*
1173 + * Allocate MMC structure
1174 + */
1175 + mmc = mmc_alloc_host(sizeof(struct octeon_mmc_slot), &host->pdev->dev);
1176 + if (!mmc) {
1177 + dev_err(&host->pdev->dev, "alloc host failed\n");
1178 + return -ENOMEM;
1179 + }
1180 +
1181 + slot = mmc_priv(mmc);
1182 + slot->mmc = mmc;
1183 + slot->host = host;
1184 + slot->ro_gpio = ro_gpio;
1185 + slot->cd_gpio = cd_gpio;
1186 + slot->pwr_gpio = pwr_gpio;
1187 + slot->ro_gpio_low = ro_low;
1188 + slot->cd_gpio_low = cd_low;
1189 + slot->pwr_gpio_low = power_low;
1190 +
1191 + if (slot->ro_gpio >= 0) {
1192 + ret = gpio_request(slot->ro_gpio, "mmc_ro");
1193 + if (ret) {
1194 + dev_err(&host->pdev->dev,
1195 + "Could not request mmc_ro GPIO %d\n",
1196 + slot->ro_gpio);
1197 + return ret;
1198 + }
1199 + gpio_direction_input(slot->ro_gpio);
1200 + }
1201 + if (slot->cd_gpio >= 0) {
1202 + ret = gpio_request(slot->cd_gpio, "mmc_card_detect");
1203 + if (ret) {
1204 + if (slot->ro_gpio >= 0)
1205 + gpio_free(slot->ro_gpio);
1206 + dev_err(&host->pdev->dev, "Could not request mmc_card_detect GPIO %d\n",
1207 + slot->cd_gpio);
1208 + return ret;
1209 + }
1210 + gpio_direction_input(slot->cd_gpio);
1211 + }
1212 + if (slot->pwr_gpio >= 0) {
1213 + ret = gpio_request(slot->pwr_gpio, "mmc_power");
1214 + if (ret) {
1215 + dev_err(&host->pdev->dev,
1216 + "Could not request mmc_power GPIO %d\n",
1217 + slot->pwr_gpio);
1218 + if (slot->ro_gpio >= 0)
1219 + gpio_free(slot->ro_gpio);
1220 + if (slot->cd_gpio >= 0) /* GPIO 0 is valid; negative means absent */
1221 + gpio_free(slot->cd_gpio);
1222 + return ret;
1223 + }
1224 + octeon_mmc_dbg("%s: Shutting off power to slot %d via gpio %d\n",
1225 + DRV_NAME, slot->bus_id, slot->pwr_gpio);
1226 + gpio_direction_output(slot->pwr_gpio,
1227 + slot->pwr_gpio_low);
1228 + }
1229 + /*
1230 + * Set up host parameters.
1231 + */
1232 + mmc->ops = &octeon_mmc_ops;
1233 + mmc->f_min = 400000;
1234 + mmc->f_max = max_freq;
1235 + mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1236 + MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
1237 + MMC_CAP_ERASE;
1238 + mmc->ocr_avail = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 |
1239 + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 |
1240 + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36;
1241 +
1242 + /* post-sdk23 caps */
1243 + mmc->caps |=
1244 + ((mmc->f_max >= 12000000) * MMC_CAP_UHS_SDR12) |
1245 + ((mmc->f_max >= 25000000) * MMC_CAP_UHS_SDR25) |
1246 + ((mmc->f_max >= 50000000) * MMC_CAP_UHS_SDR50) |
1247 + MMC_CAP_CMD23;
1248 +
1249 + if (host->global_pwr_gpio >= 0)
1250 + mmc->caps |= MMC_CAP_POWER_OFF_CARD;
1251 +
1252 + /* "1.8v" capability is actually 1.8-or-3.3v */
1253 + if (ddr)
1254 + mmc->caps |= MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR;
1255 +
1256 + mmc->max_segs = 64;
1257 + mmc->max_seg_size = host->linear_buf_size;
1258 + mmc->max_req_size = host->linear_buf_size;
1259 + mmc->max_blk_size = 512;
1260 + mmc->max_blk_count = mmc->max_req_size / 512;
1261 +
1262 + slot->clock = mmc->f_min;
1263 + slot->sclock = octeon_get_io_clock_rate();
1264 +
1265 + clock_period = 1000000000000ull / slot->sclock; /* period in pS */
1266 + slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1267 + slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1268 +
1269 + slot->bus_width = bus_width;
1270 + slot->bus_id = id;
1271 + slot->cached_rca = 1;
1272 +
1273 + /* Only a single user of the bootbus at a time. */
1274 + octeon_mmc_acquire_bus(host);
1275 + host->slot[id] = slot;
1276 +
1277 + octeon_mmc_switch_to(slot);
1278 + /* Initialize MMC Block. */
1279 + octeon_mmc_initlowlevel(slot, bus_width);
1280 +
1281 + octeon_mmc_release_bus(host);
1282 +
1283 + ret = mmc_add_host(mmc);
1284 + octeon_mmc_dbg("mmc_add_host returned %d\n", ret);
1285 +
1286 + return ret; /* propagate mmc_add_host() failure to the probe path */
1287 +}
1288 +
1289 +static int octeon_mmc_probe(struct platform_device *pdev)
1290 +{
1291 + union cvmx_mio_emm_cfg emm_cfg;
1292 + struct octeon_mmc_host *host;
1293 + struct resource *res;
1294 + void __iomem *base;
1295 + int mmc_irq[9];
1296 + int i;
1297 + int ret = 0;
1298 + struct device_node *node = pdev->dev.of_node;
1299 + bool cn78xx_style;
1300 + u64 t;
1301 + enum of_gpio_flags f;
1302 +
1303 + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
1304 + if (!host)
1305 + return -ENOMEM;
1306 +
1307 + spin_lock_init(&host->irq_handler_lock);
1308 + sema_init(&host->mmc_serializer, 1);
1309 +
1310 + cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-mmc");
1311 + if (cn78xx_style) {
1312 + host->need_bootbus_lock = false;
1313 + host->big_dma_addr = true;
1314 + host->need_irq_handler_lock = true;
1315 + /*
1316 + * First seven are the EMM_INT bits 0..6, then two for
1317 + * the EMM_DMA_INT bits
1318 + */
1319 + for (i = 0; i < 9; i++) {
1320 + mmc_irq[i] = platform_get_irq(pdev, i);
1321 + if (mmc_irq[i] < 0)
1322 + return mmc_irq[i];
1323 + }
1324 + } else {
1325 + host->need_bootbus_lock = true;
1326 + host->big_dma_addr = false;
1327 + host->need_irq_handler_lock = false;
1328 + /* First one is EMM second NDF_DMA */
1329 + for (i = 0; i < 2; i++) {
1330 + mmc_irq[i] = platform_get_irq(pdev, i);
1331 + if (mmc_irq[i] < 0)
1332 + return mmc_irq[i];
1333 + }
1334 + }
1335 + host->last_slot = -1;
1336 +
1337 + if (bb_size < 512 || bb_size >= (1 << 24))
1338 + bb_size = 1 << 16;
1339 + host->linear_buf_size = bb_size;
1340 + host->linear_buf = devm_kzalloc(&pdev->dev, host->linear_buf_size,
1341 + GFP_KERNEL);
1342 +
1343 + if (!host->linear_buf) {
1344 + dev_err(&pdev->dev, "devm_kzalloc failed\n");
1345 + return -ENOMEM;
1346 + }
1347 + host->global_pwr_gpio = -1; /* not parsed yet; kzalloc'd 0 is a valid GPIO and must not reach the err path */
1348 + host->pdev = pdev;
1349 +
1350 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1351 + if (!res) {
1352 + dev_err(&pdev->dev, "Platform resource[0] is missing\n");
1353 + return -ENXIO;
1354 + }
1355 + base = devm_ioremap_resource(&pdev->dev, res);
1356 + if (IS_ERR(base))
1357 + return PTR_ERR(base);
1358 + host->base = (u64)base;
1359 +
1360 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1361 + if (!res) {
1362 + dev_err(&pdev->dev, "Platform resource[1] is missing\n");
1363 + ret = -EINVAL;
1364 + goto err;
1365 + }
1366 + base = devm_ioremap_resource(&pdev->dev, res);
1367 + if (IS_ERR(base)) {
1368 + ret = PTR_ERR(base);
1369 + goto err;
1370 + }
1371 + host->ndf_base = (u64)base;
1372 + /*
1373 + * Clear out any pending interrupts that may be left over from
1374 + * bootloader.
1375 + */
1376 + t = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
1377 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, t);
1378 + if (cn78xx_style) {
1379 + /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
1380 + for (i = 1; i <= 4; i++) {
1381 + ret = devm_request_irq(&pdev->dev, mmc_irq[i],
1382 + octeon_mmc_interrupt,
1383 + 0, DRV_NAME, host);
1384 + if (ret < 0) {
1385 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1386 + mmc_irq[i]);
1387 + goto err;
1388 + }
1389 + }
1390 + } else {
1391 + ret = devm_request_irq(&pdev->dev, mmc_irq[0],
1392 + octeon_mmc_interrupt, 0, DRV_NAME, host);
1393 + if (ret < 0) {
1394 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1395 + mmc_irq[0]);
1396 + goto err;
1397 + }
1398 + }
1399 +
1400 + ret = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1401 + if (ret == -EPROBE_DEFER)
1402 + goto err;
1403 +
1404 + host->global_pwr_gpio = ret;
1405 + host->global_pwr_gpio_low =
1406 + (host->global_pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1407 +
1408 + if (host->global_pwr_gpio >= 0) {
1409 + ret = gpio_request(host->global_pwr_gpio, "mmc global power");
1410 + if (ret) {
1411 + dev_err(&pdev->dev,
1412 + "Could not request mmc global power gpio %d\n",
1413 + host->global_pwr_gpio);
1414 + goto err;
1415 + }
1416 + dev_dbg(&pdev->dev, "Global power on\n");
1417 + gpio_direction_output(host->global_pwr_gpio,
1418 + !host->global_pwr_gpio_low);
1419 + }
1420 +
1421 + platform_set_drvdata(pdev, host);
1422 +
1423 + for_each_child_of_node(pdev->dev.of_node, node) {
1424 +
1425 + int r;
1426 + u32 slot;
1427 + int ro_gpio, cd_gpio, pwr_gpio;
1428 + bool ro_low, cd_low, pwr_low;
1429 + u32 bus_width, max_freq, cmd_skew, dat_skew;
1430 +
1431 + if (!of_device_is_compatible(node,
1432 + "cavium,octeon-6130-mmc-slot")) {
1433 + pr_warn("Sub node isn't slot: %s\n",
1434 + of_node_full_name(node));
1435 + continue;
1436 + }
1437 +
1438 + if (of_property_read_u32(node, "reg", &slot) != 0) {
1439 + pr_warn("Missing or invalid reg property on %s\n",
1440 + of_node_full_name(node));
1441 + continue;
1442 + }
1443 +
1444 + r = of_property_read_u32(node, "cavium,bus-max-width",
1445 + &bus_width);
1446 + if (r) {
1447 + bus_width = 8;
1448 + pr_info("Bus width not found for slot %d, defaulting to %d\n",
1449 + slot, bus_width);
1450 + } else {
1451 + switch (bus_width) {
1452 + case 1:
1453 + case 4:
1454 + case 8:
1455 + break;
1456 + default:
1457 + pr_warn("Invalid bus width property for slot %d\n",
1458 + slot);
1459 + continue;
1460 + }
1461 + }
1462 +
1463 + r = of_property_read_u32(node, "cavium,cmd-clk-skew",
1464 + &cmd_skew);
1465 + if (r)
1466 + cmd_skew = 0;
1467 +
1468 + r = of_property_read_u32(node, "cavium,dat-clk-skew",
1469 + &dat_skew);
1470 + if (r)
1471 + dat_skew = 0;
1472 +
1473 + r = of_property_read_u32(node, "spi-max-frequency", &max_freq);
1474 + if (r) {
1475 + max_freq = 52000000;
1476 + pr_info("No spi-max-frequency for slot %d, defaulting to %d\n",
1477 + slot, max_freq);
1478 + }
1479 +
1480 + ro_gpio = of_get_named_gpio_flags(node, "wp-gpios", 0, &f);
1481 + ro_low = (ro_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1482 + cd_gpio = of_get_named_gpio_flags(node, "cd-gpios", 0, &f);
1483 + cd_low = (cd_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1484 + pwr_gpio = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1485 + pwr_low = (pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1486 +
1487 + ret = octeon_init_slot(host, slot, bus_width, max_freq,
1488 + ro_gpio, cd_gpio, pwr_gpio,
1489 + ro_low, cd_low, pwr_low,
1490 + cmd_skew, dat_skew);
1491 + octeon_mmc_dbg("init slot %d, ret = %d\n", slot, ret);
1492 + if (ret)
1493 + goto err;
1494 + }
1495 +
1496 + return ret;
1497 +
1498 +err: /* NOTE(review): slots already added are not unwound here - TODO confirm intended */
1499 + dev_err(&pdev->dev, "Probe failed: %d\n", ret);
1500 +
1501 + /* Disable MMC controller */
1502 + emm_cfg.u64 = 0; /* fully initialized: bus_ena = 0 takes all buses offline */
1503 + cvmx_write_csr(host->base + OCT_MIO_EMM_CFG, emm_cfg.u64);
1504 +
1505 + if (host->global_pwr_gpio >= 0) {
1506 + dev_dbg(&pdev->dev, "Global power off\n");
1507 + gpio_set_value_cansleep(host->global_pwr_gpio,
1508 + host->global_pwr_gpio_low);
1509 + gpio_free(host->global_pwr_gpio);
1510 + }
1511 +
1512 + return ret;
1513 +}
1514 +
1515 +static int octeon_mmc_remove(struct platform_device *pdev)
1516 +{
1517 + union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1518 + struct octeon_mmc_host *host = platform_get_drvdata(pdev);
1519 + struct octeon_mmc_slot *slot;
1520 +
1521 + platform_set_drvdata(pdev, NULL);
1522 +
1523 + if (host) {
1524 + int i;
1525 +
1526 + /* quench all users */
1527 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1528 + slot = host->slot[i];
1529 + if (slot)
1530 + mmc_remove_host(slot->mmc);
1531 + }
1532 +
1533 + /* Reset bus_id */
1534 + ndf_dma_cfg.u64 =
1535 + cvmx_read_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG);
1536 + ndf_dma_cfg.s.en = 0;
1537 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG,
1538 + ndf_dma_cfg.u64);
1539 +
1540 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1541 + struct octeon_mmc_slot *slot;
1542 +
1543 + slot = host->slot[i];
1544 + if (!slot)
1545 + continue;
1546 + /* Free the GPIOs */
1547 + if (slot->ro_gpio >= 0)
1548 + gpio_free(slot->ro_gpio);
1549 + if (slot->cd_gpio >= 0)
1550 + gpio_free(slot->cd_gpio);
1551 + if (slot->pwr_gpio >= 0) {
1552 + gpio_set_value_cansleep(slot->pwr_gpio,
1553 + slot->pwr_gpio_low);
1554 + gpio_free(slot->pwr_gpio);
1555 + }
1556 + }
1557 +
1558 + if (host->global_pwr_gpio >= 0) {
1559 + dev_dbg(&pdev->dev, "Global power off\n");
1560 + gpio_set_value_cansleep(host->global_pwr_gpio,
1561 + host->global_pwr_gpio_low);
1562 + gpio_free(host->global_pwr_gpio);
1563 + }
1564 +
1565 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1566 + slot = host->slot[i];
1567 + if (slot)
1568 + mmc_free_host(slot->mmc);
1569 + }
1570 +
1571 + }
1572 + return 0;
1573 +}
1574 +
1575 +static const struct of_device_id octeon_mmc_match[] = {
1576 + {
1577 + .compatible = "cavium,octeon-6130-mmc",
1578 + },
1579 + {
1580 + .compatible = "cavium,octeon-7890-mmc",
1581 + },
1582 + {},
1583 +};
1584 +MODULE_DEVICE_TABLE(of, octeon_mmc_match);
1585 +
1586 +static struct platform_driver octeon_mmc_driver = {
1587 + .probe = octeon_mmc_probe,
1588 + .remove = octeon_mmc_remove,
1589 + .driver = {
1590 + .name = DRV_NAME,
1591 + .owner = THIS_MODULE,
1592 + .of_match_table = octeon_mmc_match,
1593 + },
1594 +};
1595 +
1596 +static int __init octeon_mmc_init(void)
1597 +{
1598 + int ret;
1599 +
1600 + octeon_mmc_dbg("calling octeon_mmc_init\n");
1601 +
1602 + ret = platform_driver_register(&octeon_mmc_driver);
1603 + octeon_mmc_dbg("driver probe returned %d\n", ret);
1604 +
1605 + if (ret)
1606 + pr_err("%s: Failed to register driver\n", DRV_NAME);
1607 +
1608 + return ret;
1609 +}
1610 +
1611 +static void __exit octeon_mmc_cleanup(void)
1612 +{
1613 + /* Unregister MMC driver */
1614 + platform_driver_unregister(&octeon_mmc_driver);
1615 +}
1616 +
1617 +module_init(octeon_mmc_init);
1618 +module_exit(octeon_mmc_cleanup);
1619 +
1620 +MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
1621 +MODULE_DESCRIPTION("low-level driver for Cavium OCTEON MMC/SSD card");
1622 +MODULE_LICENSE("GPL");