2 +++ b/Documentation/devicetree/bindings/mmc/octeon-mmc.txt
4 +* OCTEON SD/MMC Host Controller
6 +This controller is present on some members of the Cavium OCTEON SoC
7 +family, providing an interface for eMMC, MMC and SD devices. There is a
8 +single controller that may have several "slots" connected. These
9 +slots appear as children of the main controller node.
10 +The DMA engine is an integral part of the controller block.
13 +- compatible : Should be "cavium,octeon-6130-mmc" or "cavium,octeon-7890-mmc"
15 + 1) The base address of the MMC controller register bank.
16 + 2) The base address of the MMC DMA engine register bank.
18 + For "cavium,octeon-6130-mmc": two entries:
19 + 1) The MMC controller interrupt line.
20 + 2) The MMC DMA engine interrupt line.
21 + For "cavium,octeon-7890-mmc": nine entries:
22 + 1) The next block transfer of a multiblock transfer has completed (BUF_DONE)
23 + 2) Operation completed successfully (CMD_DONE).
24 + 3) DMA transfer completed successfully (DMA_DONE).
25 + 4) Operation encountered an error (CMD_ERR).
26 + 5) DMA transfer encountered an error (DMA_ERR).
27 + 6) Switch operation completed successfully (SWITCH_DONE).
28 + 7) Switch operation encountered an error (SWITCH_ERR).
29 + 8) Internal DMA engine request completion interrupt (DONE).
30 + 9) Internal DMA FIFO underflow (FIFO).
31 +- #address-cells : Must be <1>
32 +- #size-cells : Must be <0>
34 +Required properties of child nodes:
35 +- compatible : Should be "cavium,octeon-6130-mmc-slot".
36 +- reg : The slot number.
38 +Optional properties of child nodes:
39 +- cd-gpios : Specify GPIOs for card detection
40 +- wp-gpios : Specify GPIOs for write protection
41 +- power-gpios : Specify GPIOs for power control
42 +- cavium,bus-max-width : The number of data lines present in the slot.
44 +- spi-max-frequency : The maximum operating frequency of the slot.
45 + Default is 52000000.
46 +- cavium,cmd-clk-skew : the amount of delay (in ps) past the clock edge
47 + to sample the command pin.
48 +- cavium,dat-clk-skew : the amount of delay (in ps) past the clock edge
49 + to sample the data pin.
53 + compatible = "cavium,octeon-6130-mmc";
54 + reg = <0x11800 0x00002000 0x0 0x100>,
55 + <0x11800 0x00000168 0x0 0x20>;
56 + #address-cells = <1>;
58 + /* EMM irq, DMA irq */
59 + interrupts = <1 19>, <0 63>;
61 + /* The board only has a single MMC slot */
63 + compatible = "cavium,octeon-6130-mmc-slot";
65 + spi-max-frequency = <20000000>;
66 + /* bus width can be 1, 4 or 8 */
67 + cavium,bus-max-width = <8>;
68 + cd-gpios = <&gpio 9 0>;
69 + wp-gpios = <&gpio 10 0>;
70 + power-gpios = <&gpio 8 0>;
73 --- a/drivers/mmc/host/Kconfig
74 +++ b/drivers/mmc/host/Kconfig
75 @@ -436,6 +436,16 @@ config MMC_MXS
80 + tristate "Cavium OCTEON Multimedia Card Interface support"
81 + depends on CAVIUM_OCTEON_SOC
83 + This selects Cavium OCTEON Multimedia Card Interface.
84 + If you have an OCTEON board with a Multimedia Card slot,
90 tristate "TI Flash Media MMC/SD Interface support"
92 --- a/drivers/mmc/host/Makefile
93 +++ b/drivers/mmc/host/Makefile
94 @@ -20,6 +20,7 @@ obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci
95 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
96 obj-$(CONFIG_MMC_WBSD) += wbsd.o
97 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
98 +obj-$(CONFIG_MMC_OCTEON) += octeon_mmc.o
99 obj-$(CONFIG_MMC_MTK) += mtk-sd.o
100 obj-$(CONFIG_MMC_OMAP) += omap.o
101 obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
103 +++ b/drivers/mmc/host/octeon_mmc.c
106 + * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
108 + * This file is subject to the terms and conditions of the GNU General Public
109 + * License. See the file "COPYING" in the main directory of this archive
110 + * for more details.
112 + * Copyright (C) 2012-2014 Cavium Inc.
115 +#include <linux/platform_device.h>
116 +#include <linux/of_platform.h>
117 +#include <linux/scatterlist.h>
118 +#include <linux/interrupt.h>
119 +#include <linux/of_gpio.h>
120 +#include <linux/blkdev.h>
121 +#include <linux/device.h>
122 +#include <linux/module.h>
123 +#include <linux/delay.h>
124 +#include <linux/init.h>
125 +#include <linux/clk.h>
126 +#include <linux/err.h>
127 +#include <linux/io.h>
128 +#include <linux/of.h>
130 +#include <linux/mmc/card.h>
131 +#include <linux/mmc/host.h>
132 +#include <linux/mmc/mmc.h>
133 +#include <linux/mmc/sd.h>
134 +#include <net/irda/parameters.h>
136 +#include <asm/byteorder.h>
137 +#include <asm/octeon/octeon.h>
138 +#include <asm/octeon/cvmx-mio-defs.h>
140 +#define DRV_NAME "octeon_mmc"
142 +#define OCTEON_MAX_MMC 4
144 +#define OCT_MIO_NDF_DMA_CFG 0x00
145 +#define OCT_MIO_EMM_DMA_ADR 0x08
147 +#define OCT_MIO_EMM_CFG 0x00
148 +#define OCT_MIO_EMM_SWITCH 0x48
149 +#define OCT_MIO_EMM_DMA 0x50
150 +#define OCT_MIO_EMM_CMD 0x58
151 +#define OCT_MIO_EMM_RSP_STS 0x60
152 +#define OCT_MIO_EMM_RSP_LO 0x68
153 +#define OCT_MIO_EMM_RSP_HI 0x70
154 +#define OCT_MIO_EMM_INT 0x78
155 +#define OCT_MIO_EMM_INT_EN 0x80
156 +#define OCT_MIO_EMM_WDOG 0x88
157 +#define OCT_MIO_EMM_SAMPLE 0x90
158 +#define OCT_MIO_EMM_STS_MASK 0x98
159 +#define OCT_MIO_EMM_RCA 0xa0
160 +#define OCT_MIO_EMM_BUF_IDX 0xe0
161 +#define OCT_MIO_EMM_BUF_DAT 0xe8
163 +#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
165 +struct octeon_mmc_host {
169 + u64 n_minus_one; /* OCTEON II workaround location */
172 + struct semaphore mmc_serializer;
173 + struct mmc_request *current_req;
174 + unsigned int linear_buf_size;
176 + struct sg_mapping_iter smi;
180 + struct platform_device *pdev;
181 + int global_pwr_gpio;
182 + bool global_pwr_gpio_low;
183 + bool dma_err_pending;
184 + bool need_bootbus_lock;
186 + bool need_irq_handler_lock;
187 + spinlock_t irq_handler_lock;
189 + struct octeon_mmc_slot *slot[OCTEON_MAX_MMC];
192 +struct octeon_mmc_slot {
193 + struct mmc_host *mmc; /* slot-level mmc_core object */
194 + struct octeon_mmc_host *host; /* common hw for all 4 slots */
196 + unsigned int clock;
197 + unsigned int sclock;
202 + unsigned int cmd_cnt; /* sample delay */
203 + unsigned int dat_cnt; /* sample delay */
215 +static int bb_size = 1 << 16;
216 +module_param(bb_size, int, S_IRUGO);
217 +MODULE_PARM_DESC(bb_size,
218 + "Size of DMA linearizing buffer (max transfer size).");
221 +module_param(ddr, int, S_IRUGO);
222 +MODULE_PARM_DESC(ddr,
223 + "enable DoubleDataRate clocking: 0=no, 1=always, 2=at spi-max-frequency/2");
226 +#define octeon_mmc_dbg trace_printk
228 +static inline void octeon_mmc_dbg(const char *s, ...) { }
231 +static void octeon_mmc_acquire_bus(struct octeon_mmc_host *host)
233 + if (host->need_bootbus_lock) {
234 + down(&octeon_bootbus_sem);
235 + /* On cn70XX switch the mmc unit onto the bus. */
236 + if (OCTEON_IS_MODEL(OCTEON_CN70XX))
237 + cvmx_write_csr(CVMX_MIO_BOOT_CTL, 0);
239 + down(&host->mmc_serializer);
243 +static void octeon_mmc_release_bus(struct octeon_mmc_host *host)
245 + if (host->need_bootbus_lock)
246 + up(&octeon_bootbus_sem);
248 + up(&host->mmc_serializer);
251 +struct octeon_mmc_cr_type {
257 + * The OCTEON MMC host hardware assumes that all commands have fixed
258 + * command and response types. These are correct if MMC devices are
259 + * being used. However, non-MMC devices like SD use command and
260 + * response types that are unexpected by the host hardware.
262 + * The command and response types can be overridden by supplying an
263 + * XOR value that is applied to the type. We calculate the XOR value
264 + * from the values in this table and the flags passed from the MMC
267 +static struct octeon_mmc_cr_type octeon_mmc_cr_types[] = {
278 + {0, 2}, /* CMD10 */
279 + {1, 1}, /* CMD11 */
280 + {0, 1}, /* CMD12 */
281 + {0, 1}, /* CMD13 */
282 + {1, 1}, /* CMD14 */
283 + {0, 0}, /* CMD15 */
284 + {0, 1}, /* CMD16 */
285 + {1, 1}, /* CMD17 */
286 + {1, 1}, /* CMD18 */
287 + {3, 1}, /* CMD19 */
288 + {2, 1}, /* CMD20 */
289 + {0, 0}, /* CMD21 */
290 + {0, 0}, /* CMD22 */
291 + {0, 1}, /* CMD23 */
292 + {2, 1}, /* CMD24 */
293 + {2, 1}, /* CMD25 */
294 + {2, 1}, /* CMD26 */
295 + {2, 1}, /* CMD27 */
296 + {0, 1}, /* CMD28 */
297 + {0, 1}, /* CMD29 */
298 + {1, 1}, /* CMD30 */
299 + {1, 1}, /* CMD31 */
300 + {0, 0}, /* CMD32 */
301 + {0, 0}, /* CMD33 */
302 + {0, 0}, /* CMD34 */
303 + {0, 1}, /* CMD35 */
304 + {0, 1}, /* CMD36 */
305 + {0, 0}, /* CMD37 */
306 + {0, 1}, /* CMD38 */
307 + {0, 4}, /* CMD39 */
308 + {0, 5}, /* CMD40 */
309 + {0, 0}, /* CMD41 */
310 + {2, 1}, /* CMD42 */
311 + {0, 0}, /* CMD43 */
312 + {0, 0}, /* CMD44 */
313 + {0, 0}, /* CMD45 */
314 + {0, 0}, /* CMD46 */
315 + {0, 0}, /* CMD47 */
316 + {0, 0}, /* CMD48 */
317 + {0, 0}, /* CMD49 */
318 + {0, 0}, /* CMD50 */
319 + {0, 0}, /* CMD51 */
320 + {0, 0}, /* CMD52 */
321 + {0, 0}, /* CMD53 */
322 + {0, 0}, /* CMD54 */
323 + {0, 1}, /* CMD55 */
324 + {0xff, 0xff}, /* CMD56 */
325 + {0, 0}, /* CMD57 */
326 + {0, 0}, /* CMD58 */
327 + {0, 0}, /* CMD59 */
328 + {0, 0}, /* CMD60 */
329 + {0, 0}, /* CMD61 */
330 + {0, 0}, /* CMD62 */
334 +struct octeon_mmc_cr_mods {
340 + * The functions below are used for the EMMC-17978 workaround.
342 + * Due to an imperfection in the design of the MMC bus hardware,
343 + * the 2nd to last cache block of a DMA read must be locked into the L2 Cache.
344 + * Otherwise, data corruption may occur.
347 +static inline void *phys_to_ptr(u64 address)
349 + return (void *)(address | (1ull<<63)); /* XKPHYS */
353 + * Lock a single line into L2. The line is zeroed before locking
354 + * to make sure no dram accesses are made.
356 + * @addr Physical address to lock
358 +static void l2c_lock_line(u64 addr)
360 + char *addr_ptr = phys_to_ptr(addr);
363 + "cache 31, %[line]" /* Unlock the line */
364 + :: [line] "m" (*addr_ptr));
368 + * Locks a memory region in the L2 cache
370 + * @start - start address to begin locking
371 + * @len - length in bytes to lock
373 +static void l2c_lock_mem_region(u64 start, u64 len)
377 + /* Round start/end to cache line boundaries */
378 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
379 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
381 + while (start <= end) {
382 + l2c_lock_line(start);
383 + start += CVMX_CACHE_LINE_SIZE;
385 + asm volatile("sync");
389 + * Unlock a single line in the L2 cache.
391 + * @addr Physical address to unlock
393 + * Return Zero on success
395 +static void l2c_unlock_line(u64 addr)
397 + char *addr_ptr = phys_to_ptr(addr);
399 + "cache 23, %[line]" /* Unlock the line */
400 + :: [line] "m" (*addr_ptr));
404 + * Unlock a memory region in the L2 cache
406 + * @start - start address to unlock
407 + * @len - length to unlock in bytes
409 +static void l2c_unlock_mem_region(u64 start, u64 len)
413 + /* Round start/end to cache line boundaries */
414 + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
415 + start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
417 + while (start <= end) {
418 + l2c_unlock_line(start);
419 + start += CVMX_CACHE_LINE_SIZE;
423 +static struct octeon_mmc_cr_mods octeon_mmc_get_cr_mods(struct mmc_command *cmd)
425 + struct octeon_mmc_cr_type *cr;
426 + u8 desired_ctype, hardware_ctype;
427 + u8 desired_rtype, hardware_rtype;
428 + struct octeon_mmc_cr_mods r;
430 + desired_ctype = desired_rtype = 0;
432 + cr = octeon_mmc_cr_types + (cmd->opcode & 0x3f);
433 + hardware_ctype = cr->ctype;
434 + hardware_rtype = cr->rtype;
435 + if (cmd->opcode == 56) { /* CMD56 GEN_CMD */
436 + hardware_ctype = (cmd->arg & 1) ? 1 : 2;
439 + switch (mmc_cmd_type(cmd)) {
441 + desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
450 + switch (mmc_resp_type(cmd)) {
454 + case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
461 + case MMC_RSP_R3: /* MMC_RSP_R4 */
465 + r.ctype_xor = desired_ctype ^ hardware_ctype;
466 + r.rtype_xor = desired_rtype ^ hardware_rtype;
470 +static bool octeon_mmc_switch_val_changed(struct octeon_mmc_slot *slot,
473 + /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
474 + u64 m = 0x3001070fffffffffull;
476 + return (slot->cached_switch & m) != (new_val & m);
479 +static unsigned int octeon_mmc_timeout_to_wdog(struct octeon_mmc_slot *slot,
482 + u64 bt = (u64)slot->clock * (u64)ns;
484 + return (unsigned int)(bt / 1000000000);
487 +static irqreturn_t octeon_mmc_interrupt(int irq, void *dev_id)
489 + struct octeon_mmc_host *host = dev_id;
490 + union cvmx_mio_emm_int emm_int;
491 + struct mmc_request *req;
493 + union cvmx_mio_emm_rsp_sts rsp_sts;
494 + unsigned long flags = 0;
496 + if (host->need_irq_handler_lock)
497 + spin_lock_irqsave(&host->irq_handler_lock, flags);
498 + emm_int.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
499 + req = host->current_req;
500 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
502 + octeon_mmc_dbg("Got interrupt: EMM_INT = 0x%llx\n", emm_int.u64);
507 + rsp_sts.u64 = cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
508 + octeon_mmc_dbg("octeon_mmc_interrupt MIO_EMM_RSP_STS 0x%llx\n",
511 + if (host->dma_err_pending) {
512 + host->current_req = NULL;
513 + host->dma_err_pending = false;
519 + if (!host->dma_active && emm_int.s.buf_done && req->data) {
520 + unsigned int type = (rsp_sts.u64 >> 7) & 3;
524 + int dbuf = rsp_sts.s.dbuf;
525 + struct sg_mapping_iter *smi = &host->smi;
526 + unsigned int data_len =
527 + req->data->blksz * req->data->blocks;
528 + unsigned int bytes_xfered;
532 + /* Auto inc from offset zero */
533 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
534 + (u64)(0x10000 | (dbuf << 6)));
536 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
537 + if (smi->consumed >= smi->length) {
538 + if (!sg_miter_next(smi))
543 + dat = cvmx_read_csr(host->base +
544 + OCT_MIO_EMM_BUF_DAT);
548 + while (smi->consumed < smi->length &&
550 + ((u8 *)(smi->addr))[smi->consumed] =
551 + (dat >> shift) & 0xff;
557 + sg_miter_stop(smi);
558 + req->data->bytes_xfered = bytes_xfered;
559 + req->data->error = 0;
560 + } else if (type == 2) {
562 + req->data->bytes_xfered = req->data->blksz *
564 + req->data->error = 0;
567 + host_done = emm_int.s.cmd_done || emm_int.s.dma_done ||
568 + emm_int.s.cmd_err || emm_int.s.dma_err;
569 + if (host_done && req->done) {
570 + if (rsp_sts.s.rsp_bad_sts ||
571 + rsp_sts.s.rsp_crc_err ||
572 + rsp_sts.s.rsp_timeout ||
573 + rsp_sts.s.blk_crc_err ||
574 + rsp_sts.s.blk_timeout ||
575 + rsp_sts.s.dbuf_err) {
576 + req->cmd->error = -EILSEQ;
578 + req->cmd->error = 0;
581 + if (host->dma_active && req->data) {
582 + req->data->error = 0;
583 + req->data->bytes_xfered = req->data->blocks *
585 + if (!(req->data->flags & MMC_DATA_WRITE) &&
586 + req->data->sg_len > 1) {
587 + size_t r = sg_copy_from_buffer(req->data->sg,
588 + req->data->sg_len, host->linear_buf,
589 + req->data->bytes_xfered);
590 + WARN_ON(r != req->data->bytes_xfered);
593 + if (rsp_sts.s.rsp_val) {
595 + u64 rsp_lo = cvmx_read_csr(
596 + host->base + OCT_MIO_EMM_RSP_LO);
598 + switch (rsp_sts.s.rsp_type) {
601 + req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
602 + req->cmd->resp[1] = 0;
603 + req->cmd->resp[2] = 0;
604 + req->cmd->resp[3] = 0;
607 + req->cmd->resp[3] = rsp_lo & 0xffffffff;
608 + req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
609 + rsp_hi = cvmx_read_csr(host->base +
610 + OCT_MIO_EMM_RSP_HI);
611 + req->cmd->resp[1] = rsp_hi & 0xffffffff;
612 + req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
615 + octeon_mmc_dbg("octeon_mmc_interrupt unhandled rsp_val %d\n",
616 + rsp_sts.s.rsp_type);
619 + octeon_mmc_dbg("octeon_mmc_interrupt resp %08x %08x %08x %08x\n",
620 + req->cmd->resp[0], req->cmd->resp[1],
621 + req->cmd->resp[2], req->cmd->resp[3]);
623 + if (emm_int.s.dma_err && rsp_sts.s.dma_pend) {
624 + /* Try to clean up failed DMA */
625 + union cvmx_mio_emm_dma emm_dma;
628 + cvmx_read_csr(host->base + OCT_MIO_EMM_DMA);
629 + emm_dma.s.dma_val = 1;
630 + emm_dma.s.dat_null = 1;
631 + emm_dma.s.bus_id = rsp_sts.s.bus_id;
632 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA,
634 + host->dma_err_pending = true;
639 + host->current_req = NULL;
643 + if (host->n_minus_one) {
644 + l2c_unlock_mem_region(host->n_minus_one, 512);
645 + host->n_minus_one = 0;
648 + octeon_mmc_release_bus(host);
650 + if (host->need_irq_handler_lock)
651 + spin_unlock_irqrestore(&host->irq_handler_lock, flags);
652 + return IRQ_RETVAL(emm_int.u64 != 0);
655 +static void octeon_mmc_switch_to(struct octeon_mmc_slot *slot)
657 + struct octeon_mmc_host *host = slot->host;
658 + struct octeon_mmc_slot *old_slot;
659 + union cvmx_mio_emm_switch sw;
660 + union cvmx_mio_emm_sample samp;
662 + if (slot->bus_id == host->last_slot)
665 + if (host->last_slot >= 0) {
666 + old_slot = host->slot[host->last_slot];
667 + old_slot->cached_switch =
668 + cvmx_read_csr(host->base + OCT_MIO_EMM_SWITCH);
669 + old_slot->cached_rca =
670 + cvmx_read_csr(host->base + OCT_MIO_EMM_RCA);
672 + cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, slot->cached_rca);
673 + sw.u64 = slot->cached_switch;
675 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
676 + sw.s.bus_id = slot->bus_id;
677 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, sw.u64);
680 + samp.s.cmd_cnt = slot->cmd_cnt;
681 + samp.s.dat_cnt = slot->dat_cnt;
682 + cvmx_write_csr(host->base + OCT_MIO_EMM_SAMPLE, samp.u64);
684 + host->last_slot = slot->bus_id;
687 +static void octeon_mmc_dma_request(struct mmc_host *mmc,
688 + struct mmc_request *mrq)
690 + struct octeon_mmc_slot *slot;
691 + struct octeon_mmc_host *host;
692 + struct mmc_command *cmd;
693 + struct mmc_data *data;
694 + union cvmx_mio_emm_int emm_int;
695 + union cvmx_mio_emm_dma emm_dma;
696 + union cvmx_mio_ndf_dma_cfg dma_cfg;
699 + if (mrq->data == NULL || mrq->data->sg == NULL || !mrq->data->sg_len ||
700 + mrq->stop == NULL || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
701 + dev_err(&mmc->card->dev,
702 + "Error: octeon_mmc_dma_request no data\n");
703 + cmd->error = -EINVAL;
709 + slot = mmc_priv(mmc);
712 + /* Only a single user of the bootbus at a time. */
713 + octeon_mmc_acquire_bus(host);
715 + octeon_mmc_switch_to(slot);
719 + if (data->timeout_ns) {
720 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
721 + octeon_mmc_timeout_to_wdog(slot, data->timeout_ns));
722 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
723 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
726 + WARN_ON(host->current_req);
727 + host->current_req = mrq;
731 + WARN_ON(data->blksz * data->blocks > host->linear_buf_size);
733 + if ((data->flags & MMC_DATA_WRITE) && data->sg_len > 1) {
734 + size_t r = sg_copy_to_buffer(data->sg, data->sg_len,
735 + host->linear_buf, data->blksz * data->blocks);
736 + WARN_ON(data->blksz * data->blocks != r);
741 + dma_cfg.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
742 +#ifdef __LITTLE_ENDIAN
743 + dma_cfg.s.endian = 1;
745 + dma_cfg.s.size = ((data->blksz * data->blocks) / 8) - 1;
746 + if (!host->big_dma_addr) {
747 + if (data->sg_len > 1)
748 + dma_cfg.s.adr = virt_to_phys(host->linear_buf);
750 + dma_cfg.s.adr = sg_phys(data->sg);
752 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG, dma_cfg.u64);
753 + octeon_mmc_dbg("MIO_NDF_DMA_CFG: %016llx\n",
754 + (unsigned long long)dma_cfg.u64);
755 + if (host->big_dma_addr) {
758 + if (data->sg_len > 1)
759 + addr = virt_to_phys(host->linear_buf);
761 + addr = sg_phys(data->sg);
762 + cvmx_write_csr(host->ndf_base + OCT_MIO_EMM_DMA_ADR, addr);
763 + octeon_mmc_dbg("MIO_EMM_DMA_ADR: %016llx\n",
764 + (unsigned long long)addr);
768 + emm_dma.s.bus_id = slot->bus_id;
769 + emm_dma.s.dma_val = 1;
770 + emm_dma.s.sector = mmc_card_blockaddr(mmc->card) ? 1 : 0;
771 + emm_dma.s.rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
772 + if (mmc_card_mmc(mmc->card) ||
773 + (mmc_card_sd(mmc->card) &&
774 + (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
775 + emm_dma.s.multi = 1;
776 + emm_dma.s.block_cnt = data->blocks;
777 + emm_dma.s.card_addr = cmd->arg;
780 + emm_int.s.dma_done = 1;
781 + emm_int.s.cmd_err = 1;
782 + emm_int.s.dma_err = 1;
783 + /* Clear the bit. */
784 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
785 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
786 + host->dma_active = true;
788 + if ((OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
789 + OCTEON_IS_MODEL(OCTEON_CNF7XXX)) &&
790 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK &&
791 + (data->blksz * data->blocks) > 1024) {
792 + host->n_minus_one = dma_cfg.s.adr +
793 + (data->blksz * data->blocks) - 1024;
794 + l2c_lock_mem_region(host->n_minus_one, 512);
797 + if (mmc->card && mmc_card_sd(mmc->card))
798 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
801 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK,
803 + cvmx_write_csr(host->base + OCT_MIO_EMM_DMA, emm_dma.u64);
804 + octeon_mmc_dbg("MIO_EMM_DMA: %llx\n", emm_dma.u64);
807 +static void octeon_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
809 + struct octeon_mmc_slot *slot;
810 + struct octeon_mmc_host *host;
811 + struct mmc_command *cmd;
812 + union cvmx_mio_emm_int emm_int;
813 + union cvmx_mio_emm_cmd emm_cmd;
814 + struct octeon_mmc_cr_mods mods;
818 + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
819 + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) {
820 + octeon_mmc_dma_request(mmc, mrq);
824 + mods = octeon_mmc_get_cr_mods(cmd);
826 + slot = mmc_priv(mmc);
829 + /* Only a single user of the bootbus at a time. */
830 + octeon_mmc_acquire_bus(host);
832 + octeon_mmc_switch_to(slot);
834 + WARN_ON(host->current_req);
835 + host->current_req = mrq;
838 + emm_int.s.cmd_done = 1;
839 + emm_int.s.cmd_err = 1;
841 + octeon_mmc_dbg("command has data\n");
842 + if (cmd->data->flags & MMC_DATA_READ) {
843 + sg_miter_start(&host->smi, mrq->data->sg,
845 + SG_MITER_ATOMIC | SG_MITER_TO_SG);
847 + struct sg_mapping_iter *smi = &host->smi;
848 + unsigned int data_len =
849 + mrq->data->blksz * mrq->data->blocks;
850 + unsigned int bytes_xfered;
854 + * Copy data to the xmit buffer before
855 + * issuing the command
857 + sg_miter_start(smi, mrq->data->sg,
858 + mrq->data->sg_len, SG_MITER_FROM_SG);
859 + /* Auto inc from offset zero, dbuf zero */
860 + cvmx_write_csr(host->base + OCT_MIO_EMM_BUF_IDX,
863 + for (bytes_xfered = 0; bytes_xfered < data_len;) {
864 + if (smi->consumed >= smi->length) {
865 + if (!sg_miter_next(smi))
870 + while (smi->consumed < smi->length &&
873 + dat |= (u64)(((u8 *)(smi->addr))
874 + [smi->consumed]) << shift;
880 + cvmx_write_csr(host->base +
881 + OCT_MIO_EMM_BUF_DAT, dat);
886 + sg_miter_stop(smi);
888 + if (cmd->data->timeout_ns) {
889 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
890 + octeon_mmc_timeout_to_wdog(slot,
891 + cmd->data->timeout_ns));
892 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
893 + cvmx_read_csr(host->base +
894 + OCT_MIO_EMM_WDOG));
897 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
898 + ((u64)slot->clock * 850ull) / 1000ull);
899 + octeon_mmc_dbg("OCT_MIO_EMM_WDOG %llu\n",
900 + cvmx_read_csr(host->base + OCT_MIO_EMM_WDOG));
902 + /* Clear the bit. */
903 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, emm_int.u64);
904 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT_EN, emm_int.u64);
905 + host->dma_active = false;
908 + emm_cmd.s.cmd_val = 1;
909 + emm_cmd.s.ctype_xor = mods.ctype_xor;
910 + emm_cmd.s.rtype_xor = mods.rtype_xor;
911 + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
912 + emm_cmd.s.offset = 64 -
913 + ((cmd->data->blksz * cmd->data->blocks) / 8);
914 + emm_cmd.s.bus_id = slot->bus_id;
915 + emm_cmd.s.cmd_idx = cmd->opcode;
916 + emm_cmd.s.arg = cmd->arg;
917 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0);
918 + cvmx_write_csr(host->base + OCT_MIO_EMM_CMD, emm_cmd.u64);
919 + octeon_mmc_dbg("MIO_EMM_CMD: %llx\n", emm_cmd.u64);
922 +static void octeon_mmc_reset_bus(struct octeon_mmc_slot *slot, int preserve)
924 + union cvmx_mio_emm_cfg emm_cfg;
925 + union cvmx_mio_emm_switch emm_switch;
928 + emm_cfg.u64 = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_CFG);
930 + emm_switch.u64 = cvmx_read_csr(slot->host->base +
931 + OCT_MIO_EMM_SWITCH);
932 + wdog = cvmx_read_csr(slot->host->base + OCT_MIO_EMM_WDOG);
935 + /* Restore switch settings */
937 + emm_switch.s.switch_exe = 0;
938 + emm_switch.s.switch_err0 = 0;
939 + emm_switch.s.switch_err1 = 0;
940 + emm_switch.s.switch_err2 = 0;
941 + emm_switch.s.bus_id = 0;
942 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
944 + emm_switch.s.bus_id = slot->bus_id;
945 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_SWITCH,
948 + slot->cached_switch = emm_switch.u64;
951 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_WDOG, wdog);
953 + slot->cached_switch = 0;
957 +static void octeon_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
959 + struct octeon_mmc_slot *slot;
960 + struct octeon_mmc_host *host;
965 + int power_class = 10;
967 + int timeout = 2000;
968 + union cvmx_mio_emm_switch emm_switch;
969 + union cvmx_mio_emm_rsp_sts emm_sts;
971 + slot = mmc_priv(mmc);
974 + /* Only a single user of the bootbus at a time. */
975 + octeon_mmc_acquire_bus(host);
977 + octeon_mmc_switch_to(slot);
979 + octeon_mmc_dbg("Calling set_ios: slot: clk = 0x%x, bus_width = %d\n",
980 + slot->clock, slot->bus_width);
981 + octeon_mmc_dbg("Calling set_ios: ios: clk = 0x%x, vdd = %u, bus_width = %u, power_mode = %u, timing = %u\n",
982 + ios->clock, ios->vdd, ios->bus_width, ios->power_mode,
984 + octeon_mmc_dbg("Calling set_ios: mmc: caps = 0x%x, bus_width = %d\n",
985 + mmc->caps, mmc->ios.bus_width);
988 + * Reset the chip on each power off
990 + if (ios->power_mode == MMC_POWER_OFF) {
991 + octeon_mmc_reset_bus(slot, 1);
992 + if (slot->pwr_gpio >= 0)
993 + gpio_set_value_cansleep(slot->pwr_gpio,
994 + slot->pwr_gpio_low);
996 + if (slot->pwr_gpio >= 0)
997 + gpio_set_value_cansleep(slot->pwr_gpio,
998 + !slot->pwr_gpio_low);
1001 + switch (ios->bus_width) {
1002 + case MMC_BUS_WIDTH_8:
1005 + case MMC_BUS_WIDTH_4:
1008 + case MMC_BUS_WIDTH_1:
1012 + octeon_mmc_dbg("unknown bus width %d\n", ios->bus_width);
1017 + hs_timing = (ios->timing == MMC_TIMING_MMC_HS);
1018 + ddr_clock = (bus_width && ios->timing >= MMC_TIMING_UHS_DDR50);
1024 + slot->clock = ios->clock;
1025 + slot->bus_width = bus_width;
1027 + clock = slot->clock;
1029 + if (clock > 52000000)
1032 + clk_period = (octeon_get_io_clock_rate() + clock - 1) /
1035 + /* until clock-renengotiate-on-CRC is in */
1036 + if (ddr_clock && ddr > 1)
1039 + emm_switch.u64 = 0;
1040 + emm_switch.s.hs_timing = hs_timing;
1041 + emm_switch.s.bus_width = bus_width;
1042 + emm_switch.s.power_class = power_class;
1043 + emm_switch.s.clk_hi = clk_period;
1044 + emm_switch.s.clk_lo = clk_period;
1046 + if (!octeon_mmc_switch_val_changed(slot, emm_switch.u64)) {
1047 + octeon_mmc_dbg("No change from 0x%llx mio_emm_switch, returning.\n",
1052 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_wdog\n",
1053 + ((u64)clock * 850ull) / 1000ull);
1054 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
1055 + ((u64)clock * 850ull) / 1000ull);
1056 + octeon_mmc_dbg("Writing 0x%llx to mio_emm_switch\n",
1059 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1060 + emm_switch.s.bus_id = slot->bus_id;
1061 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1062 + slot->cached_switch = emm_switch.u64;
1066 + cvmx_read_csr(host->base + OCT_MIO_EMM_RSP_STS);
1067 + if (!emm_sts.s.switch_val)
1070 + } while (timeout-- > 0);
1072 + if (timeout <= 0) {
1073 + octeon_mmc_dbg("switch command timed out, status=0x%llx\n",
1079 + octeon_mmc_release_bus(host);
1082 +static int octeon_mmc_get_ro(struct mmc_host *mmc)
1084 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1086 + if (slot->ro_gpio >= 0) {
1087 + int pin = gpio_get_value_cansleep(slot->ro_gpio);
1091 + if (slot->ro_gpio_low)
1099 +static int octeon_mmc_get_cd(struct mmc_host *mmc)
1101 + struct octeon_mmc_slot *slot = mmc_priv(mmc);
1103 + if (slot->cd_gpio >= 0) {
1104 + int pin = gpio_get_value_cansleep(slot->cd_gpio);
1108 + if (slot->cd_gpio_low)
1116 +static const struct mmc_host_ops octeon_mmc_ops = {
1117 + .request = octeon_mmc_request,
1118 + .set_ios = octeon_mmc_set_ios,
1119 + .get_ro = octeon_mmc_get_ro,
1120 + .get_cd = octeon_mmc_get_cd,
1123 +static void octeon_mmc_set_clock(struct octeon_mmc_slot *slot,
1124 + unsigned int clock)
1126 + struct mmc_host *mmc = slot->mmc;
1128 + clock = min(clock, mmc->f_max);
1129 + clock = max(clock, mmc->f_min);
1130 + slot->clock = clock;
1133 +static int octeon_mmc_initlowlevel(struct octeon_mmc_slot *slot,
1136 + union cvmx_mio_emm_switch emm_switch;
1137 + struct octeon_mmc_host *host = slot->host;
1139 + host->emm_cfg |= 1ull << slot->bus_id;
1140 + cvmx_write_csr(slot->host->base + OCT_MIO_EMM_CFG, host->emm_cfg);
1141 + octeon_mmc_set_clock(slot, 400000);
1143 + /* Program initial clock speed and power */
1144 + emm_switch.u64 = 0;
1145 + emm_switch.s.power_class = 10;
1146 + emm_switch.s.clk_hi = (slot->sclock / slot->clock) / 2;
1147 + emm_switch.s.clk_lo = (slot->sclock / slot->clock) / 2;
1149 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1150 + emm_switch.s.bus_id = slot->bus_id;
1151 + cvmx_write_csr(host->base + OCT_MIO_EMM_SWITCH, emm_switch.u64);
1152 + slot->cached_switch = emm_switch.u64;
1154 + cvmx_write_csr(host->base + OCT_MIO_EMM_WDOG,
1155 + ((u64)slot->clock * 850ull) / 1000ull);
1156 + cvmx_write_csr(host->base + OCT_MIO_EMM_STS_MASK, 0xe4f90080ull);
1157 + cvmx_write_csr(host->base + OCT_MIO_EMM_RCA, 1);
1161 +static int __init octeon_init_slot(struct octeon_mmc_host *host, int id,
1162 + int bus_width, int max_freq,
1163 + int ro_gpio, int cd_gpio, int pwr_gpio,
1164 + bool ro_low, bool cd_low, bool power_low,
1165 + u32 cmd_skew, u32 dat_skew)
1167 + struct mmc_host *mmc;
1168 + struct octeon_mmc_slot *slot;
1173 + * Allocate MMC structue
1175 + mmc = mmc_alloc_host(sizeof(struct octeon_mmc_slot), &host->pdev->dev);
1177 + dev_err(&host->pdev->dev, "alloc host failed\n");
1181 + slot = mmc_priv(mmc);
1183 + slot->host = host;
1184 + slot->ro_gpio = ro_gpio;
1185 + slot->cd_gpio = cd_gpio;
1186 + slot->pwr_gpio = pwr_gpio;
1187 + slot->ro_gpio_low = ro_low;
1188 + slot->cd_gpio_low = cd_low;
1189 + slot->pwr_gpio_low = power_low;
1191 + if (slot->ro_gpio >= 0) {
1192 + ret = gpio_request(slot->ro_gpio, "mmc_ro");
1194 + dev_err(&host->pdev->dev,
1195 + "Could not request mmc_ro GPIO %d\n",
1199 + gpio_direction_input(slot->ro_gpio);
1201 + if (slot->cd_gpio >= 0) {
1202 + ret = gpio_request(slot->cd_gpio, "mmc_card_detect");
1204 + if (slot->ro_gpio >= 0)
1205 + gpio_free(slot->ro_gpio);
1206 + dev_err(&host->pdev->dev, "Could not request mmc_card_detect GPIO %d\n",
1210 + gpio_direction_input(slot->cd_gpio);
1212 + if (slot->pwr_gpio >= 0) {
1213 + ret = gpio_request(slot->pwr_gpio, "mmc_power");
1215 + dev_err(&host->pdev->dev,
1216 + "Could not request mmc_power GPIO %d\n",
1218 + if (slot->ro_gpio >= 0)
1219 + gpio_free(slot->ro_gpio);
1220 + if (slot->cd_gpio)
1221 + gpio_free(slot->cd_gpio);
1224 + octeon_mmc_dbg("%s: Shutting off power to slot %d via gpio %d\n",
1225 + DRV_NAME, slot->bus_id, slot->pwr_gpio);
1226 + gpio_direction_output(slot->pwr_gpio,
1227 + slot->pwr_gpio_low);
1230 + * Set up host parameters.
1232 + mmc->ops = &octeon_mmc_ops;
1233 + mmc->f_min = 400000;
1234 + mmc->f_max = max_freq;
1235 + mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1236 + MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
1238 + mmc->ocr_avail = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 |
1239 + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 |
1240 + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36;
1242 + /* post-sdk23 caps */
1244 + ((mmc->f_max >= 12000000) * MMC_CAP_UHS_SDR12) |
1245 + ((mmc->f_max >= 25000000) * MMC_CAP_UHS_SDR25) |
1246 + ((mmc->f_max >= 50000000) * MMC_CAP_UHS_SDR50) |
1249 + if (host->global_pwr_gpio >= 0)
1250 + mmc->caps |= MMC_CAP_POWER_OFF_CARD;
1252 + /* "1.8v" capability is actually 1.8-or-3.3v */
1254 + mmc->caps |= MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR;
1256 + mmc->max_segs = 64;
1257 + mmc->max_seg_size = host->linear_buf_size;
1258 + mmc->max_req_size = host->linear_buf_size;
1259 + mmc->max_blk_size = 512;
1260 + mmc->max_blk_count = mmc->max_req_size / 512;
1262 + slot->clock = mmc->f_min;
1263 + slot->sclock = octeon_get_io_clock_rate();
1265 + clock_period = 1000000000000ull / slot->sclock; /* period in ps */
1266 + slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
1267 + slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
1269 + slot->bus_width = bus_width;
1270 + slot->bus_id = id;
1271 + slot->cached_rca = 1;
1273 + /* Only a single user of the bootbus at a time. */
1274 + octeon_mmc_acquire_bus(host);
1275 + host->slot[id] = slot;
1277 + octeon_mmc_switch_to(slot);
1278 + /* Initialize MMC Block. */
1279 + octeon_mmc_initlowlevel(slot, bus_width);
1281 + octeon_mmc_release_bus(host);
1283 + ret = mmc_add_host(mmc);
1284 + octeon_mmc_dbg("mmc_add_host returned %d\n", ret);
1289 +static int octeon_mmc_probe(struct platform_device *pdev)
1291 + union cvmx_mio_emm_cfg emm_cfg;
1292 + struct octeon_mmc_host *host;
1293 + struct resource *res;
1294 + void __iomem *base;
1298 + struct device_node *node = pdev->dev.of_node;
1299 + bool cn78xx_style;
1301 + enum of_gpio_flags f;
1303 + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
1307 + spin_lock_init(&host->irq_handler_lock);
1308 + sema_init(&host->mmc_serializer, 1);
1310 + cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-mmc");
1311 + if (cn78xx_style) {
1312 + host->need_bootbus_lock = false;
1313 + host->big_dma_addr = true;
1314 + host->need_irq_handler_lock = true;
1316 + * First seven are the EMM_INT bits 0..6, then two for
1317 + * the EMM_DMA_INT bits
1319 + for (i = 0; i < 9; i++) {
1320 + mmc_irq[i] = platform_get_irq(pdev, i);
1321 + if (mmc_irq[i] < 0)
1322 + return mmc_irq[i];
1325 + host->need_bootbus_lock = true;
1326 + host->big_dma_addr = false;
1327 + host->need_irq_handler_lock = false;
1328 + /* First one is EMM second NDF_DMA */
1329 + for (i = 0; i < 2; i++) {
1330 + mmc_irq[i] = platform_get_irq(pdev, i);
1331 + if (mmc_irq[i] < 0)
1332 + return mmc_irq[i];
1335 + host->last_slot = -1;
1337 + if (bb_size < 512 || bb_size >= (1 << 24))
1338 + bb_size = 1 << 16;
1339 + host->linear_buf_size = bb_size;
1340 + host->linear_buf = devm_kzalloc(&pdev->dev, host->linear_buf_size,
1343 + if (!host->linear_buf) {
1344 + dev_err(&pdev->dev, "devm_kzalloc failed\n");
1348 + host->pdev = pdev;
1350 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1352 + dev_err(&pdev->dev, "Platform resource[0] is missing\n");
1355 + base = devm_ioremap_resource(&pdev->dev, res);
1357 + return PTR_ERR(base);
1358 + host->base = (u64)base;
1360 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1362 + dev_err(&pdev->dev, "Platform resource[1] is missing\n");
1366 + base = devm_ioremap_resource(&pdev->dev, res);
1367 + if (IS_ERR(base)) {
1368 + ret = PTR_ERR(base);
1371 + host->ndf_base = (u64)base;
1373 + * Clear out any pending interrupts that may be left over from
1376 + t = cvmx_read_csr(host->base + OCT_MIO_EMM_INT);
1377 + cvmx_write_csr(host->base + OCT_MIO_EMM_INT, t);
1378 + if (cn78xx_style) {
1379 + /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
1380 + for (i = 1; i <= 4; i++) {
1381 + ret = devm_request_irq(&pdev->dev, mmc_irq[i],
1382 + octeon_mmc_interrupt,
1383 + 0, DRV_NAME, host);
1385 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1391 + ret = devm_request_irq(&pdev->dev, mmc_irq[0],
1392 + octeon_mmc_interrupt, 0, DRV_NAME, host);
1394 + dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
1400 + ret = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1401 + if (ret == -EPROBE_DEFER)
1404 + host->global_pwr_gpio = ret;
1405 + host->global_pwr_gpio_low =
1406 + (host->global_pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1408 + if (host->global_pwr_gpio >= 0) {
1409 + ret = gpio_request(host->global_pwr_gpio, "mmc global power");
1411 + dev_err(&pdev->dev,
1412 + "Could not request mmc global power gpio %d\n",
1413 + host->global_pwr_gpio);
1416 + dev_dbg(&pdev->dev, "Global power on\n");
1417 + gpio_direction_output(host->global_pwr_gpio,
1418 + !host->global_pwr_gpio_low);
1421 + platform_set_drvdata(pdev, host);
1423 + for_each_child_of_node(pdev->dev.of_node, node) {
1427 + int ro_gpio, cd_gpio, pwr_gpio;
1428 + bool ro_low, cd_low, pwr_low;
1429 + u32 bus_width, max_freq, cmd_skew, dat_skew;
1431 + if (!of_device_is_compatible(node,
1432 + "cavium,octeon-6130-mmc-slot")) {
1433 + pr_warn("Sub node isn't slot: %s\n",
1434 + of_node_full_name(node));
1438 + if (of_property_read_u32(node, "reg", &slot) != 0) {
1439 + pr_warn("Missing or invalid reg property on %s\n",
1440 + of_node_full_name(node));
1444 + r = of_property_read_u32(node, "cavium,bus-max-width",
1448 + pr_info("Bus width not found for slot %d, defaulting to %d\n",
1451 + switch (bus_width) {
1457 + pr_warn("Invalid bus width property for slot %d\n",
1463 + r = of_property_read_u32(node, "cavium,cmd-clk-skew",
1468 + r = of_property_read_u32(node, "cavium,dat-clk-skew",
1473 + r = of_property_read_u32(node, "spi-max-frequency", &max_freq);
1475 + max_freq = 52000000;
1476 + pr_info("No spi-max-frequency for slot %d, defaulting to %d\n",
1480 + ro_gpio = of_get_named_gpio_flags(node, "wp-gpios", 0, &f);
1481 + ro_low = (ro_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1482 + cd_gpio = of_get_named_gpio_flags(node, "cd-gpios", 0, &f);
1483 + cd_low = (cd_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1484 + pwr_gpio = of_get_named_gpio_flags(node, "power-gpios", 0, &f);
1485 + pwr_low = (pwr_gpio >= 0 && f == OF_GPIO_ACTIVE_LOW);
1487 + ret = octeon_init_slot(host, slot, bus_width, max_freq,
1488 + ro_gpio, cd_gpio, pwr_gpio,
1489 + ro_low, cd_low, pwr_low,
1490 + cmd_skew, dat_skew);
1491 + octeon_mmc_dbg("init slot %d, ret = %d\n", slot, ret);
1499 + dev_err(&pdev->dev, "Probe failed: %d\n", ret);
1501 + /* Disable MMC controller */
1502 + emm_cfg.s.bus_ena = 0;
1503 + cvmx_write_csr(host->base + OCT_MIO_EMM_CFG, emm_cfg.u64);
1505 + if (host->global_pwr_gpio >= 0) {
1506 + dev_dbg(&pdev->dev, "Global power off\n");
1507 + gpio_set_value_cansleep(host->global_pwr_gpio,
1508 + host->global_pwr_gpio_low);
1509 + gpio_free(host->global_pwr_gpio);
1515 +static int octeon_mmc_remove(struct platform_device *pdev)
1517 + union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1518 + struct octeon_mmc_host *host = platform_get_drvdata(pdev);
1519 + struct octeon_mmc_slot *slot;
1521 + platform_set_drvdata(pdev, NULL);
1526 + /* quench all users */
1527 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1528 + slot = host->slot[i];
1530 + mmc_remove_host(slot->mmc);
1533 + /* Reset bus_id */
1535 + cvmx_read_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG);
1536 + ndf_dma_cfg.s.en = 0;
1537 + cvmx_write_csr(host->ndf_base + OCT_MIO_NDF_DMA_CFG,
1540 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1541 + struct octeon_mmc_slot *slot;
1543 + slot = host->slot[i];
1546 + /* Free the GPIOs */
1547 + if (slot->ro_gpio >= 0)
1548 + gpio_free(slot->ro_gpio);
1549 + if (slot->cd_gpio >= 0)
1550 + gpio_free(slot->cd_gpio);
1551 + if (slot->pwr_gpio >= 0) {
1552 + gpio_set_value_cansleep(slot->pwr_gpio,
1553 + slot->pwr_gpio_low);
1554 + gpio_free(slot->pwr_gpio);
1558 + if (host->global_pwr_gpio >= 0) {
1559 + dev_dbg(&pdev->dev, "Global power off\n");
1560 + gpio_set_value_cansleep(host->global_pwr_gpio,
1561 + host->global_pwr_gpio_low);
1562 + gpio_free(host->global_pwr_gpio);
1565 + for (i = 0; i < OCTEON_MAX_MMC; i++) {
1566 + slot = host->slot[i];
1568 + mmc_free_host(slot->mmc);
1575 +static const struct of_device_id octeon_mmc_match[] = {
1577 + .compatible = "cavium,octeon-6130-mmc",
1580 + .compatible = "cavium,octeon-7890-mmc",
1584 +MODULE_DEVICE_TABLE(of, octeon_mmc_match);
1586 +static struct platform_driver octeon_mmc_driver = {
1587 + .probe = octeon_mmc_probe,
1588 + .remove = octeon_mmc_remove,
1591 + .owner = THIS_MODULE,
1592 + .of_match_table = octeon_mmc_match,
1596 +static int __init octeon_mmc_init(void)
1600 + octeon_mmc_dbg("calling octeon_mmc_init\n");
1602 + ret = platform_driver_register(&octeon_mmc_driver);
1603 + octeon_mmc_dbg("driver probe returned %d\n", ret);
1606 + pr_err("%s: Failed to register driver\n", DRV_NAME);
1611 +static void __exit octeon_mmc_cleanup(void)
1613 + /* Unregister MMC driver */
1614 + platform_driver_unregister(&octeon_mmc_driver);
1617 +module_init(octeon_mmc_init);
1618 +module_exit(octeon_mmc_cleanup);
1620 +MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
1621 +MODULE_DESCRIPTION("low-level driver for Cavium OCTEON MMC/SD card");
1622 +MODULE_LICENSE("GPL");