1 --- a/drivers/mmc/card/block.c
2 +++ b/drivers/mmc/card/block.c
3 @@ -1294,7 +1294,7 @@ static void mmc_blk_rw_rq_prep(struct mm
7 - if (brq->data.blocks > 1 || do_rel_wr) {
8 + if (brq->data.blocks > 1 || do_rel_wr || card->host->caps2 & MMC_CAP2_FORCE_MULTIBLOCK) {
9 /* SPI multiblock writes terminate using a special
10 * token, not a STOP_TRANSMISSION request.
12 --- a/drivers/mmc/core/sd.c
13 +++ b/drivers/mmc/core/sd.c
15 #include <linux/err.h>
16 #include <linux/slab.h>
17 #include <linux/stat.h>
18 +#include <linux/jiffies.h>
19 +#include <linux/nmi.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/card.h>
23 @@ -58,6 +60,15 @@ static const unsigned int tacc_mant[] =
28 +static const unsigned long retry_timeout_ms= 10*1000;
30 +// try at least 10 times, even if timeout is reached
31 +static const int retry_min_tries= 10;
33 +// delay between tries
34 +static const unsigned long retry_delay_ms= 10;
37 * Given the decoded CSD structure, decode the raw CID to our CID structure.
39 @@ -210,12 +221,62 @@ static int mmc_decode_scr(struct mmc_car
43 - * Fetch and process SD Status register.
44 + * Fetch and process SD Configuration Register.
46 +static int mmc_read_scr(struct mmc_card *card)
48 + unsigned long timeout_at;
51 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
54 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
56 + unsigned long delay_at;
59 + err = mmc_app_send_scr(card, card->raw_scr);
61 + break; // success!!!
63 + touch_nmi_watchdog(); // we are still alive!
66 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
67 + while( time_before( jiffies, delay_at ) )
70 + touch_nmi_watchdog(); // we are still alive!
76 + pr_err("%s: failed to read SD Configuration register (SCR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
82 + pr_info("%s: could read SD Configuration register (SCR) at the %dth attempt\n", mmc_hostname(card->host), tries );
85 + err = mmc_decode_scr(card);
93 + * Fetch and process SD Status Register.
95 static int mmc_read_ssr(struct mmc_card *card)
97 + unsigned long timeout_at;
98 unsigned int au, es, et, eo;
103 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
104 @@ -228,14 +289,40 @@ static int mmc_read_ssr(struct mmc_card
108 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
111 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
113 + unsigned long delay_at;
116 err = mmc_app_sd_status(card, ssr);
118 - pr_warning("%s: problem reading SD Status "
119 - "register.\n", mmc_hostname(card->host));
122 + break; // success!!!
124 + touch_nmi_watchdog(); // we are still alive!
127 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
128 + while( time_before( jiffies, delay_at ) )
131 + touch_nmi_watchdog(); // we are still alive!
137 + pr_err("%s: failed to read SD Status register (SSR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
143 + pr_info("%s: read SD Status register (SSR) after %d attempts\n", mmc_hostname(card->host), tries );
146 for (i = 0; i < 16; i++)
147 ssr[i] = be32_to_cpu(ssr[i]);
149 @@ -808,13 +895,9 @@ int mmc_sd_setup_card(struct mmc_host *h
153 - * Fetch SCR from card.
154 + * Fetch and decode SD Configuration register.
156 - err = mmc_app_send_scr(card, card->raw_scr);
160 - err = mmc_decode_scr(card);
161 + err = mmc_read_scr(card);
165 --- a/drivers/mmc/host/Kconfig
166 +++ b/drivers/mmc/host/Kconfig
167 @@ -249,6 +249,27 @@ config MMC_SDHCI_S3C_DMA
171 +config MMC_SDHCI_BCM2708
172 + tristate "SDHCI support on BCM2708"
173 + depends on MMC_SDHCI && MACH_BCM2708
174 + select MMC_SDHCI_IO_ACCESSORS
176 + This selects the Secure Digital Host Controller Interface (SDHCI)
177 + often referred to as the eMMC block.
179 + If you have a controller with this interface, say Y or M here.
183 +config MMC_SDHCI_BCM2708_DMA
184 + bool "DMA support on BCM2708 Arasan controller"
185 + depends on MMC_SDHCI_BCM2708
187 + Enable DMA support on the Arasan SDHCI controller in Broadcom 2708
192 config MMC_SDHCI_BCM2835
193 tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
194 depends on ARCH_BCM2835
195 --- a/drivers/mmc/host/Makefile
196 +++ b/drivers/mmc/host/Makefile
197 @@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-p
198 obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
199 obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
200 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
201 +obj-$(CONFIG_MMC_SDHCI_BCM2708) += sdhci-bcm2708.o
202 obj-$(CONFIG_MMC_WBSD) += wbsd.o
203 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
204 obj-$(CONFIG_MMC_OMAP) += omap.o
206 +++ b/drivers/mmc/host/sdhci-bcm2708.c
209 + * sdhci-bcm2708.c Support for SDHCI device on BCM2708
210 + * Copyright (c) 2010 Broadcom
212 + * This program is free software; you can redistribute it and/or modify
213 + * it under the terms of the GNU General Public License version 2 as
214 + * published by the Free Software Foundation.
216 + * This program is distributed in the hope that it will be useful,
217 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
218 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
219 + * GNU General Public License for more details.
221 + * You should have received a copy of the GNU General Public License
222 + * along with this program; if not, write to the Free Software
223 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
227 + * SDHCI platform device - Arasan SD controller in BCM2708
229 + * Inspired by sdhci-pci.c, by Pierre Ossman
232 +#include <linux/delay.h>
233 +#include <linux/highmem.h>
234 +#include <linux/platform_device.h>
235 +#include <linux/module.h>
236 +#include <linux/mmc/mmc.h>
237 +#include <linux/mmc/host.h>
238 +#include <linux/mmc/sd.h>
240 +#include <linux/io.h>
241 +#include <linux/dma-mapping.h>
242 +#include <mach/dma.h>
246 +/*****************************************************************************\
250 +\*****************************************************************************/
252 +#define DRIVER_NAME "bcm2708_sdhci"
254 +/* for the time being insist on DMA mode - PIO seems not to work */
255 +#ifndef CONFIG_MMC_SDHCI_BCM2708_DMA
256 +#warning Non-DMA (PIO) version of this driver currently unavailable
258 +#undef CONFIG_MMC_SDHCI_BCM2708_DMA
259 +#define CONFIG_MMC_SDHCI_BCM2708_DMA y
261 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
262 +/* #define CHECK_DMA_USE */
264 +//#define LOG_REGISTERS
266 +#define USE_SCHED_TIME
267 +#define USE_SPACED_WRITES_2CLK 1 /* space consecutive register writes */
268 +#define USE_SOFTWARE_TIMEOUTS 1 /* not hardware timeouts */
269 +#define SOFTWARE_ERASE_TIMEOUT_SEC 30
271 +#define SDHCI_BCM_DMA_CHAN 4 /* this default is normally overridden */
272 +#define SDHCI_BCM_DMA_WAITS 0 /* delays slowing DMA transfers: 0-31 */
273 +/* We are worried that SD card DMA use may be blocking the AXI bus for others */
275 +/*! TODO: obtain these from the physical address */
276 +#define DMA_SDHCI_BASE 0x7e300000 /* EMMC register block on Videocore */
277 +#define DMA_SDHCI_BUFFER (DMA_SDHCI_BASE + SDHCI_BUFFER)
279 +#define BCM2708_SDHCI_SLEEP_TIMEOUT 1000 /* msecs */
281 +/* Mhz clock that the EMMC core is running at. Should match the platform clockman settings */
282 +#define BCM2708_EMMC_CLOCK_FREQ 50000000
284 +#define REG_EXRDFIFO_EN 0x80
285 +#define REG_EXRDFIFO_CFG 0x84
289 +/*****************************************************************************\
293 +\*****************************************************************************/
297 +#define DBG(f, x...) \
298 + pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
299 +// printk(KERN_INFO DRIVER_NAME " [%s()]: " f, __func__,## x)//GRAYG
302 +/*****************************************************************************\
304 + * High Precision Time *
306 +\*****************************************************************************/
308 +#ifdef USE_SCHED_TIME
310 +#include <mach/frc.h>
312 +typedef unsigned long hptime_t;
314 +#define FMT_HPT "lu"
316 +static inline hptime_t hptime(void)
318 + return frc_clock_ticks32();
321 +#define HPTIME_CLK_NS 1000ul
325 +typedef unsigned long hptime_t;
327 +#define FMT_HPT "lu"
329 +static inline hptime_t hptime(void)
334 +#define HPTIME_CLK_NS (1000000000ul/HZ)
338 +static inline unsigned long int since_ns(hptime_t t)
340 + return (unsigned long)((hptime() - t) * HPTIME_CLK_NS);
343 +static bool allow_highspeed = 1;
344 +static int emmc_clock_freq = BCM2708_EMMC_CLOCK_FREQ;
345 +static bool sync_after_dma = 1;
346 +static bool missing_status = 1;
347 +static bool spurious_crc_acmd51 = 0;
348 +bool enable_llm = 1;
349 +bool extra_messages = 0;
352 +static void hptime_test(void)
361 + printk(KERN_INFO DRIVER_NAME": 10ms = %"FMT_HPT" clks "
362 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
363 + later-now, now, later,
364 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
370 + printk(KERN_INFO DRIVER_NAME": 1s = %"FMT_HPT" clks "
371 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
372 + later-now, now, later,
373 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
377 +/*****************************************************************************\
379 + * SDHCI core callbacks *
381 +\*****************************************************************************/
384 +#ifdef CHECK_DMA_USE
385 +/*#define CHECK_DMA_REG_USE*/
388 +#ifdef CHECK_DMA_REG_USE
389 +/* we don't expect anything to be using these registers during a
390 + DMA (except the IRQ status) - so check */
391 +static void check_dma_reg_use(struct sdhci_host *host, int reg);
393 +#define check_dma_reg_use(host, reg)
397 +static inline u32 sdhci_bcm2708_raw_readl(struct sdhci_host *host, int reg)
399 + return readl(host->ioaddr + reg);
402 +u32 sdhci_bcm2708_readl(struct sdhci_host *host, int reg)
404 + u32 l = sdhci_bcm2708_raw_readl(host, reg);
406 +#ifdef LOG_REGISTERS
407 + printk(KERN_ERR "%s: readl from 0x%02x, value 0x%08x\n",
408 + mmc_hostname(host->mmc), reg, l);
410 + check_dma_reg_use(host, reg);
415 +u16 sdhci_bcm2708_readw(struct sdhci_host *host, int reg)
417 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
418 + u32 w = l >> (reg << 3 & 0x18) & 0xffff;
420 +#ifdef LOG_REGISTERS
421 + printk(KERN_ERR "%s: readw from 0x%02x, value 0x%04x\n",
422 + mmc_hostname(host->mmc), reg, w);
424 + check_dma_reg_use(host, reg);
429 +u8 sdhci_bcm2708_readb(struct sdhci_host *host, int reg)
431 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
432 + u32 b = l >> (reg << 3 & 0x18) & 0xff;
434 +#ifdef LOG_REGISTERS
435 + printk(KERN_ERR "%s: readb from 0x%02x, value 0x%02x\n",
436 + mmc_hostname(host->mmc), reg, b);
438 + check_dma_reg_use(host, reg);
444 +static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
448 +#if USE_SPACED_WRITES_2CLK
449 + static bool timeout_disabled = false;
450 + unsigned int ns_2clk = 0;
452 + /* The Arasan has a bugette whereby it may lose the content of
453 + * successive writes to registers that are within two SD-card clock
454 + * cycles of each other (a clock domain crossing problem).
455 + * It seems, however, that the data register does not have this problem.
456 + * (Which is just as well - otherwise we'd have to nobble the DMA engine
459 + if (reg != SDHCI_BUFFER && host->clock != 0) {
460 + /* host->clock is the clock freq in Hz */
461 + static hptime_t last_write_hpt;
462 + hptime_t now = hptime();
463 + ns_2clk = cycle_delay*1000000/(host->clock/1000);
465 + if (now == last_write_hpt || now == last_write_hpt+1) {
466 + /* we can't guarantee any significant time has
467 + * passed - we'll have to wait anyway ! */
471 + /* we must have waited at least this many ns: */
472 + unsigned int ns_wait = HPTIME_CLK_NS *
473 + (last_write_hpt - now - 1);
474 + if (ns_wait < ns_2clk)
475 + ndelay(ns_2clk - ns_wait);
477 + last_write_hpt = now;
479 +#if USE_SOFTWARE_TIMEOUTS
480 + /* The Arasan is clocked for timeouts using the SD clock which is too
481 + * fast for ERASE commands and causes issues. So we disable timeouts
483 + if (host->cmd != NULL && host->cmd->opcode == MMC_ERASE &&
484 + reg == (SDHCI_COMMAND & ~3)) {
485 + mod_timer(&host->timer,
486 + jiffies + SOFTWARE_ERASE_TIMEOUT_SEC * HZ);
487 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
488 + ier &= ~SDHCI_INT_DATA_TIMEOUT;
489 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
490 + timeout_disabled = true;
492 + } else if (timeout_disabled) {
493 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
494 + ier |= SDHCI_INT_DATA_TIMEOUT;
495 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
496 + timeout_disabled = false;
500 + writel(val, host->ioaddr + reg);
502 + void __iomem * regaddr = host->ioaddr + reg;
504 + writel(val, regaddr);
506 + if (reg != SDHCI_BUFFER && reg != SDHCI_INT_STATUS && host->clock != 0)
508 + int timeout = 100000;
509 + while (val != readl(regaddr) && --timeout > 0)
513 + printk(KERN_ERR "%s: writing 0x%X to reg 0x%X "
514 + "always gives 0x%X\n",
515 + mmc_hostname(host->mmc),
516 + val, reg, readl(regaddr));
517 + BUG_ON(timeout <= 0);
523 +void sdhci_bcm2708_writel(struct sdhci_host *host, u32 val, int reg)
525 +#ifdef LOG_REGISTERS
526 + printk(KERN_ERR "%s: writel to 0x%02x, value 0x%08x\n",
527 + mmc_hostname(host->mmc), reg, val);
529 + check_dma_reg_use(host, reg);
531 + sdhci_bcm2708_raw_writel(host, val, reg);
534 +void sdhci_bcm2708_writew(struct sdhci_host *host, u16 val, int reg)
536 + static u32 shadow = 0;
538 + u32 p = reg == SDHCI_COMMAND ? shadow :
539 + sdhci_bcm2708_raw_readl(host, reg & ~3);
540 + u32 s = reg << 3 & 0x18;
542 + u32 m = 0xffff << s;
544 +#ifdef LOG_REGISTERS
545 + printk(KERN_ERR "%s: writew to 0x%02x, value 0x%04x\n",
546 + mmc_hostname(host->mmc), reg, val);
549 + if (reg == SDHCI_TRANSFER_MODE)
550 + shadow = (p & ~m) | l;
552 + check_dma_reg_use(host, reg);
553 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
557 +void sdhci_bcm2708_writeb(struct sdhci_host *host, u8 val, int reg)
559 + u32 p = sdhci_bcm2708_raw_readl(host, reg & ~3);
560 + u32 s = reg << 3 & 0x18;
564 +#ifdef LOG_REGISTERS
565 + printk(KERN_ERR "%s: writeb to 0x%02x, value 0x%02x\n",
566 + mmc_hostname(host->mmc), reg, val);
569 + check_dma_reg_use(host, reg);
570 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
573 +static unsigned int sdhci_bcm2708_get_max_clock(struct sdhci_host *host)
575 + return emmc_clock_freq;
578 +/*****************************************************************************\
582 +\*****************************************************************************/
584 +struct sdhci_bcm2708_priv {
587 + void __iomem *dma_chan_base;
588 + struct bcm2708_dma_cb *cb_base; /* DMA control blocks */
589 + dma_addr_t cb_handle;
590 + /* tracking scatter gather progress */
591 + unsigned sg_ix; /* scatter gather list index */
592 + unsigned sg_done; /* bytes in current sg_ix done */
593 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
594 + unsigned char dma_wanted; /* DMA transfer requested */
595 + unsigned char dma_waits; /* wait states in DMAs */
596 +#ifdef CHECK_DMA_USE
597 + unsigned char dmas_pending; /* no of unfinished DMAs */
598 + hptime_t when_started;
599 + hptime_t when_reset;
600 + hptime_t when_stopped;
603 + /* signalling the end of a transfer */
604 + void (*complete)(struct sdhci_host *);
607 +#define SDHCI_HOST_PRIV(host) \
608 + (struct sdhci_bcm2708_priv *)((struct sdhci_host *)(host)+1)
612 +#ifdef CHECK_DMA_REG_USE
613 +static void check_dma_reg_use(struct sdhci_host *host, int reg)
615 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
616 + if (host_priv->dma_wanted && reg != SDHCI_INT_STATUS) {
617 + printk(KERN_INFO"%s: accessing register 0x%x during DMA\n",
618 + mmc_hostname(host->mmc), reg);
625 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
627 +static void sdhci_clear_set_irqgen(struct sdhci_host *host, u32 clear, u32 set)
631 + ier = sdhci_bcm2708_raw_readl(host, SDHCI_SIGNAL_ENABLE);
634 + /* change which requests generate IRQs - makes no difference to
635 + the content of SDHCI_INT_STATUS, or the need to acknowledge IRQs */
636 + sdhci_bcm2708_raw_writel(host, ier, SDHCI_SIGNAL_ENABLE);
639 +static void sdhci_signal_irqs(struct sdhci_host *host, u32 irqs)
641 + sdhci_clear_set_irqgen(host, 0, irqs);
644 +static void sdhci_unsignal_irqs(struct sdhci_host *host, u32 irqs)
646 + sdhci_clear_set_irqgen(host, irqs, 0);
651 +static void schci_bcm2708_cb_read(struct sdhci_bcm2708_priv *host,
653 + dma_addr_t dma_addr, unsigned len,
654 + int /*bool*/ is_last)
656 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
657 + unsigned char dmawaits = host->dma_waits;
659 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
660 + BCM2708_DMA_WAITS(dmawaits) |
661 + BCM2708_DMA_S_DREQ |
662 + BCM2708_DMA_D_WIDTH |
664 + cb->src = DMA_SDHCI_BUFFER; /* DATA register DMA address */
665 + cb->dst = dma_addr;
670 + cb->info |= BCM2708_DMA_INT_EN |
671 + BCM2708_DMA_WAIT_RESP;
674 + cb->next = host->cb_handle +
675 + (ix+1)*sizeof(struct bcm2708_dma_cb);
681 +static void schci_bcm2708_cb_write(struct sdhci_bcm2708_priv *host,
683 + dma_addr_t dma_addr, unsigned len,
684 + int /*bool*/ is_last)
686 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
687 + unsigned char dmawaits = host->dma_waits;
689 + /* We can make arbitrarily large writes as long as we specify DREQ to
690 + pace the delivery of bytes to the Arasan hardware */
691 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
692 + BCM2708_DMA_WAITS(dmawaits) |
693 + BCM2708_DMA_D_DREQ |
694 + BCM2708_DMA_S_WIDTH |
696 + cb->src = dma_addr;
697 + cb->dst = DMA_SDHCI_BUFFER; /* DATA register DMA address */
702 + cb->info |= BCM2708_DMA_INT_EN |
703 + BCM2708_DMA_WAIT_RESP;
706 + cb->next = host->cb_handle +
707 + (ix+1)*sizeof(struct bcm2708_dma_cb);
714 +static void schci_bcm2708_dma_go(struct sdhci_host *host)
716 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
717 + void __iomem *dma_chan_base = host_priv->dma_chan_base;
719 + BUG_ON(host_priv->dma_wanted);
720 +#ifdef CHECK_DMA_USE
721 + if (host_priv->dma_wanted)
722 + printk(KERN_ERR "%s: DMA already in progress - "
723 + "now %"FMT_HPT", last started %lu "
724 + "reset %lu stopped %lu\n",
725 + mmc_hostname(host->mmc),
726 + hptime(), since_ns(host_priv->when_started),
727 + since_ns(host_priv->when_reset),
728 + since_ns(host_priv->when_stopped));
729 + else if (host_priv->dmas_pending > 0)
730 + printk(KERN_INFO "%s: note - new DMA when %d reset DMAs "
731 + "already in progress - "
732 + "now %"FMT_HPT", started %lu reset %lu stopped %lu\n",
733 + mmc_hostname(host->mmc),
734 + host_priv->dmas_pending,
735 + hptime(), since_ns(host_priv->when_started),
736 + since_ns(host_priv->when_reset),
737 + since_ns(host_priv->when_stopped));
738 + host_priv->dmas_pending += 1;
739 + host_priv->when_started = hptime();
741 + host_priv->dma_wanted = 1;
742 + DBG("PDMA go - base %p handle %08X\n", dma_chan_base,
743 + host_priv->cb_handle);
744 + bcm_dma_start(dma_chan_base, host_priv->cb_handle);
749 +sdhci_platdma_read(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
751 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
753 + DBG("PDMA to read %d bytes\n", len);
754 + host_priv->sg_done += len;
755 + schci_bcm2708_cb_read(host_priv, 0, dma_addr, len, 1/*TRUE*/);
756 + schci_bcm2708_dma_go(host);
761 +sdhci_platdma_write(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
763 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
765 + DBG("PDMA to write %d bytes\n", len);
766 + //BUG_ON(0 != (len & 0x1ff));
768 + host_priv->sg_done += len;
769 + schci_bcm2708_cb_write(host_priv, 0, dma_addr, len, 1/*TRUE*/);
770 + schci_bcm2708_dma_go(host);
773 +/*! space is available to receive into or data is available to write
774 + Platform DMA exported function
777 +sdhci_bcm2708_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
778 + void(*completion_callback)(struct sdhci_host *host))
780 + struct mmc_data *data = host->data;
781 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
786 + BUG_ON(NULL == data);
787 + BUG_ON(0 == data->blksz);
789 + host_priv->complete = completion_callback;
791 + sg_ix = host_priv->sg_ix;
792 + BUG_ON(sg_ix >= data->sg_len);
794 + /* we can DMA blocks larger than blksz - it may hang the DMA
795 + channel but we are its only user */
796 + bytes = sg_dma_len(&data->sg[sg_ix]) - host_priv->sg_done;
797 + addr = sg_dma_address(&data->sg[sg_ix]) + host_priv->sg_done;
800 + /* We're going to poll for read/write available state until
804 + if (data->flags & MMC_DATA_READ) {
805 + if (*ref_intmask & SDHCI_INT_DATA_AVAIL) {
806 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
807 + SDHCI_INT_SPACE_AVAIL);
808 + sdhci_platdma_read(host, addr, bytes);
811 + if (*ref_intmask & SDHCI_INT_SPACE_AVAIL) {
812 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
813 + SDHCI_INT_SPACE_AVAIL);
814 + sdhci_platdma_write(host, addr, bytes);
819 + we have run out of bytes that need transferring (e.g. we may be in
820 + the middle of the last DMA transfer), or
821 + it is also possible that we've been called when another IRQ is
822 + signalled, even though we've turned off signalling of our own IRQ */
824 + *ref_intmask &= ~SDHCI_INT_DATA_END;
825 + /* don't let the main sdhci driver act on this .. we'll deal with it
826 + when we respond to the DMA - if one is currently in progress */
829 +/* is it possible to DMA the given mmc_data structure?
830 + Platform DMA exported function
833 +sdhci_bcm2708_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
835 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
836 + int ok = bcm_sg_suitable_for_dma(data->sg, data->sg_len);
839 + DBG("Reverting to PIO - bad cache alignment\n");
842 + host_priv->sg_ix = 0; /* first SG index */
843 + host_priv->sg_done = 0; /* no bytes done */
849 +#include <mach/arm_control.h> //GRAYG
850 +/*! the current SD transaction has been abandoned
851 + We need to tidy up if we were in the middle of a DMA
852 + Platform DMA exported function
855 +sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
857 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
858 +// unsigned long flags;
860 + BUG_ON(NULL == host);
862 +// spin_lock_irqsave(&host->lock, flags);
864 + if (host_priv->dma_wanted) {
865 + if (NULL == data) {
866 + printk(KERN_ERR "%s: ongoing DMA reset - no data!\n",
867 + mmc_hostname(host->mmc));
868 + BUG_ON(NULL == data);
870 + struct scatterlist *sg;
877 + sg_len = data->sg_len;
878 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
880 + cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
882 + if (!(BCM2708_DMA_ACTIVE & cs))
884 + if (extra_messages)
885 + printk(KERN_INFO "%s: missed completion of "
886 + "cmd %d DMA (%d/%d [%d]/[%d]) - "
888 + mmc_hostname(host->mmc),
890 + host_priv->sg_done, sg_todo,
891 + host_priv->sg_ix+1, sg_len);
894 + printk(KERN_INFO "%s: resetting ongoing cmd %d"
895 + "DMA before %d/%d [%d]/[%d] complete\n",
896 + mmc_hostname(host->mmc),
898 + host_priv->sg_done, sg_todo,
899 + host_priv->sg_ix+1, sg_len);
900 +#ifdef CHECK_DMA_USE
901 + printk(KERN_INFO "%s: now %"FMT_HPT" started %lu "
902 + "last reset %lu last stopped %lu\n",
903 + mmc_hostname(host->mmc),
904 + hptime(), since_ns(host_priv->when_started),
905 + since_ns(host_priv->when_reset),
906 + since_ns(host_priv->when_stopped));
907 + { unsigned long info, debug;
908 + void __iomem *base;
909 + unsigned long pend0, pend1, pend2;
911 + base = host_priv->dma_chan_base;
912 + cs = readl(base + BCM2708_DMA_CS);
913 + info = readl(base + BCM2708_DMA_INFO);
914 + debug = readl(base + BCM2708_DMA_DEBUG);
915 + printk(KERN_INFO "%s: DMA%d CS=%08lX TI=%08lX "
917 + mmc_hostname(host->mmc),
918 + host_priv->dma_chan,
920 + pend0 = readl(__io_address(ARM_IRQ_PEND0));
921 + pend1 = readl(__io_address(ARM_IRQ_PEND1));
922 + pend2 = readl(__io_address(ARM_IRQ_PEND2));
924 + printk(KERN_INFO "%s: PEND0=%08lX "
925 + "PEND1=%08lX PEND2=%08lX\n",
926 + mmc_hostname(host->mmc),
927 + pend0, pend1, pend2);
929 + //gintsts = readl(__io_address(GINTSTS));
930 + //gintmsk = readl(__io_address(GINTMSK));
931 + //printk(KERN_INFO "%s: USB GINTSTS=%08lX"
932 + // "GINTMSK=%08lX\n",
933 + // mmc_hostname(host->mmc), gintsts, gintmsk);
936 + rc = bcm_dma_abort(host_priv->dma_chan_base);
939 + host_priv->dma_wanted = 0;
940 +#ifdef CHECK_DMA_USE
941 + host_priv->when_reset = hptime();
945 +// spin_unlock_irqrestore(&host->lock, flags);
949 +static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
952 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
953 + struct mmc_data *data;
954 + struct scatterlist *sg;
958 +// unsigned long flags;
960 + BUG_ON(NULL == host);
962 +// spin_lock_irqsave(&host->lock, flags);
965 +#ifdef CHECK_DMA_USE
966 + if (host_priv->dmas_pending <= 0)
967 + DBG("on completion no DMA in progress - "
968 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
969 + hptime(), since_ns(host_priv->when_started),
970 + since_ns(host_priv->when_reset),
971 + since_ns(host_priv->when_stopped));
972 + else if (host_priv->dmas_pending > 1)
973 + DBG("still %d DMA in progress after completion - "
974 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
975 + host_priv->dmas_pending - 1,
976 + hptime(), since_ns(host_priv->when_started),
977 + since_ns(host_priv->when_reset),
978 + since_ns(host_priv->when_stopped));
979 + BUG_ON(host_priv->dmas_pending <= 0);
980 + host_priv->dmas_pending -= 1;
981 + host_priv->when_stopped = hptime();
983 + host_priv->dma_wanted = 0;
985 + if (NULL == data) {
986 + DBG("PDMA unused completion - status 0x%X\n", dma_cs);
987 +// spin_unlock_irqrestore(&host->lock, flags);
991 + sg_len = data->sg_len;
992 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
994 + DBG("PDMA complete %d/%d [%d]/[%d]..\n",
995 + host_priv->sg_done, sg_todo,
996 + host_priv->sg_ix+1, sg_len);
998 + BUG_ON(host_priv->sg_done > sg_todo);
1000 + if (host_priv->sg_done >= sg_todo) {
1001 + host_priv->sg_ix++;
1002 + host_priv->sg_done = 0;
1005 + sg_ix = host_priv->sg_ix;
1006 + if (sg_ix < sg_len) {
1008 + /* Set off next DMA if we've got the capacity */
1010 + if (data->flags & MMC_DATA_READ)
1011 + irq_mask = SDHCI_INT_DATA_AVAIL;
1013 + irq_mask = SDHCI_INT_SPACE_AVAIL;
1015 + /* We have to use the interrupt status register on the BCM2708
1016 + rather than the SDHCI_PRESENT_STATE register because latency
1017 + in the glue logic means that the information retrieved from
1018 + the latter is not always up-to-date w.r.t the DMA engine -
1019 + it may not indicate that a read or a write is ready yet */
1020 + if (sdhci_bcm2708_raw_readl(host, SDHCI_INT_STATUS) &
1022 + size_t bytes = sg_dma_len(&sg[sg_ix]) -
1023 + host_priv->sg_done;
1024 + dma_addr_t addr = sg_dma_address(&data->sg[sg_ix]) +
1025 + host_priv->sg_done;
1027 + /* acknowledge interrupt */
1028 + sdhci_bcm2708_raw_writel(host, irq_mask,
1029 + SDHCI_INT_STATUS);
1031 + BUG_ON(0 == bytes);
1033 + if (data->flags & MMC_DATA_READ)
1034 + sdhci_platdma_read(host, addr, bytes);
1036 + sdhci_platdma_write(host, addr, bytes);
1038 + DBG("PDMA - wait avail\n");
1039 + /* may generate an IRQ if already present */
1040 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1041 + SDHCI_INT_SPACE_AVAIL);
1044 + if (sync_after_dma) {
1045 + /* On the Arasan controller the stop command (which will be
1046 + scheduled after this completes) does not seem to work
1047 + properly if we allow it to be issued when we are
1048 + transferring data to/from the SD card.
1049 + We get CRC and DEND errors unless we wait for
1050 + the SD controller to finish reading/writing to the card. */
1052 + int timeout=30*5000;
1054 + DBG("PDMA over - sync card\n");
1055 + if (data->flags & MMC_DATA_READ)
1056 + state_mask = SDHCI_DOING_READ;
1058 + state_mask = SDHCI_DOING_WRITE;
1060 + while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE)
1061 + & state_mask) && --timeout > 0)
1067 + printk(KERN_ERR"%s: final %s to SD card still "
1069 + mmc_hostname(host->mmc),
1070 + data->flags & MMC_DATA_READ? "read": "write");
1072 + if (host_priv->complete) {
1073 + (*host_priv->complete)(host);
1074 + DBG("PDMA %s complete\n",
1075 + data->flags & MMC_DATA_READ?"read":"write");
1076 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1077 + SDHCI_INT_SPACE_AVAIL);
1080 +// spin_unlock_irqrestore(&host->lock, flags);
1083 +static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
1085 + irqreturn_t result = IRQ_NONE;
1086 + struct sdhci_host *host = dev_id;
1087 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1088 + u32 dma_cs; /* control and status register */
1090 + BUG_ON(NULL == dev_id);
1091 + BUG_ON(NULL == host_priv->dma_chan_base);
1093 + sdhci_spin_lock(host);
1095 + dma_cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
1097 + if (dma_cs & BCM2708_DMA_ERR) {
1098 + unsigned long debug;
1099 + debug = readl(host_priv->dma_chan_base +
1100 + BCM2708_DMA_DEBUG);
1101 + printk(KERN_ERR "%s: DMA error - CS %lX DEBUG %lX\n",
1102 + mmc_hostname(host->mmc), (unsigned long)dma_cs,
1103 + (unsigned long)debug);
1105 + writel(debug, host_priv->dma_chan_base +
1106 + BCM2708_DMA_DEBUG);
1108 + if (dma_cs & BCM2708_DMA_INT) {
1109 + /* acknowledge interrupt */
1110 + writel(BCM2708_DMA_INT,
1111 + host_priv->dma_chan_base + BCM2708_DMA_CS);
1113 + dsb(); /* ARM data synchronization (push) operation */
1115 + if (!host_priv->dma_wanted) {
1116 + /* ignore this interrupt - it was reset */
1117 + if (extra_messages)
1118 + printk(KERN_INFO "%s: DMA IRQ %X ignored - "
1119 + "results were reset\n",
1120 + mmc_hostname(host->mmc), dma_cs);
1121 +#ifdef CHECK_DMA_USE
1122 + printk(KERN_INFO "%s: now %"FMT_HPT
1123 + " started %lu reset %lu stopped %lu\n",
1124 + mmc_hostname(host->mmc), hptime(),
1125 + since_ns(host_priv->when_started),
1126 + since_ns(host_priv->when_reset),
1127 + since_ns(host_priv->when_stopped));
1128 + host_priv->dmas_pending--;
1131 + sdhci_bcm2708_dma_complete_irq(host, dma_cs);
1133 + result = IRQ_HANDLED;
1135 + sdhci_spin_unlock(host);
1139 +#endif /* CONFIG_MMC_SDHCI_BCM2708_DMA */
1142 +/***************************************************************************** \
1144 + * Device Attributes *
1146 +\*****************************************************************************/
1150 + * Show the DMA-using status
1152 +static ssize_t attr_dma_show(struct device *_dev,
1153 + struct device_attribute *attr, char *buf)
1155 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1158 + int use_dma = (host->flags & SDHCI_USE_PLATDMA? 1:0);
1159 + return sprintf(buf, "%d\n", use_dma);
1165 + * Set the DMA-using status
1167 +static ssize_t attr_dma_store(struct device *_dev,
1168 + struct device_attribute *attr,
1169 + const char *buf, size_t count)
1171 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1174 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1175 + int on = simple_strtol(buf, NULL, 0);
1177 + host->flags |= SDHCI_USE_PLATDMA;
1178 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1179 + printk(KERN_INFO "%s: DMA enabled\n",
1180 + mmc_hostname(host->mmc));
1182 + host->flags &= ~(SDHCI_USE_PLATDMA | SDHCI_REQ_USE_DMA);
1183 + sdhci_bcm2708_writel(host, 0, REG_EXRDFIFO_EN);
1184 + printk(KERN_INFO "%s: DMA disabled\n",
1185 + mmc_hostname(host->mmc));
1193 +static DEVICE_ATTR(use_dma, S_IRUGO | S_IWUGO, attr_dma_show, attr_dma_store);
1197 + * Show the DMA wait states used
1199 +static ssize_t attr_dmawait_show(struct device *_dev,
1200 + struct device_attribute *attr, char *buf)
1202 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1205 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1206 + int dmawait = host_priv->dma_waits;
1207 + return sprintf(buf, "%d\n", dmawait);
1213 + * Set the DMA wait state used
1215 +static ssize_t attr_dmawait_store(struct device *_dev,
1216 + struct device_attribute *attr,
1217 + const char *buf, size_t count)
1219 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1222 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1223 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1224 + int dma_waits = simple_strtol(buf, NULL, 0);
1225 + if (dma_waits >= 0 && dma_waits < 32)
1226 + host_priv->dma_waits = dma_waits;
1228 + printk(KERN_ERR "%s: illegal dma_waits value - %d",
1229 + mmc_hostname(host->mmc), dma_waits);
1236 +static DEVICE_ATTR(dma_wait, S_IRUGO | S_IWUGO,
1237 + attr_dmawait_show, attr_dmawait_store);
1241 + * Show the DMA-using status
1243 +static ssize_t attr_status_show(struct device *_dev,
1244 + struct device_attribute *attr, char *buf)
1246 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1249 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1250 + return sprintf(buf,
1254 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1255 + "dma: %s (%d waits)\n",
1257 + "dma: unconfigured\n",
1261 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1262 + , (host->flags & SDHCI_USE_PLATDMA)? "on": "off"
1263 + , host_priv->dma_waits
1270 +static DEVICE_ATTR(status, S_IRUGO, attr_status_show, NULL);
1272 +/***************************************************************************** \
1274 + * Power Management *
1276 +\*****************************************************************************/
1280 +static int sdhci_bcm2708_suspend(struct platform_device *dev, pm_message_t state)
1282 + struct sdhci_host *host = (struct sdhci_host *)
1283 + platform_get_drvdata(dev);
1287 + ret = mmc_suspend_host(host->mmc);
1293 +static int sdhci_bcm2708_resume(struct platform_device *dev)
1295 + struct sdhci_host *host = (struct sdhci_host *)
1296 + platform_get_drvdata(dev);
1300 + ret = mmc_resume_host(host->mmc);
1308 +/*****************************************************************************\
1310 + * Device quirk functions. Implemented as local ops because the flags *
1311 + * field is out of space with newer kernels. This implementation can be *
1312 + * back ported to older kernels as well. *
1313 +\****************************************************************************/
1314 +static unsigned int sdhci_bcm2708_quirk_extra_ints(struct sdhci_host *host)
1319 +static unsigned int sdhci_bcm2708_quirk_spurious_crc_acmd51(struct sdhci_host *host)
1324 +static unsigned int sdhci_bcm2708_quirk_voltage_broken(struct sdhci_host *host)
1329 +static unsigned int sdhci_bcm2708_uhs_broken(struct sdhci_host *host)
1334 +static unsigned int sdhci_bcm2708_missing_status(struct sdhci_host *host)
1339 +/***************************************************************************** \
1343 +\*****************************************************************************/
1345 +static struct sdhci_ops sdhci_bcm2708_ops = {
1346 +#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
1347 + .read_l = sdhci_bcm2708_readl,
1348 + .read_w = sdhci_bcm2708_readw,
1349 + .read_b = sdhci_bcm2708_readb,
1350 + .write_l = sdhci_bcm2708_writel,
1351 + .write_w = sdhci_bcm2708_writew,
1352 + .write_b = sdhci_bcm2708_writeb,
1354 +#error The BCM2708 SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set
1356 + .get_max_clock = sdhci_bcm2708_get_max_clock,
1358 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1359 + // Platform DMA operations
1360 + .pdma_able = sdhci_bcm2708_platdma_dmaable,
1361 + .pdma_avail = sdhci_bcm2708_platdma_avail,
1362 + .pdma_reset = sdhci_bcm2708_platdma_reset,
1364 + .extra_ints = sdhci_bcm2708_quirk_extra_ints,
1365 + .voltage_broken = sdhci_bcm2708_quirk_voltage_broken,
1366 + .uhs_broken = sdhci_bcm2708_uhs_broken,
1369 +/*****************************************************************************\
1371 + * Device probing/removal *
1373 +\*****************************************************************************/
1375 +static int sdhci_bcm2708_probe(struct platform_device *pdev)
1377 + struct sdhci_host *host;
1378 + struct resource *iomem;
1379 + struct sdhci_bcm2708_priv *host_priv;
1382 + BUG_ON(pdev == NULL);
1384 + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1390 + if (resource_size(iomem) != 0x100)
1391 + dev_err(&pdev->dev, "Invalid iomem size. You may "
1392 + "experience problems.\n");
1394 + if (pdev->dev.parent)
1395 + host = sdhci_alloc_host(pdev->dev.parent,
1396 + sizeof(struct sdhci_bcm2708_priv));
1398 + host = sdhci_alloc_host(&pdev->dev,
1399 + sizeof(struct sdhci_bcm2708_priv));
1401 + if (IS_ERR(host)) {
1402 + ret = PTR_ERR(host);
1405 + if (missing_status) {
1406 + sdhci_bcm2708_ops.missing_status = sdhci_bcm2708_missing_status;
1409 + if( spurious_crc_acmd51 ) {
1410 + sdhci_bcm2708_ops.spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc_acmd51;
1414 + printk("sdhci: %s low-latency mode\n",enable_llm?"Enable":"Disable");
1416 + host->hw_name = "BCM2708_Arasan";
1417 + host->ops = &sdhci_bcm2708_ops;
1418 + host->irq = platform_get_irq(pdev, 0);
1419 + host->second_irq = 0;
1421 + host->quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1422 + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1423 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1424 + SDHCI_QUIRK_MISSING_CAPS |
1425 + SDHCI_QUIRK_NO_HISPD_BIT |
1426 + (sync_after_dma ? 0:SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12);
1429 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1430 + host->flags = SDHCI_USE_PLATDMA;
1433 + if (!request_mem_region(iomem->start, resource_size(iomem),
1434 + mmc_hostname(host->mmc))) {
1435 + dev_err(&pdev->dev, "cannot request region\n");
1440 + host->ioaddr = ioremap(iomem->start, resource_size(iomem));
1441 + if (!host->ioaddr) {
1442 + dev_err(&pdev->dev, "failed to remap registers\n");
1447 + host_priv = SDHCI_HOST_PRIV(host);
1449 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1450 + host_priv->dma_wanted = 0;
1451 +#ifdef CHECK_DMA_USE
1452 + host_priv->dmas_pending = 0;
1453 + host_priv->when_started = 0;
1454 + host_priv->when_reset = 0;
1455 + host_priv->when_stopped = 0;
1457 + host_priv->sg_ix = 0;
1458 + host_priv->sg_done = 0;
1459 + host_priv->complete = NULL;
1460 + host_priv->dma_waits = SDHCI_BCM_DMA_WAITS;
1462 + host_priv->cb_base = dma_alloc_writecombine(&pdev->dev, SZ_4K,
1463 + &host_priv->cb_handle,
1465 + if (!host_priv->cb_base) {
1466 + dev_err(&pdev->dev, "cannot allocate DMA CBs\n");
1468 + goto err_alloc_cb;
1471 + ret = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST,
1472 + &host_priv->dma_chan_base,
1473 + &host_priv->dma_irq);
1475 + dev_err(&pdev->dev, "couldn't allocate a DMA channel\n");
1478 + host_priv->dma_chan = ret;
1480 + ret = request_irq(host_priv->dma_irq, sdhci_bcm2708_dma_irq,0,//IRQF_SHARED,
1481 + DRIVER_NAME " (dma)", host);
1483 + dev_err(&pdev->dev, "cannot set DMA IRQ\n");
1484 + goto err_add_dma_irq;
1486 + host->second_irq = host_priv->dma_irq;
1487 + DBG("DMA CBs %p handle %08X DMA%d %p DMA IRQ %d\n",
1488 + host_priv->cb_base, (unsigned)host_priv->cb_handle,
1489 + host_priv->dma_chan, host_priv->dma_chan_base,
1490 + host_priv->dma_irq);
1492 + if (allow_highspeed)
1493 + host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1495 + /* single block writes cause data loss with some SD cards! */
1496 + host->mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;
1499 + ret = sdhci_add_host(host);
1501 + goto err_add_host;
1503 + platform_set_drvdata(pdev, host);
1504 + ret = device_create_file(&pdev->dev, &dev_attr_use_dma);
1505 + ret = device_create_file(&pdev->dev, &dev_attr_dma_wait);
1506 + ret = device_create_file(&pdev->dev, &dev_attr_status);
1508 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1509 + /* enable extension fifo for paced DMA transfers */
1510 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1511 + sdhci_bcm2708_writel(host, 4, REG_EXRDFIFO_CFG);
1514 + printk(KERN_INFO "%s: BCM2708 SDHC host at 0x%08llx DMA %d IRQ %d\n",
1515 + mmc_hostname(host->mmc), (unsigned long long)iomem->start,
1516 + host_priv->dma_chan, host_priv->dma_irq);
1521 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1522 + free_irq(host_priv->dma_irq, host);
1524 + bcm_dma_chan_free(host_priv->dma_chan);
1526 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1527 + host_priv->cb_handle);
1530 + iounmap(host->ioaddr);
1532 + release_mem_region(iomem->start, resource_size(iomem));
1534 + sdhci_free_host(host);
1536 + dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1540 +static int sdhci_bcm2708_remove(struct platform_device *pdev)
1542 + struct sdhci_host *host = platform_get_drvdata(pdev);
1543 + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1544 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1549 + scratch = sdhci_bcm2708_readl(host, SDHCI_INT_STATUS);
1550 + if (scratch == (u32)-1)
1553 + device_remove_file(&pdev->dev, &dev_attr_status);
1554 + device_remove_file(&pdev->dev, &dev_attr_dma_wait);
1555 + device_remove_file(&pdev->dev, &dev_attr_use_dma);
1557 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1558 + free_irq(host_priv->dma_irq, host);
1559 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1560 + host_priv->cb_handle);
1562 + sdhci_remove_host(host, dead);
1563 + iounmap(host->ioaddr);
1564 + release_mem_region(iomem->start, resource_size(iomem));
1565 + sdhci_free_host(host);
1566 + platform_set_drvdata(pdev, NULL);
1571 +static struct platform_driver sdhci_bcm2708_driver = {
1573 + .name = DRIVER_NAME,
1574 + .owner = THIS_MODULE,
1576 + .probe = sdhci_bcm2708_probe,
1577 + .remove = sdhci_bcm2708_remove,
1580 + .suspend = sdhci_bcm2708_suspend,
1581 + .resume = sdhci_bcm2708_resume,
1586 +/*****************************************************************************\
1588 + * Driver init/exit *
1590 +\*****************************************************************************/
1592 +static int __init sdhci_drv_init(void)
1594 + return platform_driver_register(&sdhci_bcm2708_driver);
1597 +static void __exit sdhci_drv_exit(void)
1599 + platform_driver_unregister(&sdhci_bcm2708_driver);
1602 +module_init(sdhci_drv_init);
1603 +module_exit(sdhci_drv_exit);
1605 +module_param(allow_highspeed, bool, 0444);
1606 +module_param(emmc_clock_freq, int, 0444);
1607 +module_param(sync_after_dma, bool, 0444);
1608 +module_param(missing_status, bool, 0444);
1609 +module_param(spurious_crc_acmd51, bool, 0444);
1610 +module_param(enable_llm, bool, 0444);
1611 +module_param(cycle_delay, int, 0444);
1612 +module_param(extra_messages, bool, 0444);
1614 +MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
1615 +MODULE_AUTHOR("Broadcom <info@broadcom.com>");
1616 +MODULE_LICENSE("GPL v2");
1617 +MODULE_ALIAS("platform:"DRIVER_NAME);
1619 +MODULE_PARM_DESC(allow_highspeed, "Allow high speed transfers modes");
1620 +MODULE_PARM_DESC(emmc_clock_freq, "Specify the speed of emmc clock");
1621 +MODULE_PARM_DESC(sync_after_dma, "Block in driver until dma complete");
1622 +MODULE_PARM_DESC(missing_status, "Use the missing status quirk");
1623 +MODULE_PARM_DESC(spurious_crc_acmd51, "Use the spurious crc quirk for reading SCR (ACMD51)");
1624 +MODULE_PARM_DESC(enable_llm, "Enable low-latency mode");
1625 +MODULE_PARM_DESC(extra_messages, "Enable more sdcard warning messages");
1628 --- a/drivers/mmc/host/sdhci.c
1629 +++ b/drivers/mmc/host/sdhci.c
1631 #include <linux/mmc/mmc.h>
1632 #include <linux/mmc/host.h>
1633 #include <linux/mmc/card.h>
1634 +#include <linux/mmc/sd.h>
1635 #include <linux/mmc/slot-gpio.h>
1638 @@ -123,6 +124,91 @@ static void sdhci_dumpregs(struct sdhci_
1639 * Low level functions *
1641 \*****************************************************************************/
1642 +extern bool enable_llm;
1643 +static int sdhci_locked=0;
1644 +void sdhci_spin_lock(struct sdhci_host *host)
1646 + spin_lock(&host->lock);
1647 +#ifdef CONFIG_PREEMPT
1650 + disable_irq_nosync(host->irq);
1651 + if(host->second_irq)
1652 + disable_irq_nosync(host->second_irq);
1653 + local_irq_enable();
1658 +void sdhci_spin_unlock(struct sdhci_host *host)
1660 +#ifdef CONFIG_PREEMPT
1663 + local_irq_disable();
1664 + if(host->second_irq)
1665 + enable_irq(host->second_irq);
1666 + enable_irq(host->irq);
1669 + spin_unlock(&host->lock);
1672 +void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags)
1674 +#ifdef CONFIG_PREEMPT
1677 + while(sdhci_locked)
1679 + preempt_schedule();
1681 + spin_lock_irqsave(&host->lock,*flags);
1682 + disable_irq(host->irq);
1683 + if(host->second_irq)
1684 + disable_irq(host->second_irq);
1685 + local_irq_enable();
1689 + spin_lock_irqsave(&host->lock,*flags);
1692 +void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags)
1694 +#ifdef CONFIG_PREEMPT
1697 + local_irq_disable();
1698 + if(host->second_irq)
1699 + enable_irq(host->second_irq);
1700 + enable_irq(host->irq);
1703 + spin_unlock_irqrestore(&host->lock,flags);
1706 +static void sdhci_spin_enable_schedule(struct sdhci_host *host)
1708 +#ifdef CONFIG_PREEMPT
1717 +static void sdhci_spin_disable_schedule(struct sdhci_host *host)
1719 +#ifdef CONFIG_PREEMPT
1722 + preempt_disable();
1728 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
1730 @@ -288,7 +374,7 @@ static void sdhci_led_control(struct led
1731 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
1732 unsigned long flags;
1734 - spin_lock_irqsave(&host->lock, flags);
1735 + sdhci_spin_lock_irqsave(host, &flags);
1737 if (host->runtime_suspended)
1739 @@ -298,7 +384,7 @@ static void sdhci_led_control(struct led
1741 sdhci_activate_led(host);
1743 - spin_unlock_irqrestore(&host->lock, flags);
1744 + sdhci_spin_unlock_irqrestore(host, flags);
1748 @@ -315,7 +401,7 @@ static void sdhci_read_block_pio(struct
1749 u32 uninitialized_var(scratch);
1752 - DBG("PIO reading\n");
1753 + DBG("PIO reading %db\n", host->data->blksz);
1755 blksize = host->data->blksz;
1757 @@ -360,7 +446,7 @@ static void sdhci_write_block_pio(struct
1761 - DBG("PIO writing\n");
1762 + DBG("PIO writing %db\n", host->data->blksz);
1764 blksize = host->data->blksz;
1766 @@ -399,19 +485,28 @@ static void sdhci_write_block_pio(struct
1767 local_irq_restore(flags);
1770 -static void sdhci_transfer_pio(struct sdhci_host *host)
1771 +static void sdhci_transfer_pio(struct sdhci_host *host, u32 intstate)
1778 BUG_ON(!host->data);
1780 if (host->blocks == 0)
1783 - if (host->data->flags & MMC_DATA_READ)
1784 + if (host->data->flags & MMC_DATA_READ) {
1785 mask = SDHCI_DATA_AVAILABLE;
1787 + intmask = SDHCI_INT_DATA_AVAIL;
1789 mask = SDHCI_SPACE_AVAILABLE;
1790 + intmask = SDHCI_INT_SPACE_AVAIL;
1793 + /* initially we can see whether we can procede using intstate */
1794 + available = (intstate & intmask);
1797 * Some controllers (JMicron JMB38x) mess up the buffer bits
1798 @@ -422,7 +517,7 @@ static void sdhci_transfer_pio(struct sd
1799 (host->data->blocks == 1))
1802 - while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1803 + while (available) {
1804 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
1807 @@ -434,9 +529,11 @@ static void sdhci_transfer_pio(struct sd
1809 if (host->blocks == 0)
1811 + state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1812 + available = state & mask;
1815 - DBG("PIO transfer complete.\n");
1816 + DBG("PIO transfer complete - %d blocks left.\n", host->blocks);
1819 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
1820 @@ -709,7 +806,9 @@ static void sdhci_set_transfer_irqs(stru
1821 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1822 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1824 - if (host->flags & SDHCI_REQ_USE_DMA)
1825 + /* platform DMA will begin on receipt of PIO irqs */
1826 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1827 + !(host->flags & SDHCI_USE_PLATDMA))
1828 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
1830 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
1831 @@ -741,44 +840,25 @@ static void sdhci_prepare_data(struct sd
1832 host->data_early = 0;
1833 host->data->bytes_xfered = 0;
1835 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
1836 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA | SDHCI_USE_PLATDMA))
1837 host->flags |= SDHCI_REQ_USE_DMA;
1840 * FIXME: This doesn't account for merging when mapping the
1843 - if (host->flags & SDHCI_REQ_USE_DMA) {
1845 - struct scatterlist *sg;
1848 - if (host->flags & SDHCI_USE_ADMA) {
1849 - if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
1852 - if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1856 - if (unlikely(broken)) {
1857 - for_each_sg(data->sg, sg, data->sg_len, i) {
1858 - if (sg->length & 0x3) {
1859 - DBG("Reverting to PIO because of "
1860 - "transfer size (%d)\n",
1862 - host->flags &= ~SDHCI_REQ_USE_DMA;
1870 * The assumption here being that alignment is the same after
1871 * translation to device address space.
1873 - if (host->flags & SDHCI_REQ_USE_DMA) {
1874 + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) ==
1875 + (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) {
1877 + if (! sdhci_platdma_dmaable(host, data))
1878 + host->flags &= ~SDHCI_REQ_USE_DMA;
1880 + } else if (host->flags & SDHCI_REQ_USE_DMA) {
1882 struct scatterlist *sg;
1884 @@ -837,7 +917,8 @@ static void sdhci_prepare_data(struct sd
1887 host->flags &= ~SDHCI_REQ_USE_DMA;
1890 + if (!(host->flags & SDHCI_USE_PLATDMA)) {
1891 WARN_ON(sg_cnt != 1);
1892 sdhci_writel(host, sg_dma_address(data->sg),
1894 @@ -853,11 +934,13 @@ static void sdhci_prepare_data(struct sd
1895 if (host->version >= SDHCI_SPEC_200) {
1896 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1897 ctrl &= ~SDHCI_CTRL_DMA_MASK;
1898 + if (! (host->flags & SDHCI_USE_PLATDMA)) {
1899 if ((host->flags & SDHCI_REQ_USE_DMA) &&
1900 (host->flags & SDHCI_USE_ADMA))
1901 ctrl |= SDHCI_CTRL_ADMA32;
1903 ctrl |= SDHCI_CTRL_SDMA;
1905 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1908 @@ -909,7 +992,8 @@ static void sdhci_set_transfer_mode(stru
1910 if (data->flags & MMC_DATA_READ)
1911 mode |= SDHCI_TRNS_READ;
1912 - if (host->flags & SDHCI_REQ_USE_DMA)
1913 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1914 + !(host->flags & SDHCI_USE_PLATDMA))
1915 mode |= SDHCI_TRNS_DMA;
1917 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1918 @@ -925,13 +1009,16 @@ static void sdhci_finish_data(struct sdh
1921 if (host->flags & SDHCI_REQ_USE_DMA) {
1922 - if (host->flags & SDHCI_USE_ADMA)
1923 - sdhci_adma_table_post(host, data);
1925 + /* we may have to abandon an ongoing platform DMA */
1926 + if (host->flags & SDHCI_USE_PLATDMA)
1927 + sdhci_platdma_reset(host, data);
1929 + if (host->flags & (SDHCI_USE_PLATDMA | SDHCI_USE_SDMA)) {
1930 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1931 data->sg_len, (data->flags & MMC_DATA_READ) ?
1932 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1934 + } else if (host->flags & SDHCI_USE_ADMA)
1935 + sdhci_adma_table_post(host, data);
1939 @@ -984,6 +1071,12 @@ static void sdhci_send_command(struct sd
1940 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1941 mask |= SDHCI_DATA_INHIBIT;
1943 + if(host->ops->missing_status && (cmd->opcode == MMC_SEND_STATUS)) {
1944 + timeout = 5000; // Really obscenely large delay to send the status, due to bug in controller
1945 + // which might cause the STATUS command to get stuck when a data operation is in flow
1946 + mask |= SDHCI_DATA_INHIBIT;
1949 /* We shouldn't wait for data inihibit for stop commands, even
1950 though they might use busy signaling */
1951 if (host->mrq->data && (cmd == host->mrq->data->stop))
1952 @@ -999,12 +1092,20 @@ static void sdhci_send_command(struct sd
1956 + sdhci_spin_enable_schedule(host);
1958 + sdhci_spin_disable_schedule(host);
1960 + DBG("send cmd %d - wait 0x%X irq 0x%x\n", cmd->opcode, mask,
1961 + sdhci_readl(host, SDHCI_INT_STATUS));
1963 mod_timer(&host->timer, jiffies + 10 * HZ);
1966 + if (host->last_cmdop == MMC_APP_CMD)
1967 + host->last_cmdop = -cmd->opcode;
1969 + host->last_cmdop = cmd->opcode;
1971 sdhci_prepare_data(host, cmd);
1973 @@ -1220,7 +1321,9 @@ clock_set:
1977 + sdhci_spin_enable_schedule(host);
1979 + sdhci_spin_disable_schedule(host);
1982 clk |= SDHCI_CLOCK_CARD_EN;
1983 @@ -1316,7 +1419,7 @@ static void sdhci_request(struct mmc_hos
1985 sdhci_runtime_pm_get(host);
1987 - spin_lock_irqsave(&host->lock, flags);
1988 + sdhci_spin_lock_irqsave(host, &flags);
1990 WARN_ON(host->mrq != NULL);
1992 @@ -1374,9 +1477,9 @@ static void sdhci_request(struct mmc_hos
1993 mmc->card->type == MMC_TYPE_MMC ?
1994 MMC_SEND_TUNING_BLOCK_HS200 :
1995 MMC_SEND_TUNING_BLOCK;
1996 - spin_unlock_irqrestore(&host->lock, flags);
1997 + sdhci_spin_unlock_irqrestore(host, flags);
1998 sdhci_execute_tuning(mmc, tuning_opcode);
1999 - spin_lock_irqsave(&host->lock, flags);
2000 + sdhci_spin_lock_irqsave(host, &flags);
2002 /* Restore original mmc_request structure */
2004 @@ -1390,7 +1493,7 @@ static void sdhci_request(struct mmc_hos
2008 - spin_unlock_irqrestore(&host->lock, flags);
2009 + sdhci_spin_unlock_irqrestore(host, flags);
2012 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2013 @@ -1399,10 +1502,10 @@ static void sdhci_do_set_ios(struct sdhc
2017 - spin_lock_irqsave(&host->lock, flags);
2018 + sdhci_spin_lock_irqsave(host, &flags);
2020 if (host->flags & SDHCI_DEVICE_DEAD) {
2021 - spin_unlock_irqrestore(&host->lock, flags);
2022 + sdhci_spin_unlock_irqrestore(host, flags);
2023 if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
2024 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
2026 @@ -1429,9 +1532,9 @@ static void sdhci_do_set_ios(struct sdhc
2027 vdd_bit = sdhci_set_power(host, ios->vdd);
2029 if (host->vmmc && vdd_bit != -1) {
2030 - spin_unlock_irqrestore(&host->lock, flags);
2031 + sdhci_spin_unlock_irqrestore(host, flags);
2032 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
2033 - spin_lock_irqsave(&host->lock, flags);
2034 + sdhci_spin_lock_irqsave(host, &flags);
2037 if (host->ops->platform_send_init_74_clocks)
2038 @@ -1470,7 +1573,7 @@ static void sdhci_do_set_ios(struct sdhc
2040 ctrl &= ~SDHCI_CTRL_HISPD;
2042 - if (host->version >= SDHCI_SPEC_300) {
2043 + if (host->version >= SDHCI_SPEC_300 && !(host->ops->uhs_broken)) {
2046 /* In case of UHS-I modes, set High Speed Enable */
2047 @@ -1569,7 +1672,7 @@ static void sdhci_do_set_ios(struct sdhc
2048 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2051 - spin_unlock_irqrestore(&host->lock, flags);
2052 + sdhci_spin_unlock_irqrestore(host, flags);
2055 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2056 @@ -1617,7 +1720,7 @@ static int sdhci_check_ro(struct sdhci_h
2057 unsigned long flags;
2060 - spin_lock_irqsave(&host->lock, flags);
2061 + sdhci_spin_lock_irqsave(host, &flags);
2063 if (host->flags & SDHCI_DEVICE_DEAD)
2065 @@ -1627,7 +1730,7 @@ static int sdhci_check_ro(struct sdhci_h
2066 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2067 & SDHCI_WRITE_PROTECT);
2069 - spin_unlock_irqrestore(&host->lock, flags);
2070 + sdhci_spin_unlock_irqrestore(host, flags);
2072 /* This quirk needs to be replaced by a callback-function later */
2073 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2074 @@ -1700,9 +1803,9 @@ static void sdhci_enable_sdio_irq(struct
2075 struct sdhci_host *host = mmc_priv(mmc);
2076 unsigned long flags;
2078 - spin_lock_irqsave(&host->lock, flags);
2079 + sdhci_spin_lock_irqsave(host, &flags);
2080 sdhci_enable_sdio_irq_nolock(host, enable);
2081 - spin_unlock_irqrestore(&host->lock, flags);
2082 + sdhci_spin_unlock_irqrestore(host, flags);
2085 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
2086 @@ -2046,7 +2149,7 @@ static void sdhci_card_event(struct mmc_
2087 struct sdhci_host *host = mmc_priv(mmc);
2088 unsigned long flags;
2090 - spin_lock_irqsave(&host->lock, flags);
2091 + sdhci_spin_lock_irqsave(host, &flags);
2093 /* Check host->mrq first in case we are runtime suspended */
2095 @@ -2063,7 +2166,7 @@ static void sdhci_card_event(struct mmc_
2096 tasklet_schedule(&host->finish_tasklet);
2099 - spin_unlock_irqrestore(&host->lock, flags);
2100 + sdhci_spin_unlock_irqrestore(host, flags);
2103 static const struct mmc_host_ops sdhci_ops = {
2104 @@ -2102,14 +2205,14 @@ static void sdhci_tasklet_finish(unsigne
2106 host = (struct sdhci_host*)param;
2108 - spin_lock_irqsave(&host->lock, flags);
2109 + sdhci_spin_lock_irqsave(host, &flags);
2112 * If this tasklet gets rescheduled while running, it will
2113 * be run again afterwards but without any active request.
2116 - spin_unlock_irqrestore(&host->lock, flags);
2117 + sdhci_spin_unlock_irqrestore(host, flags);
2121 @@ -2147,7 +2250,7 @@ static void sdhci_tasklet_finish(unsigne
2125 - spin_unlock_irqrestore(&host->lock, flags);
2126 + sdhci_spin_unlock_irqrestore(host, flags);
2128 mmc_request_done(host->mmc, mrq);
2129 sdhci_runtime_pm_put(host);
2130 @@ -2160,11 +2263,11 @@ static void sdhci_timeout_timer(unsigned
2132 host = (struct sdhci_host*)data;
2134 - spin_lock_irqsave(&host->lock, flags);
2135 + sdhci_spin_lock_irqsave(host, &flags);
2138 pr_err("%s: Timeout waiting for hardware "
2139 - "interrupt.\n", mmc_hostname(host->mmc));
2140 + "interrupt - cmd%d.\n", mmc_hostname(host->mmc), host->last_cmdop);
2141 sdhci_dumpregs(host);
2144 @@ -2181,7 +2284,7 @@ static void sdhci_timeout_timer(unsigned
2148 - spin_unlock_irqrestore(&host->lock, flags);
2149 + sdhci_spin_unlock_irqrestore(host, flags);
2152 static void sdhci_tuning_timer(unsigned long data)
2153 @@ -2191,11 +2294,11 @@ static void sdhci_tuning_timer(unsigned
2155 host = (struct sdhci_host *)data;
2157 - spin_lock_irqsave(&host->lock, flags);
2158 + sdhci_spin_lock_irqsave(host, &flags);
2160 host->flags |= SDHCI_NEEDS_RETUNING;
2162 - spin_unlock_irqrestore(&host->lock, flags);
2163 + sdhci_spin_unlock_irqrestore(host, flags);
2166 /*****************************************************************************\
2167 @@ -2209,10 +2312,13 @@ static void sdhci_cmd_irq(struct sdhci_h
2168 BUG_ON(intmask == 0);
2171 + if (!(host->ops->extra_ints)) {
2172 pr_err("%s: Got command interrupt 0x%08x even "
2173 "though no command operation was in progress.\n",
2174 mmc_hostname(host->mmc), (unsigned)intmask);
2175 sdhci_dumpregs(host);
2177 + DBG("cmd irq 0x%08x cmd complete\n", (unsigned)intmask);
2181 @@ -2282,6 +2388,19 @@ static void sdhci_show_adma_error(struct
2182 static void sdhci_show_adma_error(struct sdhci_host *host) { }
2185 +static void sdhci_data_end(struct sdhci_host *host)
2189 + * Data managed to finish before the
2190 + * command completed. Make sure we do
2191 + * things in the proper order.
2193 + host->data_early = 1;
2195 + sdhci_finish_data(host);
2198 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2201 @@ -2311,23 +2430,39 @@ static void sdhci_data_irq(struct sdhci_
2205 + if (!(host->ops->extra_ints)) {
2206 pr_err("%s: Got data interrupt 0x%08x even "
2207 "though no data operation was in progress.\n",
2208 mmc_hostname(host->mmc), (unsigned)intmask);
2209 sdhci_dumpregs(host);
2211 + DBG("data irq 0x%08x but no data\n", (unsigned)intmask);
2216 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2217 host->data->error = -ETIMEDOUT;
2218 - else if (intmask & SDHCI_INT_DATA_END_BIT)
2219 + else if (intmask & SDHCI_INT_DATA_END_BIT) {
2220 + DBG("end error in cmd %d\n", host->last_cmdop);
2221 + if (host->ops->spurious_crc_acmd51 &&
2222 + host->last_cmdop == -SD_APP_SEND_SCR) {
2223 + DBG("ignoring spurious data_end_bit error\n");
2224 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2226 host->data->error = -EILSEQ;
2227 - else if ((intmask & SDHCI_INT_DATA_CRC) &&
2228 + } else if ((intmask & SDHCI_INT_DATA_CRC) &&
2229 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2230 - != MMC_BUS_TEST_R)
2231 + != MMC_BUS_TEST_R) {
2232 + DBG("crc error in cmd %d\n", host->last_cmdop);
2233 + if (host->ops->spurious_crc_acmd51 &&
2234 + host->last_cmdop == -SD_APP_SEND_SCR) {
2235 + DBG("ignoring spurious data_crc_bit error\n");
2236 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2238 host->data->error = -EILSEQ;
2239 - else if (intmask & SDHCI_INT_ADMA_ERROR) {
2241 + } else if (intmask & SDHCI_INT_ADMA_ERROR) {
2242 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2243 sdhci_show_adma_error(host);
2244 host->data->error = -EIO;
2245 @@ -2335,11 +2470,18 @@ static void sdhci_data_irq(struct sdhci_
2246 host->ops->adma_workaround(host, intmask);
2249 - if (host->data->error)
2250 + if (host->data->error) {
2251 + DBG("finish request early on error %d\n", host->data->error);
2252 sdhci_finish_data(host);
2254 - if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2255 - sdhci_transfer_pio(host);
2257 + if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
2258 + if (host->flags & SDHCI_REQ_USE_DMA) {
2259 + /* possible only in PLATDMA mode */
2260 + sdhci_platdma_avail(host, &intmask,
2263 + sdhci_transfer_pio(host, intmask);
2267 * We currently don't do anything fancy with DMA
2268 @@ -2368,18 +2510,8 @@ static void sdhci_data_irq(struct sdhci_
2269 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2272 - if (intmask & SDHCI_INT_DATA_END) {
2275 - * Data managed to finish before the
2276 - * command completed. Make sure we do
2277 - * things in the proper order.
2279 - host->data_early = 1;
2281 - sdhci_finish_data(host);
2284 + if (intmask & SDHCI_INT_DATA_END)
2285 + sdhci_data_end(host);
2289 @@ -2390,10 +2522,10 @@ static irqreturn_t sdhci_irq(int irq, vo
2290 u32 intmask, unexpected = 0;
2291 int cardint = 0, max_loops = 16;
2293 - spin_lock(&host->lock);
2294 + sdhci_spin_lock(host);
2296 if (host->runtime_suspended) {
2297 - spin_unlock(&host->lock);
2298 + sdhci_spin_unlock(host);
2299 pr_warning("%s: got irq while runtime suspended\n",
2300 mmc_hostname(host->mmc));
2302 @@ -2435,6 +2567,22 @@ again:
2303 tasklet_schedule(&host->card_tasklet);
2306 + if (intmask & SDHCI_INT_ERROR_MASK & ~SDHCI_INT_ERROR)
2307 + DBG("controller reports error 0x%x -"
2308 + "%s%s%s%s%s%s%s%s%s%s",
2310 + intmask & SDHCI_INT_TIMEOUT? " timeout": "",
2311 + intmask & SDHCI_INT_CRC ? " crc": "",
2312 + intmask & SDHCI_INT_END_BIT? " endbit": "",
2313 + intmask & SDHCI_INT_INDEX? " index": "",
2314 + intmask & SDHCI_INT_DATA_TIMEOUT? " data_timeout": "",
2315 + intmask & SDHCI_INT_DATA_CRC? " data_crc": "",
2316 + intmask & SDHCI_INT_DATA_END_BIT? " data_endbit": "",
2317 + intmask & SDHCI_INT_BUS_POWER? " buspower": "",
2318 + intmask & SDHCI_INT_ACMD12ERR? " acmd12": "",
2319 + intmask & SDHCI_INT_ADMA_ERROR? " adma": ""
2322 if (intmask & SDHCI_INT_CMD_MASK) {
2323 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
2325 @@ -2449,7 +2597,13 @@ again:
2327 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
2329 - intmask &= ~SDHCI_INT_ERROR;
2330 + if (intmask & SDHCI_INT_ERROR_MASK) {
2331 + /* collect any uncovered errors */
2332 + sdhci_writel(host, intmask & SDHCI_INT_ERROR_MASK,
2333 + SDHCI_INT_STATUS);
2336 + intmask &= ~SDHCI_INT_ERROR_MASK;
2338 if (intmask & SDHCI_INT_BUS_POWER) {
2339 pr_err("%s: Card is consuming too much power!\n",
2340 @@ -2475,7 +2629,7 @@ again:
2341 if (intmask && --max_loops)
2344 - spin_unlock(&host->lock);
2345 + sdhci_spin_unlock(host);
2348 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2349 @@ -2569,7 +2723,8 @@ int sdhci_resume_host(struct sdhci_host
2353 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2354 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2355 + SDHCI_USE_PLATDMA)) {
2356 if (host->ops->enable_dma)
2357 host->ops->enable_dma(host);
2359 @@ -2636,15 +2791,15 @@ int sdhci_runtime_suspend_host(struct sd
2360 host->flags &= ~SDHCI_NEEDS_RETUNING;
2363 - spin_lock_irqsave(&host->lock, flags);
2364 + sdhci_spin_lock_irqsave(host, &flags);
2365 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2366 - spin_unlock_irqrestore(&host->lock, flags);
2367 + sdhci_spin_unlock_irqrestore(host, flags);
2369 synchronize_irq(host->irq);
2371 - spin_lock_irqsave(&host->lock, flags);
2372 + sdhci_spin_lock_irqsave(host, &flags);
2373 host->runtime_suspended = true;
2374 - spin_unlock_irqrestore(&host->lock, flags);
2375 + sdhci_spin_unlock_irqrestore(host, flags);
2379 @@ -2670,16 +2825,16 @@ int sdhci_runtime_resume_host(struct sdh
2380 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2381 if ((host_flags & SDHCI_PV_ENABLED) &&
2382 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2383 - spin_lock_irqsave(&host->lock, flags);
2384 + sdhci_spin_lock_irqsave(host, &flags);
2385 sdhci_enable_preset_value(host, true);
2386 - spin_unlock_irqrestore(&host->lock, flags);
2387 + sdhci_spin_unlock_irqrestore(host, flags);
2390 /* Set the re-tuning expiration flag */
2391 if (host->flags & SDHCI_USING_RETUNING_TIMER)
2392 host->flags |= SDHCI_NEEDS_RETUNING;
2394 - spin_lock_irqsave(&host->lock, flags);
2395 + sdhci_spin_lock_irqsave(host, &flags);
2397 host->runtime_suspended = false;
2399 @@ -2690,7 +2845,7 @@ int sdhci_runtime_resume_host(struct sdh
2400 /* Enable Card Detection */
2401 sdhci_enable_card_detection(host);
2403 - spin_unlock_irqrestore(&host->lock, flags);
2404 + sdhci_spin_unlock_irqrestore(host, flags);
2408 @@ -2785,14 +2940,16 @@ int sdhci_add_host(struct sdhci_host *ho
2409 host->flags &= ~SDHCI_USE_ADMA;
2412 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2413 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2414 + SDHCI_USE_PLATDMA)) {
2415 if (host->ops->enable_dma) {
2416 if (host->ops->enable_dma(host)) {
2417 pr_warning("%s: No suitable DMA "
2418 "available. Falling back to PIO.\n",
2421 - ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2422 + ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2423 + SDHCI_USE_PLATDMA);
2427 @@ -3080,6 +3237,12 @@ int sdhci_add_host(struct sdhci_host *ho
2428 SDHCI_MAX_CURRENT_MULTIPLIER;
2431 + if(host->ops->voltage_broken) {
2432 + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2433 + // Cannot support UHS modes if we are stuck at 3.3V.
2434 + mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50);
2437 mmc->ocr_avail = ocr_avail;
2438 mmc->ocr_avail_sdio = ocr_avail;
2439 if (host->ocr_avail_sdio)
2440 @@ -3174,7 +3337,7 @@ int sdhci_add_host(struct sdhci_host *ho
2441 host->tuning_timer.function = sdhci_tuning_timer;
2444 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2445 + ret = request_irq(host->irq, sdhci_irq, 0,//IRQF_SHARED,
2446 mmc_hostname(mmc), host);
2448 pr_err("%s: Failed to request IRQ %d: %d\n",
2449 @@ -3210,6 +3373,7 @@ int sdhci_add_host(struct sdhci_host *ho
2451 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
2452 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
2453 + (host->flags & SDHCI_USE_PLATDMA) ? "platform's DMA" :
2454 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
2455 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
2457 @@ -3237,7 +3401,7 @@ void sdhci_remove_host(struct sdhci_host
2458 unsigned long flags;
2461 - spin_lock_irqsave(&host->lock, flags);
2462 + sdhci_spin_lock_irqsave(host, &flags);
2464 host->flags |= SDHCI_DEVICE_DEAD;
2466 @@ -3249,7 +3413,7 @@ void sdhci_remove_host(struct sdhci_host
2467 tasklet_schedule(&host->finish_tasklet);
2470 - spin_unlock_irqrestore(&host->lock, flags);
2471 + sdhci_spin_unlock_irqrestore(host, flags);
2474 sdhci_disable_card_detection(host);
2475 --- a/drivers/mmc/host/sdhci.h
2476 +++ b/drivers/mmc/host/sdhci.h
2477 @@ -289,6 +289,20 @@ struct sdhci_ops {
2478 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
2479 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
2480 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
2482 + int (*pdma_able)(struct sdhci_host *host,
2483 + struct mmc_data *data);
2484 + void (*pdma_avail)(struct sdhci_host *host,
2485 + unsigned int *ref_intmask,
2486 + void(*complete)(struct sdhci_host *));
2487 + void (*pdma_reset)(struct sdhci_host *host,
2488 + struct mmc_data *data);
2489 + unsigned int (*extra_ints)(struct sdhci_host *host);
2490 + unsigned int (*spurious_crc_acmd51)(struct sdhci_host *host);
2491 + unsigned int (*voltage_broken)(struct sdhci_host *host);
2492 + unsigned int (*uhs_broken)(struct sdhci_host *host);
2493 + unsigned int (*missing_status)(struct sdhci_host *host);
2495 void (*hw_reset)(struct sdhci_host *host);
2496 void (*platform_suspend)(struct sdhci_host *host);
2497 void (*platform_resume)(struct sdhci_host *host);
2498 @@ -399,9 +413,38 @@ extern int sdhci_resume_host(struct sdhc
2499 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
2502 +static inline int /*bool*/
2503 +sdhci_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
2505 + if (host->ops->pdma_able)
2506 + return host->ops->pdma_able(host, data);
2511 +sdhci_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
2512 + void(*completion_callback)(struct sdhci_host *))
2514 + if (host->ops->pdma_avail)
2515 + host->ops->pdma_avail(host, ref_intmask, completion_callback);
2519 +sdhci_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
2521 + if (host->ops->pdma_reset)
2522 + host->ops->pdma_reset(host, data);
2525 #ifdef CONFIG_PM_RUNTIME
2526 extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
2527 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
2530 +extern void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags);
2531 +extern void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags);
2532 +extern void sdhci_spin_lock(struct sdhci_host *host);
2533 +extern void sdhci_spin_unlock(struct sdhci_host *host);
2536 #endif /* __SDHCI_HW_H */
2537 --- a/include/linux/mmc/host.h
2538 +++ b/include/linux/mmc/host.h
2539 @@ -281,6 +281,7 @@ struct mmc_host {
2540 #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
2542 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
2543 +#define MMC_CAP2_FORCE_MULTIBLOCK (1 << 31) /* Always use multiblock transfers */
2545 mmc_pm_flag_t pm_caps; /* supported pm features */
2547 --- a/include/linux/mmc/sdhci.h
2548 +++ b/include/linux/mmc/sdhci.h
2549 @@ -97,6 +97,7 @@ struct sdhci_host {
2550 #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
2552 int irq; /* Device IRQ */
2553 + int second_irq; /* Additional IRQ to disable/enable in low-latency mode */
2554 void __iomem *ioaddr; /* Mapped address */
2556 const struct sdhci_ops *ops; /* Low level hw interface */
2557 @@ -128,6 +129,7 @@ struct sdhci_host {
2558 #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
2559 #define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
2560 #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
2561 +#define SDHCI_USE_PLATDMA (1<<12) /* Host uses 3rd party DMA */
2563 unsigned int version; /* SDHCI spec. version */
2565 @@ -142,6 +144,7 @@ struct sdhci_host {
2567 struct mmc_request *mrq; /* Current request */
2568 struct mmc_command *cmd; /* Current command */
2569 + int last_cmdop; /* Opcode of last cmd sent */
2570 struct mmc_data *data; /* Current data request */
2571 unsigned int data_early:1; /* Data finished before cmd */