kernel: update 3.10 to 3.10.2
[openwrt/openwrt.git] / target / linux / brcm2708 / patches-3.10 / 014-bcm2708-sdhci-driver.patch
1 --- a/drivers/mmc/card/block.c
2 +++ b/drivers/mmc/card/block.c
3 @@ -1294,7 +1294,7 @@ static void mmc_blk_rw_rq_prep(struct mm
4 brq->data.blocks = 1;
5 }
6
7 - if (brq->data.blocks > 1 || do_rel_wr) {
8 + if (brq->data.blocks > 1 || do_rel_wr || card->host->caps2 & MMC_CAP2_FORCE_MULTIBLOCK) {
9 /* SPI multiblock writes terminate using a special
10 * token, not a STOP_TRANSMISSION request.
11 */
12 --- a/drivers/mmc/core/sd.c
13 +++ b/drivers/mmc/core/sd.c
14 @@ -13,6 +13,8 @@
15 #include <linux/err.h>
16 #include <linux/slab.h>
17 #include <linux/stat.h>
18 +#include <linux/jiffies.h>
19 +#include <linux/nmi.h>
20
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/card.h>
23 @@ -58,6 +60,15 @@ static const unsigned int tacc_mant[] =
24 __res & __mask; \
25 })
26
27 +// timeout for tries
28 +static const unsigned long retry_timeout_ms= 10*1000;
29 +
30 +// try at least 10 times, even if timeout is reached
31 +static const int retry_min_tries= 10;
32 +
33 +// delay between tries
34 +static const unsigned long retry_delay_ms= 10;
35 +
36 /*
37 * Given the decoded CSD structure, decode the raw CID to our CID structure.
38 */
39 @@ -210,12 +221,62 @@ static int mmc_decode_scr(struct mmc_car
40 }
41
42 /*
43 - * Fetch and process SD Status register.
44 + * Fetch and process SD Configuration Register.
45 + */
46 +static int mmc_read_scr(struct mmc_card *card)
47 +{
48 + unsigned long timeout_at;
49 + int err, tries;
50 +
51 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
52 + tries= 0;
53 +
54 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
55 + {
56 + unsigned long delay_at;
57 + tries++;
58 +
59 + err = mmc_app_send_scr(card, card->raw_scr);
60 + if( !err )
61 + break; // success!!!
62 +
63 + touch_nmi_watchdog(); // we are still alive!
64 +
65 + // delay
66 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
67 + while( time_before( jiffies, delay_at ) )
68 + {
69 + mdelay( 1 );
70 + touch_nmi_watchdog(); // we are still alive!
71 + }
72 + }
73 +
74 + if( err)
75 + {
76 + pr_err("%s: failed to read SD Configuration register (SCR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
77 + return err;
78 + }
79 +
80 + if( tries > 1 )
81 + {
82 + pr_info("%s: could read SD Configuration register (SCR) at the %dth attempt\n", mmc_hostname(card->host), tries );
83 + }
84 +
85 + err = mmc_decode_scr(card);
86 + if (err)
87 + return err;
88 +
89 + return err;
90 +}
91 +
92 +/*
93 + * Fetch and process SD Status Register.
94 */
95 static int mmc_read_ssr(struct mmc_card *card)
96 {
97 + unsigned long timeout_at;
98 unsigned int au, es, et, eo;
99 - int err, i;
100 + int err, i, tries;
101 u32 *ssr;
102
103 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
104 @@ -228,14 +289,40 @@ static int mmc_read_ssr(struct mmc_card
105 if (!ssr)
106 return -ENOMEM;
107
108 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
109 + tries= 0;
110 +
111 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
112 + {
113 + unsigned long delay_at;
114 + tries++;
115 +
116 err = mmc_app_sd_status(card, ssr);
117 - if (err) {
118 - pr_warning("%s: problem reading SD Status "
119 - "register.\n", mmc_hostname(card->host));
120 - err = 0;
121 + if( !err )
122 + break; // success!!!
123 +
124 + touch_nmi_watchdog(); // we are still alive!
125 +
126 + // delay
127 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
128 + while( time_before( jiffies, delay_at ) )
129 + {
130 + mdelay( 1 );
131 + touch_nmi_watchdog(); // we are still alive!
132 + }
133 + }
134 +
135 + if( err)
136 + {
137 + pr_err("%s: failed to read SD Status register (SSR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
138 goto out;
139 }
140
141 + if( tries > 1 )
142 + {
143 + pr_info("%s: read SD Status register (SSR) after %d attempts\n", mmc_hostname(card->host), tries );
144 + }
145 +
146 for (i = 0; i < 16; i++)
147 ssr[i] = be32_to_cpu(ssr[i]);
148
149 @@ -808,13 +895,9 @@ int mmc_sd_setup_card(struct mmc_host *h
150
151 if (!reinit) {
152 /*
153 - * Fetch SCR from card.
154 + * Fetch and decode SD Configuration register.
155 */
156 - err = mmc_app_send_scr(card, card->raw_scr);
157 - if (err)
158 - return err;
159 -
160 - err = mmc_decode_scr(card);
161 + err = mmc_read_scr(card);
162 if (err)
163 return err;
164
165 --- a/drivers/mmc/host/Kconfig
166 +++ b/drivers/mmc/host/Kconfig
167 @@ -249,6 +249,27 @@ config MMC_SDHCI_S3C_DMA
168
169 YMMV.
170
171 +config MMC_SDHCI_BCM2708
172 + tristate "SDHCI support on BCM2708"
173 + depends on MMC_SDHCI && MACH_BCM2708
174 + select MMC_SDHCI_IO_ACCESSORS
175 + help
176 + This selects the Secure Digital Host Controller Interface (SDHCI)
177 + often referred to as the eMMC block.
178 +
179 + If you have a controller with this interface, say Y or M here.
180 +
181 + If unsure, say N.
182 +
183 +config MMC_SDHCI_BCM2708_DMA
184 + bool "DMA support on BCM2708 Arasan controller"
185 + depends on MMC_SDHCI_BCM2708
186 + help
187 + Enable DMA support on the Arasan SDHCI controller in Broadcom 2708
188 + based chips.
189 +
190 + If unsure, say N.
191 +
192 config MMC_SDHCI_BCM2835
193 tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
194 depends on ARCH_BCM2835
195 --- a/drivers/mmc/host/Makefile
196 +++ b/drivers/mmc/host/Makefile
197 @@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-p
198 obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
199 obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
200 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
201 +obj-$(CONFIG_MMC_SDHCI_BCM2708) += sdhci-bcm2708.o
202 obj-$(CONFIG_MMC_WBSD) += wbsd.o
203 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
204 obj-$(CONFIG_MMC_OMAP) += omap.o
205 --- /dev/null
206 +++ b/drivers/mmc/host/sdhci-bcm2708.c
207 @@ -0,0 +1,1420 @@
208 +/*
209 + * sdhci-bcm2708.c Support for SDHCI device on BCM2708
210 + * Copyright (c) 2010 Broadcom
211 + *
212 + * This program is free software; you can redistribute it and/or modify
213 + * it under the terms of the GNU General Public License version 2 as
214 + * published by the Free Software Foundation.
215 + *
216 + * This program is distributed in the hope that it will be useful,
217 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
218 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
219 + * GNU General Public License for more details.
220 + *
221 + * You should have received a copy of the GNU General Public License
222 + * along with this program; if not, write to the Free Software
223 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
224 + */
225 +
226 +/* Supports:
227 + * SDHCI platform device - Arasan SD controller in BCM2708
228 + *
229 + * Inspired by sdhci-pci.c, by Pierre Ossman
230 + */
231 +
232 +#include <linux/delay.h>
233 +#include <linux/highmem.h>
234 +#include <linux/platform_device.h>
235 +#include <linux/module.h>
236 +#include <linux/mmc/mmc.h>
237 +#include <linux/mmc/host.h>
238 +#include <linux/mmc/sd.h>
239 +
240 +#include <linux/io.h>
241 +#include <linux/dma-mapping.h>
242 +#include <mach/dma.h>
243 +
244 +#include "sdhci.h"
245 +
246 +/*****************************************************************************\
247 + * *
248 + * Configuration *
249 + * *
250 +\*****************************************************************************/
251 +
252 +#define DRIVER_NAME "bcm2708_sdhci"
253 +
254 +/* for the time being insist on DMA mode - PIO seems not to work */
255 +#ifndef CONFIG_MMC_SDHCI_BCM2708_DMA
256 +#warning Non-DMA (PIO) version of this driver currently unavailable
257 +#endif
258 +#undef CONFIG_MMC_SDHCI_BCM2708_DMA
259 +#define CONFIG_MMC_SDHCI_BCM2708_DMA y
260 +
261 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
262 +/* #define CHECK_DMA_USE */
263 +#endif
264 +//#define LOG_REGISTERS
265 +
266 +#define USE_SCHED_TIME
267 +#define USE_SPACED_WRITES_2CLK 1 /* space consecutive register writes */
268 +#define USE_SOFTWARE_TIMEOUTS 1 /* not hardware timeouts */
269 +#define SOFTWARE_ERASE_TIMEOUT_SEC 30
270 +
271 +#define SDHCI_BCM_DMA_CHAN 4 /* this default is normally overridden */
272 +#define SDHCI_BCM_DMA_WAITS 0 /* delays slowing DMA transfers: 0-31 */
273 +/* We are worried that SD card DMA use may be blocking the AXI bus for others */
274 +
275 +/*! TODO: obtain these from the physical address */
276 +#define DMA_SDHCI_BASE 0x7e300000 /* EMMC register block on Videocore */
277 +#define DMA_SDHCI_BUFFER (DMA_SDHCI_BASE + SDHCI_BUFFER)
278 +
279 +#define BCM2708_SDHCI_SLEEP_TIMEOUT 1000 /* msecs */
280 +
281 +/* Mhz clock that the EMMC core is running at. Should match the platform clockman settings */
282 +#define BCM2708_EMMC_CLOCK_FREQ 50000000
283 +
284 +#define REG_EXRDFIFO_EN 0x80
285 +#define REG_EXRDFIFO_CFG 0x84
286 +
287 +int cycle_delay=2;
288 +
289 +/*****************************************************************************\
290 + * *
291 + * Debug *
292 + * *
293 +\*****************************************************************************/
294 +
295 +
296 +
297 +#define DBG(f, x...) \
298 + pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
299 +// printk(KERN_INFO DRIVER_NAME " [%s()]: " f, __func__,## x)//GRAYG
300 +
301 +
302 +/*****************************************************************************\
303 + * *
304 + * High Precision Time *
305 + * *
306 +\*****************************************************************************/
307 +
308 +#ifdef USE_SCHED_TIME
309 +
310 +#include <mach/frc.h>
311 +
312 +typedef unsigned long hptime_t;
313 +
314 +#define FMT_HPT "lu"
315 +
316 +static inline hptime_t hptime(void)
317 +{
318 + return frc_clock_ticks32();
319 +}
320 +
321 +#define HPTIME_CLK_NS 1000ul
322 +
323 +#else
324 +
325 +typedef unsigned long hptime_t;
326 +
327 +#define FMT_HPT "lu"
328 +
329 +static inline hptime_t hptime(void)
330 +{
331 + return jiffies;
332 +}
333 +
334 +#define HPTIME_CLK_NS (1000000000ul/HZ)
335 +
336 +#endif
337 +
338 +static inline unsigned long int since_ns(hptime_t t)
339 +{
340 + return (unsigned long)((hptime() - t) * HPTIME_CLK_NS);
341 +}
342 +
343 +static bool allow_highspeed = 1;
344 +static int emmc_clock_freq = BCM2708_EMMC_CLOCK_FREQ;
345 +static bool sync_after_dma = 1;
346 +static bool missing_status = 1;
347 +static bool spurious_crc_acmd51 = 0;
348 +bool enable_llm = 1;
349 +bool extra_messages = 0;
350 +
351 +#if 0
352 +static void hptime_test(void)
353 +{
354 + hptime_t now;
355 + hptime_t later;
356 +
357 + now = hptime();
358 + msleep(10);
359 + later = hptime();
360 +
361 + printk(KERN_INFO DRIVER_NAME": 10ms = %"FMT_HPT" clks "
362 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
363 + later-now, now, later,
364 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
365 +
366 + now = hptime();
367 + msleep(1000);
368 + later = hptime();
369 +
370 + printk(KERN_INFO DRIVER_NAME": 1s = %"FMT_HPT" clks "
371 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
372 + later-now, now, later,
373 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
374 +}
375 +#endif
376 +
377 +/*****************************************************************************\
378 + * *
379 + * SDHCI core callbacks *
380 + * *
381 +\*****************************************************************************/
382 +
383 +
384 +#ifdef CHECK_DMA_USE
385 +/*#define CHECK_DMA_REG_USE*/
386 +#endif
387 +
388 +#ifdef CHECK_DMA_REG_USE
389 +/* we don't expect anything to be using these registers during a
390 + DMA (except the IRQ status) - so check */
391 +static void check_dma_reg_use(struct sdhci_host *host, int reg);
392 +#else
393 +#define check_dma_reg_use(host, reg)
394 +#endif
395 +
396 +
397 +static inline u32 sdhci_bcm2708_raw_readl(struct sdhci_host *host, int reg)
398 +{
399 + return readl(host->ioaddr + reg);
400 +}
401 +
402 +u32 sdhci_bcm2708_readl(struct sdhci_host *host, int reg)
403 +{
404 + u32 l = sdhci_bcm2708_raw_readl(host, reg);
405 +
406 +#ifdef LOG_REGISTERS
407 + printk(KERN_ERR "%s: readl from 0x%02x, value 0x%08x\n",
408 + mmc_hostname(host->mmc), reg, l);
409 +#endif
410 + check_dma_reg_use(host, reg);
411 +
412 + return l;
413 +}
414 +
415 +u16 sdhci_bcm2708_readw(struct sdhci_host *host, int reg)
416 +{
417 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
418 + u32 w = l >> (reg << 3 & 0x18) & 0xffff;
419 +
420 +#ifdef LOG_REGISTERS
421 + printk(KERN_ERR "%s: readw from 0x%02x, value 0x%04x\n",
422 + mmc_hostname(host->mmc), reg, w);
423 +#endif
424 + check_dma_reg_use(host, reg);
425 +
426 + return (u16)w;
427 +}
428 +
429 +u8 sdhci_bcm2708_readb(struct sdhci_host *host, int reg)
430 +{
431 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
432 + u32 b = l >> (reg << 3 & 0x18) & 0xff;
433 +
434 +#ifdef LOG_REGISTERS
435 + printk(KERN_ERR "%s: readb from 0x%02x, value 0x%02x\n",
436 + mmc_hostname(host->mmc), reg, b);
437 +#endif
438 + check_dma_reg_use(host, reg);
439 +
440 + return (u8)b;
441 +}
442 +
443 +
444 +static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
445 +{
446 + u32 ier;
447 +
448 +#if USE_SPACED_WRITES_2CLK
449 + static bool timeout_disabled = false;
450 + unsigned int ns_2clk = 0;
451 +
452 + /* The Arasan has a bugette whereby it may lose the content of
453 + * successive writes to registers that are within two SD-card clock
454 + * cycles of each other (a clock domain crossing problem).
455 + * It seems, however, that the data register does not have this problem.
456 + * (Which is just as well - otherwise we'd have to nobble the DMA engine
457 + * too)
458 + */
459 + if (reg != SDHCI_BUFFER && host->clock != 0) {
460 + /* host->clock is the clock freq in Hz */
461 + static hptime_t last_write_hpt;
462 + hptime_t now = hptime();
463 + ns_2clk = cycle_delay*1000000/(host->clock/1000);
464 +
465 + if (now == last_write_hpt || now == last_write_hpt+1) {
466 + /* we can't guarantee any significant time has
467 + * passed - we'll have to wait anyway ! */
468 + ndelay(ns_2clk);
469 + } else
470 + {
471 + /* we must have waited at least this many ns: */
472 + unsigned int ns_wait = HPTIME_CLK_NS *
473 + (last_write_hpt - now - 1);
474 + if (ns_wait < ns_2clk)
475 + ndelay(ns_2clk - ns_wait);
476 + }
477 + last_write_hpt = now;
478 + }
479 +#if USE_SOFTWARE_TIMEOUTS
480 + /* The Arasan is clocked for timeouts using the SD clock which is too
481 + * fast for ERASE commands and causes issues. So we disable timeouts
482 + * for ERASE */
483 + if (host->cmd != NULL && host->cmd->opcode == MMC_ERASE &&
484 + reg == (SDHCI_COMMAND & ~3)) {
485 + mod_timer(&host->timer,
486 + jiffies + SOFTWARE_ERASE_TIMEOUT_SEC * HZ);
487 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
488 + ier &= ~SDHCI_INT_DATA_TIMEOUT;
489 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
490 + timeout_disabled = true;
491 + ndelay(ns_2clk);
492 + } else if (timeout_disabled) {
493 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
494 + ier |= SDHCI_INT_DATA_TIMEOUT;
495 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
496 + timeout_disabled = false;
497 + ndelay(ns_2clk);
498 + }
499 +#endif
500 + writel(val, host->ioaddr + reg);
501 +#else
502 + void __iomem * regaddr = host->ioaddr + reg;
503 +
504 + writel(val, regaddr);
505 +
506 + if (reg != SDHCI_BUFFER && reg != SDHCI_INT_STATUS && host->clock != 0)
507 + {
508 + int timeout = 100000;
509 + while (val != readl(regaddr) && --timeout > 0)
510 + continue;
511 +
512 + if (timeout <= 0)
513 + printk(KERN_ERR "%s: writing 0x%X to reg 0x%X "
514 + "always gives 0x%X\n",
515 + mmc_hostname(host->mmc),
516 + val, reg, readl(regaddr));
517 + BUG_ON(timeout <= 0);
518 + }
519 +#endif
520 +}
521 +
522 +
523 +void sdhci_bcm2708_writel(struct sdhci_host *host, u32 val, int reg)
524 +{
525 +#ifdef LOG_REGISTERS
526 + printk(KERN_ERR "%s: writel to 0x%02x, value 0x%08x\n",
527 + mmc_hostname(host->mmc), reg, val);
528 +#endif
529 + check_dma_reg_use(host, reg);
530 +
531 + sdhci_bcm2708_raw_writel(host, val, reg);
532 +}
533 +
534 +void sdhci_bcm2708_writew(struct sdhci_host *host, u16 val, int reg)
535 +{
536 + static u32 shadow = 0;
537 +
538 + u32 p = reg == SDHCI_COMMAND ? shadow :
539 + sdhci_bcm2708_raw_readl(host, reg & ~3);
540 + u32 s = reg << 3 & 0x18;
541 + u32 l = val << s;
542 + u32 m = 0xffff << s;
543 +
544 +#ifdef LOG_REGISTERS
545 + printk(KERN_ERR "%s: writew to 0x%02x, value 0x%04x\n",
546 + mmc_hostname(host->mmc), reg, val);
547 +#endif
548 +
549 + if (reg == SDHCI_TRANSFER_MODE)
550 + shadow = (p & ~m) | l;
551 + else {
552 + check_dma_reg_use(host, reg);
553 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
554 + }
555 +}
556 +
557 +void sdhci_bcm2708_writeb(struct sdhci_host *host, u8 val, int reg)
558 +{
559 + u32 p = sdhci_bcm2708_raw_readl(host, reg & ~3);
560 + u32 s = reg << 3 & 0x18;
561 + u32 l = val << s;
562 + u32 m = 0xff << s;
563 +
564 +#ifdef LOG_REGISTERS
565 + printk(KERN_ERR "%s: writeb to 0x%02x, value 0x%02x\n",
566 + mmc_hostname(host->mmc), reg, val);
567 +#endif
568 +
569 + check_dma_reg_use(host, reg);
570 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
571 +}
572 +
573 +static unsigned int sdhci_bcm2708_get_max_clock(struct sdhci_host *host)
574 +{
575 + return emmc_clock_freq;
576 +}
577 +
578 +/*****************************************************************************\
579 + * *
580 + * DMA Operation *
581 + * *
582 +\*****************************************************************************/
583 +
584 +struct sdhci_bcm2708_priv {
585 + int dma_chan;
586 + int dma_irq;
587 + void __iomem *dma_chan_base;
588 + struct bcm2708_dma_cb *cb_base; /* DMA control blocks */
589 + dma_addr_t cb_handle;
590 + /* tracking scatter gather progress */
591 + unsigned sg_ix; /* scatter gather list index */
592 + unsigned sg_done; /* bytes in current sg_ix done */
593 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
594 + unsigned char dma_wanted; /* DMA transfer requested */
595 + unsigned char dma_waits; /* wait states in DMAs */
596 +#ifdef CHECK_DMA_USE
597 + unsigned char dmas_pending; /* no of unfinished DMAs */
598 + hptime_t when_started;
599 + hptime_t when_reset;
600 + hptime_t when_stopped;
601 +#endif
602 +#endif
603 + /* signalling the end of a transfer */
604 + void (*complete)(struct sdhci_host *);
605 +};
606 +
607 +#define SDHCI_HOST_PRIV(host) \
608 + (struct sdhci_bcm2708_priv *)((struct sdhci_host *)(host)+1)
609 +
610 +
611 +
612 +#ifdef CHECK_DMA_REG_USE
613 +static void check_dma_reg_use(struct sdhci_host *host, int reg)
614 +{
615 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
616 + if (host_priv->dma_wanted && reg != SDHCI_INT_STATUS) {
617 + printk(KERN_INFO"%s: accessing register 0x%x during DMA\n",
618 + mmc_hostname(host->mmc), reg);
619 + }
620 +}
621 +#endif
622 +
623 +
624 +
625 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
626 +
627 +static void sdhci_clear_set_irqgen(struct sdhci_host *host, u32 clear, u32 set)
628 +{
629 + u32 ier;
630 +
631 + ier = sdhci_bcm2708_raw_readl(host, SDHCI_SIGNAL_ENABLE);
632 + ier &= ~clear;
633 + ier |= set;
634 + /* change which requests generate IRQs - makes no difference to
635 + the content of SDHCI_INT_STATUS, or the need to acknowledge IRQs */
636 + sdhci_bcm2708_raw_writel(host, ier, SDHCI_SIGNAL_ENABLE);
637 +}
638 +
639 +static void sdhci_signal_irqs(struct sdhci_host *host, u32 irqs)
640 +{
641 + sdhci_clear_set_irqgen(host, 0, irqs);
642 +}
643 +
644 +static void sdhci_unsignal_irqs(struct sdhci_host *host, u32 irqs)
645 +{
646 + sdhci_clear_set_irqgen(host, irqs, 0);
647 +}
648 +
649 +
650 +
651 +static void schci_bcm2708_cb_read(struct sdhci_bcm2708_priv *host,
652 + int ix,
653 + dma_addr_t dma_addr, unsigned len,
654 + int /*bool*/ is_last)
655 +{
656 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
657 + unsigned char dmawaits = host->dma_waits;
658 +
659 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
660 + BCM2708_DMA_WAITS(dmawaits) |
661 + BCM2708_DMA_S_DREQ |
662 + BCM2708_DMA_D_WIDTH |
663 + BCM2708_DMA_D_INC;
664 + cb->src = DMA_SDHCI_BUFFER; /* DATA register DMA address */
665 + cb->dst = dma_addr;
666 + cb->length = len;
667 + cb->stride = 0;
668 +
669 + if (is_last) {
670 + cb->info |= BCM2708_DMA_INT_EN |
671 + BCM2708_DMA_WAIT_RESP;
672 + cb->next = 0;
673 + } else
674 + cb->next = host->cb_handle +
675 + (ix+1)*sizeof(struct bcm2708_dma_cb);
676 +
677 + cb->pad[0] = 0;
678 + cb->pad[1] = 0;
679 +}
680 +
681 +static void schci_bcm2708_cb_write(struct sdhci_bcm2708_priv *host,
682 + int ix,
683 + dma_addr_t dma_addr, unsigned len,
684 + int /*bool*/ is_last)
685 +{
686 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
687 + unsigned char dmawaits = host->dma_waits;
688 +
689 + /* We can make arbitrarily large writes as long as we specify DREQ to
690 + pace the delivery of bytes to the Arasan hardware */
691 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
692 + BCM2708_DMA_WAITS(dmawaits) |
693 + BCM2708_DMA_D_DREQ |
694 + BCM2708_DMA_S_WIDTH |
695 + BCM2708_DMA_S_INC;
696 + cb->src = dma_addr;
697 + cb->dst = DMA_SDHCI_BUFFER; /* DATA register DMA address */
698 + cb->length = len;
699 + cb->stride = 0;
700 +
701 + if (is_last) {
702 + cb->info |= BCM2708_DMA_INT_EN |
703 + BCM2708_DMA_WAIT_RESP;
704 + cb->next = 0;
705 + } else
706 + cb->next = host->cb_handle +
707 + (ix+1)*sizeof(struct bcm2708_dma_cb);
708 +
709 + cb->pad[0] = 0;
710 + cb->pad[1] = 0;
711 +}
712 +
713 +
714 +static void schci_bcm2708_dma_go(struct sdhci_host *host)
715 +{
716 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
717 + void __iomem *dma_chan_base = host_priv->dma_chan_base;
718 +
719 + BUG_ON(host_priv->dma_wanted);
720 +#ifdef CHECK_DMA_USE
721 + if (host_priv->dma_wanted)
722 + printk(KERN_ERR "%s: DMA already in progress - "
723 + "now %"FMT_HPT", last started %lu "
724 + "reset %lu stopped %lu\n",
725 + mmc_hostname(host->mmc),
726 + hptime(), since_ns(host_priv->when_started),
727 + since_ns(host_priv->when_reset),
728 + since_ns(host_priv->when_stopped));
729 + else if (host_priv->dmas_pending > 0)
730 + printk(KERN_INFO "%s: note - new DMA when %d reset DMAs "
731 + "already in progress - "
732 + "now %"FMT_HPT", started %lu reset %lu stopped %lu\n",
733 + mmc_hostname(host->mmc),
734 + host_priv->dmas_pending,
735 + hptime(), since_ns(host_priv->when_started),
736 + since_ns(host_priv->when_reset),
737 + since_ns(host_priv->when_stopped));
738 + host_priv->dmas_pending += 1;
739 + host_priv->when_started = hptime();
740 +#endif
741 + host_priv->dma_wanted = 1;
742 + DBG("PDMA go - base %p handle %08X\n", dma_chan_base,
743 + host_priv->cb_handle);
744 + bcm_dma_start(dma_chan_base, host_priv->cb_handle);
745 +}
746 +
747 +
748 +static void
749 +sdhci_platdma_read(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
750 +{
751 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
752 +
753 + DBG("PDMA to read %d bytes\n", len);
754 + host_priv->sg_done += len;
755 + schci_bcm2708_cb_read(host_priv, 0, dma_addr, len, 1/*TRUE*/);
756 + schci_bcm2708_dma_go(host);
757 +}
758 +
759 +
760 +static void
761 +sdhci_platdma_write(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
762 +{
763 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
764 +
765 + DBG("PDMA to write %d bytes\n", len);
766 + //BUG_ON(0 != (len & 0x1ff));
767 +
768 + host_priv->sg_done += len;
769 + schci_bcm2708_cb_write(host_priv, 0, dma_addr, len, 1/*TRUE*/);
770 + schci_bcm2708_dma_go(host);
771 +}
772 +
773 +/*! space is available to receive into or data is available to write
774 + Platform DMA exported function
775 +*/
776 +void
777 +sdhci_bcm2708_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
778 + void(*completion_callback)(struct sdhci_host *host))
779 +{
780 + struct mmc_data *data = host->data;
781 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
782 + int sg_ix;
783 + size_t bytes;
784 + dma_addr_t addr;
785 +
786 + BUG_ON(NULL == data);
787 + BUG_ON(0 == data->blksz);
788 +
789 + host_priv->complete = completion_callback;
790 +
791 + sg_ix = host_priv->sg_ix;
792 + BUG_ON(sg_ix >= data->sg_len);
793 +
794 + /* we can DMA blocks larger than blksz - it may hang the DMA
795 + channel but we are its only user */
796 + bytes = sg_dma_len(&data->sg[sg_ix]) - host_priv->sg_done;
797 + addr = sg_dma_address(&data->sg[sg_ix]) + host_priv->sg_done;
798 +
799 + if (bytes > 0) {
800 + /* We're going to poll for read/write available state until
801 + we finish this DMA
802 + */
803 +
804 + if (data->flags & MMC_DATA_READ) {
805 + if (*ref_intmask & SDHCI_INT_DATA_AVAIL) {
806 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
807 + SDHCI_INT_SPACE_AVAIL);
808 + sdhci_platdma_read(host, addr, bytes);
809 + }
810 + } else {
811 + if (*ref_intmask & SDHCI_INT_SPACE_AVAIL) {
812 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
813 + SDHCI_INT_SPACE_AVAIL);
814 + sdhci_platdma_write(host, addr, bytes);
815 + }
816 + }
817 + }
818 + /* else:
819 + we have run out of bytes that need transferring (e.g. we may be in
820 + the middle of the last DMA transfer), or
821 + it is also possible that we've been called when another IRQ is
822 + signalled, even though we've turned off signalling of our own IRQ */
823 +
824 + *ref_intmask &= ~SDHCI_INT_DATA_END;
825 + /* don't let the main sdhci driver act on this .. we'll deal with it
826 + when we respond to the DMA - if one is currently in progress */
827 +}
828 +
829 +/* is it possible to DMA the given mmc_data structure?
830 + Platform DMA exported function
831 +*/
832 +int /*bool*/
833 +sdhci_bcm2708_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
834 +{
835 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
836 + int ok = bcm_sg_suitable_for_dma(data->sg, data->sg_len);
837 +
838 + if (!ok)
839 + DBG("Reverting to PIO - bad cache alignment\n");
840 +
841 + else {
842 + host_priv->sg_ix = 0; /* first SG index */
843 + host_priv->sg_done = 0; /* no bytes done */
844 + }
845 +
846 + return ok;
847 +}
848 +
849 +#include <mach/arm_control.h> //GRAYG
850 +/*! the current SD transaction has been abandoned
851 + We need to tidy up if we were in the middle of a DMA
852 + Platform DMA exported function
853 +*/
854 +void
855 +sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
856 +{
857 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
858 +// unsigned long flags;
859 +
860 + BUG_ON(NULL == host);
861 +
862 +// spin_lock_irqsave(&host->lock, flags);
863 +
864 + if (host_priv->dma_wanted) {
865 + if (NULL == data) {
866 + printk(KERN_ERR "%s: ongoing DMA reset - no data!\n",
867 + mmc_hostname(host->mmc));
868 + BUG_ON(NULL == data);
869 + } else {
870 + struct scatterlist *sg;
871 + int sg_len;
872 + int sg_todo;
873 + int rc;
874 + unsigned long cs;
875 +
876 + sg = data->sg;
877 + sg_len = data->sg_len;
878 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
879 +
880 + cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
881 +
882 + if (!(BCM2708_DMA_ACTIVE & cs))
883 + {
884 + if (extra_messages)
885 + printk(KERN_INFO "%s: missed completion of "
886 + "cmd %d DMA (%d/%d [%d]/[%d]) - "
887 + "ignoring it\n",
888 + mmc_hostname(host->mmc),
889 + host->last_cmdop,
890 + host_priv->sg_done, sg_todo,
891 + host_priv->sg_ix+1, sg_len);
892 + }
893 + else
894 + printk(KERN_INFO "%s: resetting ongoing cmd %d"
895 + "DMA before %d/%d [%d]/[%d] complete\n",
896 + mmc_hostname(host->mmc),
897 + host->last_cmdop,
898 + host_priv->sg_done, sg_todo,
899 + host_priv->sg_ix+1, sg_len);
900 +#ifdef CHECK_DMA_USE
901 + printk(KERN_INFO "%s: now %"FMT_HPT" started %lu "
902 + "last reset %lu last stopped %lu\n",
903 + mmc_hostname(host->mmc),
904 + hptime(), since_ns(host_priv->when_started),
905 + since_ns(host_priv->when_reset),
906 + since_ns(host_priv->when_stopped));
907 + { unsigned long info, debug;
908 + void __iomem *base;
909 + unsigned long pend0, pend1, pend2;
910 +
911 + base = host_priv->dma_chan_base;
912 + cs = readl(base + BCM2708_DMA_CS);
913 + info = readl(base + BCM2708_DMA_INFO);
914 + debug = readl(base + BCM2708_DMA_DEBUG);
915 + printk(KERN_INFO "%s: DMA%d CS=%08lX TI=%08lX "
916 + "DEBUG=%08lX\n",
917 + mmc_hostname(host->mmc),
918 + host_priv->dma_chan,
919 + cs, info, debug);
920 + pend0 = readl(__io_address(ARM_IRQ_PEND0));
921 + pend1 = readl(__io_address(ARM_IRQ_PEND1));
922 + pend2 = readl(__io_address(ARM_IRQ_PEND2));
923 +
924 + printk(KERN_INFO "%s: PEND0=%08lX "
925 + "PEND1=%08lX PEND2=%08lX\n",
926 + mmc_hostname(host->mmc),
927 + pend0, pend1, pend2);
928 +
929 + //gintsts = readl(__io_address(GINTSTS));
930 + //gintmsk = readl(__io_address(GINTMSK));
931 + //printk(KERN_INFO "%s: USB GINTSTS=%08lX"
932 + // "GINTMSK=%08lX\n",
933 + // mmc_hostname(host->mmc), gintsts, gintmsk);
934 + }
935 +#endif
936 + rc = bcm_dma_abort(host_priv->dma_chan_base);
937 + BUG_ON(rc != 0);
938 + }
939 + host_priv->dma_wanted = 0;
940 +#ifdef CHECK_DMA_USE
941 + host_priv->when_reset = hptime();
942 +#endif
943 + }
944 +
945 +// spin_unlock_irqrestore(&host->lock, flags);
946 +}
947 +
948 +
949 +static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
950 + u32 dma_cs)
951 +{
952 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
953 + struct mmc_data *data;
954 + struct scatterlist *sg;
955 + int sg_len;
956 + int sg_ix;
957 + int sg_todo;
958 +// unsigned long flags;
959 +
960 + BUG_ON(NULL == host);
961 +
962 +// spin_lock_irqsave(&host->lock, flags);
963 + data = host->data;
964 +
965 +#ifdef CHECK_DMA_USE
966 + if (host_priv->dmas_pending <= 0)
967 + DBG("on completion no DMA in progress - "
968 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
969 + hptime(), since_ns(host_priv->when_started),
970 + since_ns(host_priv->when_reset),
971 + since_ns(host_priv->when_stopped));
972 + else if (host_priv->dmas_pending > 1)
973 + DBG("still %d DMA in progress after completion - "
974 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
975 + host_priv->dmas_pending - 1,
976 + hptime(), since_ns(host_priv->when_started),
977 + since_ns(host_priv->when_reset),
978 + since_ns(host_priv->when_stopped));
979 + BUG_ON(host_priv->dmas_pending <= 0);
980 + host_priv->dmas_pending -= 1;
981 + host_priv->when_stopped = hptime();
982 +#endif
983 + host_priv->dma_wanted = 0;
984 +
985 + if (NULL == data) {
986 + DBG("PDMA unused completion - status 0x%X\n", dma_cs);
987 +// spin_unlock_irqrestore(&host->lock, flags);
988 + return;
989 + }
990 + sg = data->sg;
991 + sg_len = data->sg_len;
992 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
993 +
994 + DBG("PDMA complete %d/%d [%d]/[%d]..\n",
995 + host_priv->sg_done, sg_todo,
996 + host_priv->sg_ix+1, sg_len);
997 +
998 + BUG_ON(host_priv->sg_done > sg_todo);
999 +
1000 + if (host_priv->sg_done >= sg_todo) {
1001 + host_priv->sg_ix++;
1002 + host_priv->sg_done = 0;
1003 + }
1004 +
1005 + sg_ix = host_priv->sg_ix;
1006 + if (sg_ix < sg_len) {
1007 + u32 irq_mask;
1008 + /* Set off next DMA if we've got the capacity */
1009 +
1010 + if (data->flags & MMC_DATA_READ)
1011 + irq_mask = SDHCI_INT_DATA_AVAIL;
1012 + else
1013 + irq_mask = SDHCI_INT_SPACE_AVAIL;
1014 +
1015 + /* We have to use the interrupt status register on the BCM2708
1016 + rather than the SDHCI_PRESENT_STATE register because latency
1017 + in the glue logic means that the information retrieved from
1018 + the latter is not always up-to-date w.r.t the DMA engine -
1019 + it may not indicate that a read or a write is ready yet */
1020 + if (sdhci_bcm2708_raw_readl(host, SDHCI_INT_STATUS) &
1021 + irq_mask) {
1022 + size_t bytes = sg_dma_len(&sg[sg_ix]) -
1023 + host_priv->sg_done;
1024 + dma_addr_t addr = sg_dma_address(&data->sg[sg_ix]) +
1025 + host_priv->sg_done;
1026 +
1027 + /* acknowledge interrupt */
1028 + sdhci_bcm2708_raw_writel(host, irq_mask,
1029 + SDHCI_INT_STATUS);
1030 +
1031 + BUG_ON(0 == bytes);
1032 +
1033 + if (data->flags & MMC_DATA_READ)
1034 + sdhci_platdma_read(host, addr, bytes);
1035 + else
1036 + sdhci_platdma_write(host, addr, bytes);
1037 + } else {
1038 + DBG("PDMA - wait avail\n");
1039 + /* may generate an IRQ if already present */
1040 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1041 + SDHCI_INT_SPACE_AVAIL);
1042 + }
1043 + } else {
1044 + if (sync_after_dma) {
1045 + /* On the Arasan controller the stop command (which will be
1046 + scheduled after this completes) does not seem to work
1047 + properly if we allow it to be issued when we are
1048 + transferring data to/from the SD card.
1049 + We get CRC and DEND errors unless we wait for
1050 + the SD controller to finish reading/writing to the card. */
1051 + u32 state_mask;
1052 + int timeout=30*5000;
1053 +
1054 + DBG("PDMA over - sync card\n");
1055 + if (data->flags & MMC_DATA_READ)
1056 + state_mask = SDHCI_DOING_READ;
1057 + else
1058 + state_mask = SDHCI_DOING_WRITE;
1059 +
1060 + while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE)
1061 + & state_mask) && --timeout > 0)
1062 + {
1063 + udelay(1);
1064 + continue;
1065 + }
1066 + if (timeout <= 0)
1067 + printk(KERN_ERR"%s: final %s to SD card still "
1068 + "running\n",
1069 + mmc_hostname(host->mmc),
1070 + data->flags & MMC_DATA_READ? "read": "write");
1071 + }
1072 + if (host_priv->complete) {
1073 + (*host_priv->complete)(host);
1074 + DBG("PDMA %s complete\n",
1075 + data->flags & MMC_DATA_READ?"read":"write");
1076 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1077 + SDHCI_INT_SPACE_AVAIL);
1078 + }
1079 + }
1080 +// spin_unlock_irqrestore(&host->lock, flags);
1081 +}
1082 +
1083 +static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
1084 +{
1085 + irqreturn_t result = IRQ_NONE;
1086 + struct sdhci_host *host = dev_id;
1087 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1088 + u32 dma_cs; /* control and status register */
1089 +
1090 + BUG_ON(NULL == dev_id);
1091 + BUG_ON(NULL == host_priv->dma_chan_base);
1092 +
1093 + sdhci_spin_lock(host);
1094 +
1095 + dma_cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
1096 +
1097 + if (dma_cs & BCM2708_DMA_ERR) {
1098 + unsigned long debug;
1099 + debug = readl(host_priv->dma_chan_base +
1100 + BCM2708_DMA_DEBUG);
1101 + printk(KERN_ERR "%s: DMA error - CS %lX DEBUG %lX\n",
1102 + mmc_hostname(host->mmc), (unsigned long)dma_cs,
1103 + (unsigned long)debug);
1104 + /* reset error */
1105 + writel(debug, host_priv->dma_chan_base +
1106 + BCM2708_DMA_DEBUG);
1107 + }
1108 + if (dma_cs & BCM2708_DMA_INT) {
1109 + /* acknowledge interrupt */
1110 + writel(BCM2708_DMA_INT,
1111 + host_priv->dma_chan_base + BCM2708_DMA_CS);
1112 +
1113 + dsb(); /* ARM data synchronization (push) operation */
1114 +
1115 + if (!host_priv->dma_wanted) {
1116 + /* ignore this interrupt - it was reset */
1117 + if (extra_messages)
1118 + printk(KERN_INFO "%s: DMA IRQ %X ignored - "
1119 + "results were reset\n",
1120 + mmc_hostname(host->mmc), dma_cs);
1121 +#ifdef CHECK_DMA_USE
1122 + printk(KERN_INFO "%s: now %"FMT_HPT
1123 + " started %lu reset %lu stopped %lu\n",
1124 + mmc_hostname(host->mmc), hptime(),
1125 + since_ns(host_priv->when_started),
1126 + since_ns(host_priv->when_reset),
1127 + since_ns(host_priv->when_stopped));
1128 + host_priv->dmas_pending--;
1129 +#endif
1130 + } else
1131 + sdhci_bcm2708_dma_complete_irq(host, dma_cs);
1132 +
1133 + result = IRQ_HANDLED;
1134 + }
1135 + sdhci_spin_unlock(host);
1136 +
1137 + return result;
1138 +}
1139 +#endif /* CONFIG_MMC_SDHCI_BCM2708_DMA */
1140 +
1141 +
1142 +/***************************************************************************** \
1143 + * *
1144 + * Device Attributes *
1145 + * *
1146 +\*****************************************************************************/
1147 +
1148 +
1149 +/**
1150 + * Show the DMA-using status
1151 + */
1152 +static ssize_t attr_dma_show(struct device *_dev,
1153 + struct device_attribute *attr, char *buf)
1154 +{
1155 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1156 +
1157 + if (host) {
1158 + int use_dma = (host->flags & SDHCI_USE_PLATDMA? 1:0);
1159 + return sprintf(buf, "%d\n", use_dma);
1160 + } else
1161 + return -EINVAL;
1162 +}
1163 +
1164 +/**
1165 + * Set the DMA-using status
1166 + */
1167 +static ssize_t attr_dma_store(struct device *_dev,
1168 + struct device_attribute *attr,
1169 + const char *buf, size_t count)
1170 +{
1171 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1172 +
1173 + if (host) {
1174 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1175 + int on = simple_strtol(buf, NULL, 0);
1176 + if (on) {
1177 + host->flags |= SDHCI_USE_PLATDMA;
1178 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1179 + printk(KERN_INFO "%s: DMA enabled\n",
1180 + mmc_hostname(host->mmc));
1181 + } else {
1182 + host->flags &= ~(SDHCI_USE_PLATDMA | SDHCI_REQ_USE_DMA);
1183 + sdhci_bcm2708_writel(host, 0, REG_EXRDFIFO_EN);
1184 + printk(KERN_INFO "%s: DMA disabled\n",
1185 + mmc_hostname(host->mmc));
1186 + }
1187 +#endif
1188 + return count;
1189 + } else
1190 + return -EINVAL;
1191 +}
1192 +
1193 +static DEVICE_ATTR(use_dma, S_IRUGO | S_IWUGO, attr_dma_show, attr_dma_store);
1194 +
1195 +
1196 +/**
1197 + * Show the DMA wait states used
1198 + */
1199 +static ssize_t attr_dmawait_show(struct device *_dev,
1200 + struct device_attribute *attr, char *buf)
1201 +{
1202 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1203 +
1204 + if (host) {
1205 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1206 + int dmawait = host_priv->dma_waits;
1207 + return sprintf(buf, "%d\n", dmawait);
1208 + } else
1209 + return -EINVAL;
1210 +}
1211 +
1212 +/**
1213 + * Set the DMA wait state used
1214 + */
1215 +static ssize_t attr_dmawait_store(struct device *_dev,
1216 +                                 struct device_attribute *attr,
1217 +                                 const char *buf, size_t count)
1218 +{
1219 +       struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1220 +
1221 +       if (host) {
1222 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1223 +               struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1224 +               int dma_waits = simple_strtol(buf, NULL, 0);
1225 +               if (dma_waits >= 0 && dma_waits < 32)
1226 +                       host_priv->dma_waits = dma_waits;
1227 +               else
1228 +                       printk(KERN_ERR "%s: illegal dma_waits value - %d\n",
1229 +                              mmc_hostname(host->mmc), dma_waits);
1230 +#endif
1231 +               return count;
1232 +       } else
1233 +               return -EINVAL;
1234 +}
1235 +
1236 +static DEVICE_ATTR(dma_wait, S_IRUGO | S_IWUGO,
1237 + attr_dmawait_show, attr_dmawait_store);
1238 +
1239 +
1240 +/**
1241 + * Show the DMA-using status
1242 + */
1243 +static ssize_t attr_status_show(struct device *_dev,
1244 + struct device_attribute *attr, char *buf)
1245 +{
1246 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1247 +
1248 + if (host) {
1249 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1250 + return sprintf(buf,
1251 + "present: yes\n"
1252 + "power: %s\n"
1253 + "clock: %u Hz\n"
1254 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1255 + "dma: %s (%d waits)\n",
1256 +#else
1257 + "dma: unconfigured\n",
1258 +#endif
1259 + "always on",
1260 + host->clock
1261 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1262 + , (host->flags & SDHCI_USE_PLATDMA)? "on": "off"
1263 + , host_priv->dma_waits
1264 +#endif
1265 + );
1266 + } else
1267 + return -EINVAL;
1268 +}
1269 +
1270 +static DEVICE_ATTR(status, S_IRUGO, attr_status_show, NULL);
1271 +
1272 +/***************************************************************************** \
1273 + * *
1274 + * Power Management *
1275 + * *
1276 +\*****************************************************************************/
1277 +
1278 +
1279 +#ifdef CONFIG_PM
1280 +static int sdhci_bcm2708_suspend(struct platform_device *dev, pm_message_t state)
1281 +{
1282 + struct sdhci_host *host = (struct sdhci_host *)
1283 + platform_get_drvdata(dev);
1284 + int ret = 0;
1285 +
1286 + if (host->mmc) {
1287 + ret = mmc_suspend_host(host->mmc);
1288 + }
1289 +
1290 + return ret;
1291 +}
1292 +
1293 +static int sdhci_bcm2708_resume(struct platform_device *dev)
1294 +{
1295 + struct sdhci_host *host = (struct sdhci_host *)
1296 + platform_get_drvdata(dev);
1297 + int ret = 0;
1298 +
1299 + if (host->mmc) {
1300 + ret = mmc_resume_host(host->mmc);
1301 + }
1302 +
1303 + return ret;
1304 +}
1305 +#endif
1306 +
1307 +
1308 +/*****************************************************************************\
1309 + * *
1310 + * Device quirk functions. Implemented as local ops because the flags *
1311 + * field is out of space with newer kernels. This implementation can be *
1312 + * back ported to older kernels as well. *
1313 +\****************************************************************************/
1314 +static unsigned int sdhci_bcm2708_quirk_extra_ints(struct sdhci_host *host)
1315 +{
1316 + return 1;
1317 +}
1318 +
1319 +static unsigned int sdhci_bcm2708_quirk_spurious_crc_acmd51(struct sdhci_host *host)
1320 +{
1321 + return 1;
1322 +}
1323 +
1324 +static unsigned int sdhci_bcm2708_quirk_voltage_broken(struct sdhci_host *host)
1325 +{
1326 + return 1;
1327 +}
1328 +
1329 +static unsigned int sdhci_bcm2708_uhs_broken(struct sdhci_host *host)
1330 +{
1331 + return 1;
1332 +}
1333 +
1334 +static unsigned int sdhci_bcm2708_missing_status(struct sdhci_host *host)
1335 +{
1336 + return 1;
1337 +}
1338 +
1339 +/***************************************************************************** \
1340 + * *
1341 + * Device ops *
1342 + * *
1343 +\*****************************************************************************/
1344 +
1345 +static struct sdhci_ops sdhci_bcm2708_ops = {
1346 +#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
1347 + .read_l = sdhci_bcm2708_readl,
1348 + .read_w = sdhci_bcm2708_readw,
1349 + .read_b = sdhci_bcm2708_readb,
1350 + .write_l = sdhci_bcm2708_writel,
1351 + .write_w = sdhci_bcm2708_writew,
1352 + .write_b = sdhci_bcm2708_writeb,
1353 +#else
1354 +#error The BCM2708 SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set
1355 +#endif
1356 + .get_max_clock = sdhci_bcm2708_get_max_clock,
1357 +
1358 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1359 + // Platform DMA operations
1360 + .pdma_able = sdhci_bcm2708_platdma_dmaable,
1361 + .pdma_avail = sdhci_bcm2708_platdma_avail,
1362 + .pdma_reset = sdhci_bcm2708_platdma_reset,
1363 +#endif
1364 + .extra_ints = sdhci_bcm2708_quirk_extra_ints,
1365 + .voltage_broken = sdhci_bcm2708_quirk_voltage_broken,
1366 + .uhs_broken = sdhci_bcm2708_uhs_broken,
1367 +};
1368 +
1369 +/*****************************************************************************\
1370 + * *
1371 + * Device probing/removal *
1372 + * *
1373 +\*****************************************************************************/
1374 +
1375 +static int sdhci_bcm2708_probe(struct platform_device *pdev)
1376 +{
1377 + struct sdhci_host *host;
1378 + struct resource *iomem;
1379 + struct sdhci_bcm2708_priv *host_priv;
1380 + int ret;
1381 +
1382 + BUG_ON(pdev == NULL);
1383 +
1384 + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1385 + if (!iomem) {
1386 + ret = -ENOMEM;
1387 + goto err;
1388 + }
1389 +
1390 + if (resource_size(iomem) != 0x100)
1391 + dev_err(&pdev->dev, "Invalid iomem size. You may "
1392 + "experience problems.\n");
1393 +
1394 + if (pdev->dev.parent)
1395 + host = sdhci_alloc_host(pdev->dev.parent,
1396 + sizeof(struct sdhci_bcm2708_priv));
1397 + else
1398 + host = sdhci_alloc_host(&pdev->dev,
1399 + sizeof(struct sdhci_bcm2708_priv));
1400 +
1401 + if (IS_ERR(host)) {
1402 + ret = PTR_ERR(host);
1403 + goto err;
1404 + }
1405 + if (missing_status) {
1406 + sdhci_bcm2708_ops.missing_status = sdhci_bcm2708_missing_status;
1407 + }
1408 +
1409 + if( spurious_crc_acmd51 ) {
1410 + sdhci_bcm2708_ops.spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc_acmd51;
1411 + }
1412 +
1413 +
1414 + printk("sdhci: %s low-latency mode\n",enable_llm?"Enable":"Disable");
1415 +
1416 + host->hw_name = "BCM2708_Arasan";
1417 + host->ops = &sdhci_bcm2708_ops;
1418 + host->irq = platform_get_irq(pdev, 0);
1419 + host->second_irq = 0;
1420 +
1421 + host->quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1422 + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1423 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1424 + SDHCI_QUIRK_MISSING_CAPS |
1425 + SDHCI_QUIRK_NO_HISPD_BIT |
1426 + (sync_after_dma ? 0:SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12);
1427 +
1428 +
1429 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1430 + host->flags = SDHCI_USE_PLATDMA;
1431 +#endif
1432 +
1433 + if (!request_mem_region(iomem->start, resource_size(iomem),
1434 + mmc_hostname(host->mmc))) {
1435 + dev_err(&pdev->dev, "cannot request region\n");
1436 + ret = -EBUSY;
1437 + goto err_request;
1438 + }
1439 +
1440 + host->ioaddr = ioremap(iomem->start, resource_size(iomem));
1441 + if (!host->ioaddr) {
1442 + dev_err(&pdev->dev, "failed to remap registers\n");
1443 + ret = -ENOMEM;
1444 + goto err_remap;
1445 + }
1446 +
1447 + host_priv = SDHCI_HOST_PRIV(host);
1448 +
1449 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1450 + host_priv->dma_wanted = 0;
1451 +#ifdef CHECK_DMA_USE
1452 + host_priv->dmas_pending = 0;
1453 + host_priv->when_started = 0;
1454 + host_priv->when_reset = 0;
1455 + host_priv->when_stopped = 0;
1456 +#endif
1457 + host_priv->sg_ix = 0;
1458 + host_priv->sg_done = 0;
1459 + host_priv->complete = NULL;
1460 + host_priv->dma_waits = SDHCI_BCM_DMA_WAITS;
1461 +
1462 + host_priv->cb_base = dma_alloc_writecombine(&pdev->dev, SZ_4K,
1463 + &host_priv->cb_handle,
1464 + GFP_KERNEL);
1465 + if (!host_priv->cb_base) {
1466 + dev_err(&pdev->dev, "cannot allocate DMA CBs\n");
1467 + ret = -ENOMEM;
1468 + goto err_alloc_cb;
1469 + }
1470 +
1471 + ret = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST,
1472 + &host_priv->dma_chan_base,
1473 + &host_priv->dma_irq);
1474 + if (ret < 0) {
1475 + dev_err(&pdev->dev, "couldn't allocate a DMA channel\n");
1476 + goto err_add_dma;
1477 + }
1478 + host_priv->dma_chan = ret;
1479 +
1480 + ret = request_irq(host_priv->dma_irq, sdhci_bcm2708_dma_irq,0,//IRQF_SHARED,
1481 + DRIVER_NAME " (dma)", host);
1482 + if (ret) {
1483 + dev_err(&pdev->dev, "cannot set DMA IRQ\n");
1484 + goto err_add_dma_irq;
1485 + }
1486 + host->second_irq = host_priv->dma_irq;
1487 + DBG("DMA CBs %p handle %08X DMA%d %p DMA IRQ %d\n",
1488 + host_priv->cb_base, (unsigned)host_priv->cb_handle,
1489 + host_priv->dma_chan, host_priv->dma_chan_base,
1490 + host_priv->dma_irq);
1491 +
1492 + if (allow_highspeed)
1493 + host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1494 +
1495 + /* single block writes cause data loss with some SD cards! */
1496 + host->mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;
1497 +#endif
1498 +
1499 + ret = sdhci_add_host(host);
1500 + if (ret)
1501 + goto err_add_host;
1502 +
1503 + platform_set_drvdata(pdev, host);
1504 + ret = device_create_file(&pdev->dev, &dev_attr_use_dma);
1505 + ret = device_create_file(&pdev->dev, &dev_attr_dma_wait);
1506 + ret = device_create_file(&pdev->dev, &dev_attr_status);
1507 +
1508 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1509 + /* enable extension fifo for paced DMA transfers */
1510 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1511 + sdhci_bcm2708_writel(host, 4, REG_EXRDFIFO_CFG);
1512 +#endif
1513 +
1514 + printk(KERN_INFO "%s: BCM2708 SDHC host at 0x%08llx DMA %d IRQ %d\n",
1515 + mmc_hostname(host->mmc), (unsigned long long)iomem->start,
1516 + host_priv->dma_chan, host_priv->dma_irq);
1517 +
1518 + return 0;
1519 +
1520 +err_add_host:
1521 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1522 + free_irq(host_priv->dma_irq, host);
1523 +err_add_dma_irq:
1524 + bcm_dma_chan_free(host_priv->dma_chan);
1525 +err_add_dma:
1526 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1527 + host_priv->cb_handle);
1528 +err_alloc_cb:
1529 +#endif
1530 + iounmap(host->ioaddr);
1531 +err_remap:
1532 + release_mem_region(iomem->start, resource_size(iomem));
1533 +err_request:
1534 + sdhci_free_host(host);
1535 +err:
1536 + dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1537 + return ret;
1538 +}
1539 +
1540 +static int sdhci_bcm2708_remove(struct platform_device *pdev)
1541 +{
1542 + struct sdhci_host *host = platform_get_drvdata(pdev);
1543 + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1544 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1545 + int dead;
1546 + u32 scratch;
1547 +
1548 + dead = 0;
1549 + scratch = sdhci_bcm2708_readl(host, SDHCI_INT_STATUS);
1550 + if (scratch == (u32)-1)
1551 + dead = 1;
1552 +
1553 + device_remove_file(&pdev->dev, &dev_attr_status);
1554 + device_remove_file(&pdev->dev, &dev_attr_dma_wait);
1555 + device_remove_file(&pdev->dev, &dev_attr_use_dma);
1556 +
1557 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1558 + free_irq(host_priv->dma_irq, host);
1559 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1560 + host_priv->cb_handle);
1561 +#endif
1562 + sdhci_remove_host(host, dead);
1563 + iounmap(host->ioaddr);
1564 + release_mem_region(iomem->start, resource_size(iomem));
1565 + sdhci_free_host(host);
1566 + platform_set_drvdata(pdev, NULL);
1567 +
1568 + return 0;
1569 +}
1570 +
1571 +static struct platform_driver sdhci_bcm2708_driver = {
1572 + .driver = {
1573 + .name = DRIVER_NAME,
1574 + .owner = THIS_MODULE,
1575 + },
1576 + .probe = sdhci_bcm2708_probe,
1577 + .remove = sdhci_bcm2708_remove,
1578 +
1579 +#ifdef CONFIG_PM
1580 + .suspend = sdhci_bcm2708_suspend,
1581 + .resume = sdhci_bcm2708_resume,
1582 +#endif
1583 +
1584 +};
1585 +
1586 +/*****************************************************************************\
1587 + * *
1588 + * Driver init/exit *
1589 + * *
1590 +\*****************************************************************************/
1591 +
1592 +static int __init sdhci_drv_init(void)
1593 +{
1594 + return platform_driver_register(&sdhci_bcm2708_driver);
1595 +}
1596 +
1597 +static void __exit sdhci_drv_exit(void)
1598 +{
1599 + platform_driver_unregister(&sdhci_bcm2708_driver);
1600 +}
1601 +
1602 +module_init(sdhci_drv_init);
1603 +module_exit(sdhci_drv_exit);
1604 +
1605 +module_param(allow_highspeed, bool, 0444);
1606 +module_param(emmc_clock_freq, int, 0444);
1607 +module_param(sync_after_dma, bool, 0444);
1608 +module_param(missing_status, bool, 0444);
1609 +module_param(spurious_crc_acmd51, bool, 0444);
1610 +module_param(enable_llm, bool, 0444);
1611 +module_param(cycle_delay, int, 0444);
1612 +module_param(extra_messages, bool, 0444);
1613 +
1614 +MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
1615 +MODULE_AUTHOR("Broadcom <info@broadcom.com>");
1616 +MODULE_LICENSE("GPL v2");
1617 +MODULE_ALIAS("platform:"DRIVER_NAME);
1618 +
1619 +MODULE_PARM_DESC(allow_highspeed, "Allow high speed transfers modes");
1620 +MODULE_PARM_DESC(emmc_clock_freq, "Specify the speed of emmc clock");
1621 +MODULE_PARM_DESC(sync_after_dma, "Block in driver until dma complete");
1622 +MODULE_PARM_DESC(missing_status, "Use the missing status quirk");
1623 +MODULE_PARM_DESC(spurious_crc_acmd51, "Use the spurious crc quirk for reading SCR (ACMD51)");
1624 +MODULE_PARM_DESC(enable_llm, "Enable low-latency mode");
1625 +MODULE_PARM_DESC(extra_messages, "Enable more sdcard warning messages");
1626 +
1627 +
1628 --- a/drivers/mmc/host/sdhci.c
1629 +++ b/drivers/mmc/host/sdhci.c
1630 @@ -28,6 +28,7 @@
1631 #include <linux/mmc/mmc.h>
1632 #include <linux/mmc/host.h>
1633 #include <linux/mmc/card.h>
1634 +#include <linux/mmc/sd.h>
1635 #include <linux/mmc/slot-gpio.h>
1636
1637 #include "sdhci.h"
1638 @@ -123,6 +124,91 @@ static void sdhci_dumpregs(struct sdhci_
1639 * Low level functions *
1640 * *
1641 \*****************************************************************************/
1642 +extern bool enable_llm;
1643 +static int sdhci_locked=0;
1644 +void sdhci_spin_lock(struct sdhci_host *host)
1645 +{
1646 + spin_lock(&host->lock);
1647 +#ifdef CONFIG_PREEMPT
1648 + if(enable_llm)
1649 + {
1650 + disable_irq_nosync(host->irq);
1651 + if(host->second_irq)
1652 + disable_irq_nosync(host->second_irq);
1653 + local_irq_enable();
1654 + }
1655 +#endif
1656 +}
1657 +
1658 +void sdhci_spin_unlock(struct sdhci_host *host)
1659 +{
1660 +#ifdef CONFIG_PREEMPT
1661 + if(enable_llm)
1662 + {
1663 + local_irq_disable();
1664 + if(host->second_irq)
1665 + enable_irq(host->second_irq);
1666 + enable_irq(host->irq);
1667 + }
1668 +#endif
1669 + spin_unlock(&host->lock);
1670 +}
1671 +
1672 +void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags)
1673 +{
1674 +#ifdef CONFIG_PREEMPT
1675 + if(enable_llm)
1676 + {
1677 + while(sdhci_locked)
1678 + {
1679 + preempt_schedule();
1680 + }
1681 + spin_lock_irqsave(&host->lock,*flags);
1682 + disable_irq(host->irq);
1683 + if(host->second_irq)
1684 + disable_irq(host->second_irq);
1685 + local_irq_enable();
1686 + }
1687 + else
1688 +#endif
1689 + spin_lock_irqsave(&host->lock,*flags);
1690 +}
1691 +
1692 +void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags)
1693 +{
1694 +#ifdef CONFIG_PREEMPT
1695 + if(enable_llm)
1696 + {
1697 + local_irq_disable();
1698 + if(host->second_irq)
1699 + enable_irq(host->second_irq);
1700 + enable_irq(host->irq);
1701 + }
1702 +#endif
1703 + spin_unlock_irqrestore(&host->lock,flags);
1704 +}
1705 +
1706 +static void sdhci_spin_enable_schedule(struct sdhci_host *host)
1707 +{
1708 +#ifdef CONFIG_PREEMPT
1709 + if(enable_llm)
1710 + {
1711 + sdhci_locked = 1;
1712 + preempt_enable();
1713 + }
1714 +#endif
1715 +}
1716 +
1717 +static void sdhci_spin_disable_schedule(struct sdhci_host *host)
1718 +{
1719 +#ifdef CONFIG_PREEMPT
1720 + if(enable_llm)
1721 + {
1722 + preempt_disable();
1723 + sdhci_locked = 0;
1724 + }
1725 +#endif
1726 +}
1727
1728 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
1729 {
1730 @@ -288,7 +374,7 @@ static void sdhci_led_control(struct led
1731 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
1732 unsigned long flags;
1733
1734 - spin_lock_irqsave(&host->lock, flags);
1735 + sdhci_spin_lock_irqsave(host, &flags);
1736
1737 if (host->runtime_suspended)
1738 goto out;
1739 @@ -298,7 +384,7 @@ static void sdhci_led_control(struct led
1740 else
1741 sdhci_activate_led(host);
1742 out:
1743 - spin_unlock_irqrestore(&host->lock, flags);
1744 + sdhci_spin_unlock_irqrestore(host, flags);
1745 }
1746 #endif
1747
1748 @@ -315,7 +401,7 @@ static void sdhci_read_block_pio(struct
1749 u32 uninitialized_var(scratch);
1750 u8 *buf;
1751
1752 - DBG("PIO reading\n");
1753 + DBG("PIO reading %db\n", host->data->blksz);
1754
1755 blksize = host->data->blksz;
1756 chunk = 0;
1757 @@ -360,7 +446,7 @@ static void sdhci_write_block_pio(struct
1758 u32 scratch;
1759 u8 *buf;
1760
1761 - DBG("PIO writing\n");
1762 + DBG("PIO writing %db\n", host->data->blksz);
1763
1764 blksize = host->data->blksz;
1765 chunk = 0;
1766 @@ -399,19 +485,28 @@ static void sdhci_write_block_pio(struct
1767 local_irq_restore(flags);
1768 }
1769
1770 -static void sdhci_transfer_pio(struct sdhci_host *host)
1771 +static void sdhci_transfer_pio(struct sdhci_host *host, u32 intstate)
1772 {
1773 u32 mask;
1774 + u32 state = 0;
1775 + u32 intmask;
1776 + int available;
1777
1778 BUG_ON(!host->data);
1779
1780 if (host->blocks == 0)
1781 return;
1782
1783 - if (host->data->flags & MMC_DATA_READ)
1784 + if (host->data->flags & MMC_DATA_READ) {
1785 mask = SDHCI_DATA_AVAILABLE;
1786 - else
1787 + intmask = SDHCI_INT_DATA_AVAIL;
1788 + } else {
1789 mask = SDHCI_SPACE_AVAILABLE;
1790 + intmask = SDHCI_INT_SPACE_AVAIL;
1791 + }
1792 +
1793 +	/* initially we can see whether we can proceed using intstate */
1794 + available = (intstate & intmask);
1795
1796 /*
1797 * Some controllers (JMicron JMB38x) mess up the buffer bits
1798 @@ -422,7 +517,7 @@ static void sdhci_transfer_pio(struct sd
1799 (host->data->blocks == 1))
1800 mask = ~0;
1801
1802 - while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1803 + while (available) {
1804 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
1805 udelay(100);
1806
1807 @@ -434,9 +529,11 @@ static void sdhci_transfer_pio(struct sd
1808 host->blocks--;
1809 if (host->blocks == 0)
1810 break;
1811 + state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1812 + available = state & mask;
1813 }
1814
1815 - DBG("PIO transfer complete.\n");
1816 + DBG("PIO transfer complete - %d blocks left.\n", host->blocks);
1817 }
1818
1819 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
1820 @@ -709,7 +806,9 @@ static void sdhci_set_transfer_irqs(stru
1821 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1822 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1823
1824 - if (host->flags & SDHCI_REQ_USE_DMA)
1825 + /* platform DMA will begin on receipt of PIO irqs */
1826 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1827 + !(host->flags & SDHCI_USE_PLATDMA))
1828 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
1829 else
1830 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
1831 @@ -741,44 +840,25 @@ static void sdhci_prepare_data(struct sd
1832 host->data_early = 0;
1833 host->data->bytes_xfered = 0;
1834
1835 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
1836 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA | SDHCI_USE_PLATDMA))
1837 host->flags |= SDHCI_REQ_USE_DMA;
1838
1839 /*
1840 * FIXME: This doesn't account for merging when mapping the
1841 * scatterlist.
1842 */
1843 - if (host->flags & SDHCI_REQ_USE_DMA) {
1844 - int broken, i;
1845 - struct scatterlist *sg;
1846 -
1847 - broken = 0;
1848 - if (host->flags & SDHCI_USE_ADMA) {
1849 - if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
1850 - broken = 1;
1851 - } else {
1852 - if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1853 - broken = 1;
1854 - }
1855 -
1856 - if (unlikely(broken)) {
1857 - for_each_sg(data->sg, sg, data->sg_len, i) {
1858 - if (sg->length & 0x3) {
1859 - DBG("Reverting to PIO because of "
1860 - "transfer size (%d)\n",
1861 - sg->length);
1862 - host->flags &= ~SDHCI_REQ_USE_DMA;
1863 - break;
1864 - }
1865 - }
1866 - }
1867 - }
1868
1869 /*
1870 * The assumption here being that alignment is the same after
1871 * translation to device address space.
1872 */
1873 - if (host->flags & SDHCI_REQ_USE_DMA) {
1874 + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) ==
1875 + (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) {
1876 +
1877 + if (! sdhci_platdma_dmaable(host, data))
1878 + host->flags &= ~SDHCI_REQ_USE_DMA;
1879 +
1880 + } else if (host->flags & SDHCI_REQ_USE_DMA) {
1881 int broken, i;
1882 struct scatterlist *sg;
1883
1884 @@ -837,7 +917,8 @@ static void sdhci_prepare_data(struct sd
1885 */
1886 WARN_ON(1);
1887 host->flags &= ~SDHCI_REQ_USE_DMA;
1888 - } else {
1889 + } else
1890 + if (!(host->flags & SDHCI_USE_PLATDMA)) {
1891 WARN_ON(sg_cnt != 1);
1892 sdhci_writel(host, sg_dma_address(data->sg),
1893 SDHCI_DMA_ADDRESS);
1894 @@ -853,11 +934,13 @@ static void sdhci_prepare_data(struct sd
1895 if (host->version >= SDHCI_SPEC_200) {
1896 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1897 ctrl &= ~SDHCI_CTRL_DMA_MASK;
1898 + if (! (host->flags & SDHCI_USE_PLATDMA)) {
1899 if ((host->flags & SDHCI_REQ_USE_DMA) &&
1900 (host->flags & SDHCI_USE_ADMA))
1901 ctrl |= SDHCI_CTRL_ADMA32;
1902 else
1903 ctrl |= SDHCI_CTRL_SDMA;
1904 + }
1905 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1906 }
1907
1908 @@ -909,7 +992,8 @@ static void sdhci_set_transfer_mode(stru
1909
1910 if (data->flags & MMC_DATA_READ)
1911 mode |= SDHCI_TRNS_READ;
1912 - if (host->flags & SDHCI_REQ_USE_DMA)
1913 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1914 + !(host->flags & SDHCI_USE_PLATDMA))
1915 mode |= SDHCI_TRNS_DMA;
1916
1917 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1918 @@ -925,13 +1009,16 @@ static void sdhci_finish_data(struct sdh
1919 host->data = NULL;
1920
1921 if (host->flags & SDHCI_REQ_USE_DMA) {
1922 - if (host->flags & SDHCI_USE_ADMA)
1923 - sdhci_adma_table_post(host, data);
1924 - else {
1925 + /* we may have to abandon an ongoing platform DMA */
1926 + if (host->flags & SDHCI_USE_PLATDMA)
1927 + sdhci_platdma_reset(host, data);
1928 +
1929 + if (host->flags & (SDHCI_USE_PLATDMA | SDHCI_USE_SDMA)) {
1930 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1931 data->sg_len, (data->flags & MMC_DATA_READ) ?
1932 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1933 - }
1934 + } else if (host->flags & SDHCI_USE_ADMA)
1935 + sdhci_adma_table_post(host, data);
1936 }
1937
1938 /*
1939 @@ -984,6 +1071,12 @@ static void sdhci_send_command(struct sd
1940 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1941 mask |= SDHCI_DATA_INHIBIT;
1942
1943 + if(host->ops->missing_status && (cmd->opcode == MMC_SEND_STATUS)) {
1944 + timeout = 5000; // Really obscenely large delay to send the status, due to bug in controller
1945 + // which might cause the STATUS command to get stuck when a data operation is in flow
1946 + mask |= SDHCI_DATA_INHIBIT;
1947 + }
1948 +
1949 /* We shouldn't wait for data inihibit for stop commands, even
1950 though they might use busy signaling */
1951 if (host->mrq->data && (cmd == host->mrq->data->stop))
1952 @@ -999,12 +1092,20 @@ static void sdhci_send_command(struct sd
1953 return;
1954 }
1955 timeout--;
1956 + sdhci_spin_enable_schedule(host);
1957 mdelay(1);
1958 + sdhci_spin_disable_schedule(host);
1959 }
1960 + DBG("send cmd %d - wait 0x%X irq 0x%x\n", cmd->opcode, mask,
1961 + sdhci_readl(host, SDHCI_INT_STATUS));
1962
1963 mod_timer(&host->timer, jiffies + 10 * HZ);
1964
1965 host->cmd = cmd;
1966 + if (host->last_cmdop == MMC_APP_CMD)
1967 + host->last_cmdop = -cmd->opcode;
1968 + else
1969 + host->last_cmdop = cmd->opcode;
1970
1971 sdhci_prepare_data(host, cmd);
1972
1973 @@ -1220,7 +1321,9 @@ clock_set:
1974 return;
1975 }
1976 timeout--;
1977 + sdhci_spin_enable_schedule(host);
1978 mdelay(1);
1979 + sdhci_spin_disable_schedule(host);
1980 }
1981
1982 clk |= SDHCI_CLOCK_CARD_EN;
1983 @@ -1316,7 +1419,7 @@ static void sdhci_request(struct mmc_hos
1984
1985 sdhci_runtime_pm_get(host);
1986
1987 - spin_lock_irqsave(&host->lock, flags);
1988 + sdhci_spin_lock_irqsave(host, &flags);
1989
1990 WARN_ON(host->mrq != NULL);
1991
1992 @@ -1374,9 +1477,9 @@ static void sdhci_request(struct mmc_hos
1993 mmc->card->type == MMC_TYPE_MMC ?
1994 MMC_SEND_TUNING_BLOCK_HS200 :
1995 MMC_SEND_TUNING_BLOCK;
1996 - spin_unlock_irqrestore(&host->lock, flags);
1997 + sdhci_spin_unlock_irqrestore(host, flags);
1998 sdhci_execute_tuning(mmc, tuning_opcode);
1999 - spin_lock_irqsave(&host->lock, flags);
2000 + sdhci_spin_lock_irqsave(host, &flags);
2001
2002 /* Restore original mmc_request structure */
2003 host->mrq = mrq;
2004 @@ -1390,7 +1493,7 @@ static void sdhci_request(struct mmc_hos
2005 }
2006
2007 mmiowb();
2008 - spin_unlock_irqrestore(&host->lock, flags);
2009 + sdhci_spin_unlock_irqrestore(host, flags);
2010 }
2011
2012 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2013 @@ -1399,10 +1502,10 @@ static void sdhci_do_set_ios(struct sdhc
2014 int vdd_bit = -1;
2015 u8 ctrl;
2016
2017 - spin_lock_irqsave(&host->lock, flags);
2018 + sdhci_spin_lock_irqsave(host, &flags);
2019
2020 if (host->flags & SDHCI_DEVICE_DEAD) {
2021 - spin_unlock_irqrestore(&host->lock, flags);
2022 + sdhci_spin_unlock_irqrestore(host, flags);
2023 if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
2024 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
2025 return;
2026 @@ -1429,9 +1532,9 @@ static void sdhci_do_set_ios(struct sdhc
2027 vdd_bit = sdhci_set_power(host, ios->vdd);
2028
2029 if (host->vmmc && vdd_bit != -1) {
2030 - spin_unlock_irqrestore(&host->lock, flags);
2031 + sdhci_spin_unlock_irqrestore(host, flags);
2032 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
2033 - spin_lock_irqsave(&host->lock, flags);
2034 + sdhci_spin_lock_irqsave(host, &flags);
2035 }
2036
2037 if (host->ops->platform_send_init_74_clocks)
2038 @@ -1470,7 +1573,7 @@ static void sdhci_do_set_ios(struct sdhc
2039 else
2040 ctrl &= ~SDHCI_CTRL_HISPD;
2041
2042 - if (host->version >= SDHCI_SPEC_300) {
2043 + if (host->version >= SDHCI_SPEC_300 && !(host->ops->uhs_broken)) {
2044 u16 clk, ctrl_2;
2045
2046 /* In case of UHS-I modes, set High Speed Enable */
2047 @@ -1569,7 +1672,7 @@ static void sdhci_do_set_ios(struct sdhc
2048 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2049
2050 mmiowb();
2051 - spin_unlock_irqrestore(&host->lock, flags);
2052 + sdhci_spin_unlock_irqrestore(host, flags);
2053 }
2054
2055 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2056 @@ -1617,7 +1720,7 @@ static int sdhci_check_ro(struct sdhci_h
2057 unsigned long flags;
2058 int is_readonly;
2059
2060 - spin_lock_irqsave(&host->lock, flags);
2061 + sdhci_spin_lock_irqsave(host, &flags);
2062
2063 if (host->flags & SDHCI_DEVICE_DEAD)
2064 is_readonly = 0;
2065 @@ -1627,7 +1730,7 @@ static int sdhci_check_ro(struct sdhci_h
2066 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2067 & SDHCI_WRITE_PROTECT);
2068
2069 - spin_unlock_irqrestore(&host->lock, flags);
2070 + sdhci_spin_unlock_irqrestore(host, flags);
2071
2072 /* This quirk needs to be replaced by a callback-function later */
2073 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2074 @@ -1700,9 +1803,9 @@ static void sdhci_enable_sdio_irq(struct
2075 struct sdhci_host *host = mmc_priv(mmc);
2076 unsigned long flags;
2077
2078 - spin_lock_irqsave(&host->lock, flags);
2079 + sdhci_spin_lock_irqsave(host, &flags);
2080 sdhci_enable_sdio_irq_nolock(host, enable);
2081 - spin_unlock_irqrestore(&host->lock, flags);
2082 + sdhci_spin_unlock_irqrestore(host, flags);
2083 }
2084
2085 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
2086 @@ -2046,7 +2149,7 @@ static void sdhci_card_event(struct mmc_
2087 struct sdhci_host *host = mmc_priv(mmc);
2088 unsigned long flags;
2089
2090 - spin_lock_irqsave(&host->lock, flags);
2091 + sdhci_spin_lock_irqsave(host, &flags);
2092
2093 /* Check host->mrq first in case we are runtime suspended */
2094 if (host->mrq &&
2095 @@ -2063,7 +2166,7 @@ static void sdhci_card_event(struct mmc_
2096 tasklet_schedule(&host->finish_tasklet);
2097 }
2098
2099 - spin_unlock_irqrestore(&host->lock, flags);
2100 + sdhci_spin_unlock_irqrestore(host, flags);
2101 }
2102
2103 static const struct mmc_host_ops sdhci_ops = {
2104 @@ -2102,14 +2205,14 @@ static void sdhci_tasklet_finish(unsigne
2105
2106 host = (struct sdhci_host*)param;
2107
2108 - spin_lock_irqsave(&host->lock, flags);
2109 + sdhci_spin_lock_irqsave(host, &flags);
2110
2111 /*
2112 * If this tasklet gets rescheduled while running, it will
2113 * be run again afterwards but without any active request.
2114 */
2115 if (!host->mrq) {
2116 - spin_unlock_irqrestore(&host->lock, flags);
2117 + sdhci_spin_unlock_irqrestore(host, flags);
2118 return;
2119 }
2120
2121 @@ -2147,7 +2250,7 @@ static void sdhci_tasklet_finish(unsigne
2122 #endif
2123
2124 mmiowb();
2125 - spin_unlock_irqrestore(&host->lock, flags);
2126 + sdhci_spin_unlock_irqrestore(host, flags);
2127
2128 mmc_request_done(host->mmc, mrq);
2129 sdhci_runtime_pm_put(host);
2130 @@ -2160,11 +2263,11 @@ static void sdhci_timeout_timer(unsigned
2131
2132 host = (struct sdhci_host*)data;
2133
2134 - spin_lock_irqsave(&host->lock, flags);
2135 + sdhci_spin_lock_irqsave(host, &flags);
2136
2137 if (host->mrq) {
2138 pr_err("%s: Timeout waiting for hardware "
2139 - "interrupt.\n", mmc_hostname(host->mmc));
2140 + "interrupt - cmd%d.\n", mmc_hostname(host->mmc), host->last_cmdop);
2141 sdhci_dumpregs(host);
2142
2143 if (host->data) {
2144 @@ -2181,7 +2284,7 @@ static void sdhci_timeout_timer(unsigned
2145 }
2146
2147 mmiowb();
2148 - spin_unlock_irqrestore(&host->lock, flags);
2149 + sdhci_spin_unlock_irqrestore(host, flags);
2150 }
2151
2152 static void sdhci_tuning_timer(unsigned long data)
2153 @@ -2191,11 +2294,11 @@ static void sdhci_tuning_timer(unsigned
2154
2155 host = (struct sdhci_host *)data;
2156
2157 - spin_lock_irqsave(&host->lock, flags);
2158 + sdhci_spin_lock_irqsave(host, &flags);
2159
2160 host->flags |= SDHCI_NEEDS_RETUNING;
2161
2162 - spin_unlock_irqrestore(&host->lock, flags);
2163 + sdhci_spin_unlock_irqrestore(host, flags);
2164 }
2165
2166 /*****************************************************************************\
2167 @@ -2209,10 +2312,13 @@ static void sdhci_cmd_irq(struct sdhci_h
2168 BUG_ON(intmask == 0);
2169
2170 if (!host->cmd) {
2171 + if (!(host->ops->extra_ints)) {
2172 pr_err("%s: Got command interrupt 0x%08x even "
2173 "though no command operation was in progress.\n",
2174 mmc_hostname(host->mmc), (unsigned)intmask);
2175 sdhci_dumpregs(host);
2176 + } else
2177 + DBG("cmd irq 0x%08x cmd complete\n", (unsigned)intmask);
2178 return;
2179 }
2180
2181 @@ -2282,6 +2388,19 @@ static void sdhci_show_adma_error(struct
2182 static void sdhci_show_adma_error(struct sdhci_host *host) { }
2183 #endif
2184
2185 +static void sdhci_data_end(struct sdhci_host *host)
2186 +{
2187 + if (host->cmd) {
2188 + /*
2189 + * Data managed to finish before the
2190 + * command completed. Make sure we do
2191 + * things in the proper order.
2192 + */
2193 + host->data_early = 1;
2194 + } else
2195 + sdhci_finish_data(host);
2196 +}
2197 +
2198 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2199 {
2200 u32 command;
2201 @@ -2311,23 +2430,39 @@ static void sdhci_data_irq(struct sdhci_
2202 }
2203 }
2204
2205 + if (!(host->ops->extra_ints)) {
2206 pr_err("%s: Got data interrupt 0x%08x even "
2207 "though no data operation was in progress.\n",
2208 mmc_hostname(host->mmc), (unsigned)intmask);
2209 sdhci_dumpregs(host);
2210 + } else
2211 + DBG("data irq 0x%08x but no data\n", (unsigned)intmask);
2212
2213 return;
2214 }
2215
2216 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2217 host->data->error = -ETIMEDOUT;
2218 - else if (intmask & SDHCI_INT_DATA_END_BIT)
2219 + else if (intmask & SDHCI_INT_DATA_END_BIT) {
2220 + DBG("end error in cmd %d\n", host->last_cmdop);
2221 + if (host->ops->spurious_crc_acmd51 &&
2222 + host->last_cmdop == -SD_APP_SEND_SCR) {
2223 + DBG("ignoring spurious data_end_bit error\n");
2224 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2225 + } else
2226 host->data->error = -EILSEQ;
2227 - else if ((intmask & SDHCI_INT_DATA_CRC) &&
2228 + } else if ((intmask & SDHCI_INT_DATA_CRC) &&
2229 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2230 - != MMC_BUS_TEST_R)
2231 + != MMC_BUS_TEST_R) {
2232 + DBG("crc error in cmd %d\n", host->last_cmdop);
2233 + if (host->ops->spurious_crc_acmd51 &&
2234 + host->last_cmdop == -SD_APP_SEND_SCR) {
2235 + DBG("ignoring spurious data_crc_bit error\n");
2236 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2237 + } else {
2238 host->data->error = -EILSEQ;
2239 - else if (intmask & SDHCI_INT_ADMA_ERROR) {
2240 + }
2241 + } else if (intmask & SDHCI_INT_ADMA_ERROR) {
2242 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2243 sdhci_show_adma_error(host);
2244 host->data->error = -EIO;
2245 @@ -2335,11 +2470,18 @@ static void sdhci_data_irq(struct sdhci_
2246 host->ops->adma_workaround(host, intmask);
2247 }
2248
2249 - if (host->data->error)
2250 + if (host->data->error) {
2251 + DBG("finish request early on error %d\n", host->data->error);
2252 sdhci_finish_data(host);
2253 - else {
2254 - if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2255 - sdhci_transfer_pio(host);
2256 + } else {
2257 + if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
2258 + if (host->flags & SDHCI_REQ_USE_DMA) {
2259 + /* possible only in PLATDMA mode */
2260 + sdhci_platdma_avail(host, &intmask,
2261 + &sdhci_data_end);
2262 + } else
2263 + sdhci_transfer_pio(host, intmask);
2264 + }
2265
2266 /*
2267 * We currently don't do anything fancy with DMA
2268 @@ -2368,18 +2510,8 @@ static void sdhci_data_irq(struct sdhci_
2269 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2270 }
2271
2272 - if (intmask & SDHCI_INT_DATA_END) {
2273 - if (host->cmd) {
2274 - /*
2275 - * Data managed to finish before the
2276 - * command completed. Make sure we do
2277 - * things in the proper order.
2278 - */
2279 - host->data_early = 1;
2280 - } else {
2281 - sdhci_finish_data(host);
2282 - }
2283 - }
2284 + if (intmask & SDHCI_INT_DATA_END)
2285 + sdhci_data_end(host);
2286 }
2287 }
2288
2289 @@ -2390,10 +2522,10 @@ static irqreturn_t sdhci_irq(int irq, vo
2290 u32 intmask, unexpected = 0;
2291 int cardint = 0, max_loops = 16;
2292
2293 - spin_lock(&host->lock);
2294 + sdhci_spin_lock(host);
2295
2296 if (host->runtime_suspended) {
2297 - spin_unlock(&host->lock);
2298 + sdhci_spin_unlock(host);
2299 pr_warning("%s: got irq while runtime suspended\n",
2300 mmc_hostname(host->mmc));
2301 return IRQ_HANDLED;
2302 @@ -2435,6 +2567,22 @@ again:
2303 tasklet_schedule(&host->card_tasklet);
2304 }
2305
2306 + if (intmask & SDHCI_INT_ERROR_MASK & ~SDHCI_INT_ERROR)
2307 + DBG("controller reports error 0x%x -"
2308 + "%s%s%s%s%s%s%s%s%s%s",
2309 + intmask,
2310 + intmask & SDHCI_INT_TIMEOUT? " timeout": "",
2311 + intmask & SDHCI_INT_CRC ? " crc": "",
2312 + intmask & SDHCI_INT_END_BIT? " endbit": "",
2313 + intmask & SDHCI_INT_INDEX? " index": "",
2314 + intmask & SDHCI_INT_DATA_TIMEOUT? " data_timeout": "",
2315 + intmask & SDHCI_INT_DATA_CRC? " data_crc": "",
2316 + intmask & SDHCI_INT_DATA_END_BIT? " data_endbit": "",
2317 + intmask & SDHCI_INT_BUS_POWER? " buspower": "",
2318 + intmask & SDHCI_INT_ACMD12ERR? " acmd12": "",
2319 + intmask & SDHCI_INT_ADMA_ERROR? " adma": ""
2320 + );
2321 +
2322 if (intmask & SDHCI_INT_CMD_MASK) {
2323 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
2324 SDHCI_INT_STATUS);
2325 @@ -2449,7 +2597,13 @@ again:
2326
2327 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
2328
2329 - intmask &= ~SDHCI_INT_ERROR;
2330 + if (intmask & SDHCI_INT_ERROR_MASK) {
2331 + /* collect any uncovered errors */
2332 + sdhci_writel(host, intmask & SDHCI_INT_ERROR_MASK,
2333 + SDHCI_INT_STATUS);
2334 + }
2335 +
2336 + intmask &= ~SDHCI_INT_ERROR_MASK;
2337
2338 if (intmask & SDHCI_INT_BUS_POWER) {
2339 pr_err("%s: Card is consuming too much power!\n",
2340 @@ -2475,7 +2629,7 @@ again:
2341 if (intmask && --max_loops)
2342 goto again;
2343 out:
2344 - spin_unlock(&host->lock);
2345 + sdhci_spin_unlock(host);
2346
2347 if (unexpected) {
2348 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2349 @@ -2569,7 +2723,8 @@ int sdhci_resume_host(struct sdhci_host
2350 {
2351 int ret;
2352
2353 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2354 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2355 + SDHCI_USE_PLATDMA)) {
2356 if (host->ops->enable_dma)
2357 host->ops->enable_dma(host);
2358 }
2359 @@ -2636,15 +2791,15 @@ int sdhci_runtime_suspend_host(struct sd
2360 host->flags &= ~SDHCI_NEEDS_RETUNING;
2361 }
2362
2363 - spin_lock_irqsave(&host->lock, flags);
2364 + sdhci_spin_lock_irqsave(host, &flags);
2365 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2366 - spin_unlock_irqrestore(&host->lock, flags);
2367 + sdhci_spin_unlock_irqrestore(host, flags);
2368
2369 synchronize_irq(host->irq);
2370
2371 - spin_lock_irqsave(&host->lock, flags);
2372 + sdhci_spin_lock_irqsave(host, &flags);
2373 host->runtime_suspended = true;
2374 - spin_unlock_irqrestore(&host->lock, flags);
2375 + sdhci_spin_unlock_irqrestore(host, flags);
2376
2377 return ret;
2378 }
2379 @@ -2670,16 +2825,16 @@ int sdhci_runtime_resume_host(struct sdh
2380 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2381 if ((host_flags & SDHCI_PV_ENABLED) &&
2382 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2383 - spin_lock_irqsave(&host->lock, flags);
2384 + sdhci_spin_lock_irqsave(host, &flags);
2385 sdhci_enable_preset_value(host, true);
2386 - spin_unlock_irqrestore(&host->lock, flags);
2387 + sdhci_spin_unlock_irqrestore(host, flags);
2388 }
2389
2390 /* Set the re-tuning expiration flag */
2391 if (host->flags & SDHCI_USING_RETUNING_TIMER)
2392 host->flags |= SDHCI_NEEDS_RETUNING;
2393
2394 - spin_lock_irqsave(&host->lock, flags);
2395 + sdhci_spin_lock_irqsave(host, &flags);
2396
2397 host->runtime_suspended = false;
2398
2399 @@ -2690,7 +2845,7 @@ int sdhci_runtime_resume_host(struct sdh
2400 /* Enable Card Detection */
2401 sdhci_enable_card_detection(host);
2402
2403 - spin_unlock_irqrestore(&host->lock, flags);
2404 + sdhci_spin_unlock_irqrestore(host, flags);
2405
2406 return ret;
2407 }
2408 @@ -2785,14 +2940,16 @@ int sdhci_add_host(struct sdhci_host *ho
2409 host->flags &= ~SDHCI_USE_ADMA;
2410 }
2411
2412 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2413 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2414 + SDHCI_USE_PLATDMA)) {
2415 if (host->ops->enable_dma) {
2416 if (host->ops->enable_dma(host)) {
2417 pr_warning("%s: No suitable DMA "
2418 "available. Falling back to PIO.\n",
2419 mmc_hostname(mmc));
2420 host->flags &=
2421 - ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2422 + ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2423 + SDHCI_USE_PLATDMA);
2424 }
2425 }
2426 }
2427 @@ -3080,6 +3237,12 @@ int sdhci_add_host(struct sdhci_host *ho
2428 SDHCI_MAX_CURRENT_MULTIPLIER;
2429 }
2430
2431 + if(host->ops->voltage_broken) {
2432 + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2433 + // Cannot support UHS modes if we are stuck at 3.3V
2434 + mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50);
2435 + }
2436 +
2437 mmc->ocr_avail = ocr_avail;
2438 mmc->ocr_avail_sdio = ocr_avail;
2439 if (host->ocr_avail_sdio)
2440 @@ -3174,7 +3337,7 @@ int sdhci_add_host(struct sdhci_host *ho
2441 host->tuning_timer.function = sdhci_tuning_timer;
2442 }
2443
2444 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2445 + ret = request_irq(host->irq, sdhci_irq, 0,//IRQF_SHARED,
2446 mmc_hostname(mmc), host);
2447 if (ret) {
2448 pr_err("%s: Failed to request IRQ %d: %d\n",
2449 @@ -3210,6 +3373,7 @@ int sdhci_add_host(struct sdhci_host *ho
2450
2451 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
2452 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
2453 + (host->flags & SDHCI_USE_PLATDMA) ? "platform's DMA" :
2454 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
2455 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
2456
2457 @@ -3237,7 +3401,7 @@ void sdhci_remove_host(struct sdhci_host
2458 unsigned long flags;
2459
2460 if (dead) {
2461 - spin_lock_irqsave(&host->lock, flags);
2462 + sdhci_spin_lock_irqsave(host, &flags);
2463
2464 host->flags |= SDHCI_DEVICE_DEAD;
2465
2466 @@ -3249,7 +3413,7 @@ void sdhci_remove_host(struct sdhci_host
2467 tasklet_schedule(&host->finish_tasklet);
2468 }
2469
2470 - spin_unlock_irqrestore(&host->lock, flags);
2471 + sdhci_spin_unlock_irqrestore(host, flags);
2472 }
2473
2474 sdhci_disable_card_detection(host);
2475 --- a/drivers/mmc/host/sdhci.h
2476 +++ b/drivers/mmc/host/sdhci.h
2477 @@ -289,6 +289,20 @@ struct sdhci_ops {
2478 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
2479 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
2480 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
2481 +
2482 + int (*pdma_able)(struct sdhci_host *host,
2483 + struct mmc_data *data);
2484 + void (*pdma_avail)(struct sdhci_host *host,
2485 + unsigned int *ref_intmask,
2486 + void(*complete)(struct sdhci_host *));
2487 + void (*pdma_reset)(struct sdhci_host *host,
2488 + struct mmc_data *data);
2489 + unsigned int (*extra_ints)(struct sdhci_host *host);
2490 + unsigned int (*spurious_crc_acmd51)(struct sdhci_host *host);
2491 + unsigned int (*voltage_broken)(struct sdhci_host *host);
2492 + unsigned int (*uhs_broken)(struct sdhci_host *host);
2493 + unsigned int (*missing_status)(struct sdhci_host *host);
2494 +
2495 void (*hw_reset)(struct sdhci_host *host);
2496 void (*platform_suspend)(struct sdhci_host *host);
2497 void (*platform_resume)(struct sdhci_host *host);
2498 @@ -399,9 +413,38 @@ extern int sdhci_resume_host(struct sdhc
2499 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
2500 #endif
2501
2502 +static inline int /*bool*/
2503 +sdhci_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
2504 +{
2505 + if (host->ops->pdma_able)
2506 + return host->ops->pdma_able(host, data);
2507 + else
2508 + return 1;
2509 +}
2510 +static inline void
2511 +sdhci_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
2512 + void(*completion_callback)(struct sdhci_host *))
2513 +{
2514 + if (host->ops->pdma_avail)
2515 + host->ops->pdma_avail(host, ref_intmask, completion_callback);
2516 +}
2517 +
2518 +static inline void
2519 +sdhci_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
2520 +{
2521 + if (host->ops->pdma_reset)
2522 + host->ops->pdma_reset(host, data);
2523 +}
2524 +
2525 #ifdef CONFIG_PM_RUNTIME
2526 extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
2527 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
2528 #endif
2529
2530 +extern void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags);
2531 +extern void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags);
2532 +extern void sdhci_spin_lock(struct sdhci_host *host);
2533 +extern void sdhci_spin_unlock(struct sdhci_host *host);
2534 +
2535 +
2536 #endif /* __SDHCI_HW_H */
2537 --- a/include/linux/mmc/host.h
2538 +++ b/include/linux/mmc/host.h
2539 @@ -281,6 +281,7 @@ struct mmc_host {
2540 #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
2541 MMC_CAP2_PACKED_WR)
2542 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
2543 +#define MMC_CAP2_FORCE_MULTIBLOCK (1 << 31) /* Always use multiblock transfers */
2544
2545 mmc_pm_flag_t pm_caps; /* supported pm features */
2546
2547 --- a/include/linux/mmc/sdhci.h
2548 +++ b/include/linux/mmc/sdhci.h
2549 @@ -97,6 +97,7 @@ struct sdhci_host {
2550 #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
2551
2552 int irq; /* Device IRQ */
2553 + int second_irq; /* Additional IRQ to disable/enable in low-latency mode */
2554 void __iomem *ioaddr; /* Mapped address */
2555
2556 const struct sdhci_ops *ops; /* Low level hw interface */
2557 @@ -128,6 +129,7 @@ struct sdhci_host {
2558 #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
2559 #define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
2560 #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
2561 +#define SDHCI_USE_PLATDMA (1<<12) /* Host uses 3rd party DMA */
2562
2563 unsigned int version; /* SDHCI spec. version */
2564
2565 @@ -142,6 +144,7 @@ struct sdhci_host {
2566
2567 struct mmc_request *mrq; /* Current request */
2568 struct mmc_command *cmd; /* Current command */
2569 + int last_cmdop; /* Opcode of last cmd sent */
2570 struct mmc_data *data; /* Current data request */
2571 unsigned int data_early:1; /* Data finished before cmd */
2572