kernel: update 3.9 to 3.9.11
[openwrt/svn-archive/archive.git] / target / linux / brcm2708 / patches-3.10 / 014-bcm2708-sdhci-driver.patch
1 diff -urwN linux-3.10/drivers/mmc/card/block.c linux-rpi-3.10.y/drivers/mmc/card/block.c
2 --- linux-3.10/drivers/mmc/card/block.c 2013-06-30 23:13:29.000000000 +0100
3 +++ linux-rpi-3.10.y/drivers/mmc/card/block.c 2013-07-06 15:25:50.000000000 +0100
4 @@ -1294,7 +1294,7 @@
5 brq->data.blocks = 1;
6 }
7
8 - if (brq->data.blocks > 1 || do_rel_wr) {
9 + if (brq->data.blocks > 1 || do_rel_wr || card->host->caps2 & MMC_CAP2_FORCE_MULTIBLOCK) {
10 /* SPI multiblock writes terminate using a special
11 * token, not a STOP_TRANSMISSION request.
12 */
13 diff -urwN linux-3.10/drivers/mmc/core/sd.c linux-rpi-3.10.y/drivers/mmc/core/sd.c
14 --- linux-3.10/drivers/mmc/core/sd.c 2013-06-30 23:13:29.000000000 +0100
15 +++ linux-rpi-3.10.y/drivers/mmc/core/sd.c 2013-07-06 15:25:50.000000000 +0100
16 @@ -13,6 +13,8 @@
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 +#include <linux/jiffies.h>
21 +#include <linux/nmi.h>
22
23 #include <linux/mmc/host.h>
24 #include <linux/mmc/card.h>
25 @@ -58,6 +60,15 @@
26 __res & __mask; \
27 })
28
29 +// timeout for tries
30 +static const unsigned long retry_timeout_ms= 10*1000;
31 +
32 +// try at least 10 times, even if timeout is reached
33 +static const int retry_min_tries= 10;
34 +
35 +// delay between tries
36 +static const unsigned long retry_delay_ms= 10;
37 +
38 /*
39 * Given the decoded CSD structure, decode the raw CID to our CID structure.
40 */
41 @@ -210,12 +221,62 @@
42 }
43
44 /*
45 - * Fetch and process SD Status register.
46 + * Fetch and process SD Configuration Register.
47 + */
48 +static int mmc_read_scr(struct mmc_card *card)
49 +{
50 + unsigned long timeout_at;
51 + int err, tries;
52 +
53 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
54 + tries= 0;
55 +
56 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
57 + {
58 + unsigned long delay_at;
59 + tries++;
60 +
61 + err = mmc_app_send_scr(card, card->raw_scr);
62 + if( !err )
63 + break; // success!!!
64 +
65 + touch_nmi_watchdog(); // we are still alive!
66 +
67 + // delay
68 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
69 + while( time_before( jiffies, delay_at ) )
70 + {
71 + mdelay( 1 );
72 + touch_nmi_watchdog(); // we are still alive!
73 + }
74 + }
75 +
76 + if( err)
77 + {
78 + pr_err("%s: failed to read SD Configuration register (SCR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
79 + return err;
80 + }
81 +
82 + if( tries > 1 )
83 + {
84 + pr_info("%s: could read SD Configuration register (SCR) at the %dth attempt\n", mmc_hostname(card->host), tries );
85 + }
86 +
87 + err = mmc_decode_scr(card);
88 + if (err)
89 + return err;
90 +
91 + return err;
92 +}
93 +
94 +/*
95 + * Fetch and process SD Status Register.
96 */
97 static int mmc_read_ssr(struct mmc_card *card)
98 {
99 + unsigned long timeout_at;
100 unsigned int au, es, et, eo;
101 - int err, i;
102 + int err, i, tries;
103 u32 *ssr;
104
105 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
106 @@ -228,14 +289,40 @@
107 if (!ssr)
108 return -ENOMEM;
109
110 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
111 + tries= 0;
112 +
113 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
114 + {
115 + unsigned long delay_at;
116 + tries++;
117 +
118 err = mmc_app_sd_status(card, ssr);
119 - if (err) {
120 - pr_warning("%s: problem reading SD Status "
121 - "register.\n", mmc_hostname(card->host));
122 - err = 0;
123 + if( !err )
124 + break; // success!!!
125 +
126 + touch_nmi_watchdog(); // we are still alive!
127 +
128 + // delay
129 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
130 + while( time_before( jiffies, delay_at ) )
131 + {
132 + mdelay( 1 );
133 + touch_nmi_watchdog(); // we are still alive!
134 + }
135 + }
136 +
137 + if( err)
138 + {
139 + pr_err("%s: failed to read SD Status register (SSR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
140 goto out;
141 }
142
143 + if( tries > 1 )
144 + {
145 + pr_info("%s: read SD Status register (SSR) after %d attempts\n", mmc_hostname(card->host), tries );
146 + }
147 +
148 for (i = 0; i < 16; i++)
149 ssr[i] = be32_to_cpu(ssr[i]);
150
151 @@ -808,13 +895,9 @@
152
153 if (!reinit) {
154 /*
155 - * Fetch SCR from card.
156 + * Fetch and decode SD Configuration register.
157 */
158 - err = mmc_app_send_scr(card, card->raw_scr);
159 - if (err)
160 - return err;
161 -
162 - err = mmc_decode_scr(card);
163 + err = mmc_read_scr(card);
164 if (err)
165 return err;
166
167 diff -urwN linux-3.10/drivers/mmc/host/Kconfig linux-rpi-3.10.y/drivers/mmc/host/Kconfig
168 --- linux-3.10/drivers/mmc/host/Kconfig 2013-06-30 23:13:29.000000000 +0100
169 +++ linux-rpi-3.10.y/drivers/mmc/host/Kconfig 2013-07-06 15:25:50.000000000 +0100
170 @@ -249,6 +249,27 @@
171
172 YMMV.
173
174 +config MMC_SDHCI_BCM2708
175 + tristate "SDHCI support on BCM2708"
176 + depends on MMC_SDHCI && MACH_BCM2708
177 + select MMC_SDHCI_IO_ACCESSORS
178 + help
179 + This selects the Secure Digital Host Controller Interface (SDHCI)
180 + often referred to as the eMMC block.
181 +
182 + If you have a controller with this interface, say Y or M here.
183 +
184 + If unsure, say N.
185 +
186 +config MMC_SDHCI_BCM2708_DMA
187 + bool "DMA support on BCM2708 Arasan controller"
188 + depends on MMC_SDHCI_BCM2708
189 + help
190 + Enable DMA support on the Arasan SDHCI controller in Broadcom 2708
191 + based chips.
192 +
193 + If unsure, say N.
194 +
195 config MMC_SDHCI_BCM2835
196 tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
197 depends on ARCH_BCM2835
198 diff -urwN linux-3.10/drivers/mmc/host/Makefile linux-rpi-3.10.y/drivers/mmc/host/Makefile
199 --- linux-3.10/drivers/mmc/host/Makefile 2013-06-30 23:13:29.000000000 +0100
200 +++ linux-rpi-3.10.y/drivers/mmc/host/Makefile 2013-07-06 15:25:50.000000000 +0100
201 @@ -15,6 +15,7 @@
202 obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
203 obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
204 obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
205 +obj-$(CONFIG_MMC_SDHCI_BCM2708) += sdhci-bcm2708.o
206 obj-$(CONFIG_MMC_WBSD) += wbsd.o
207 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
208 obj-$(CONFIG_MMC_OMAP) += omap.o
209 diff -urwN linux-3.10/drivers/mmc/host/sdhci-bcm2708.c linux-rpi-3.10.y/drivers/mmc/host/sdhci-bcm2708.c
210 --- linux-3.10/drivers/mmc/host/sdhci-bcm2708.c 1970-01-01 01:00:00.000000000 +0100
211 +++ linux-rpi-3.10.y/drivers/mmc/host/sdhci-bcm2708.c 2013-07-06 15:25:50.000000000 +0100
212 @@ -0,0 +1,1420 @@
213 +/*
214 + * sdhci-bcm2708.c Support for SDHCI device on BCM2708
215 + * Copyright (c) 2010 Broadcom
216 + *
217 + * This program is free software; you can redistribute it and/or modify
218 + * it under the terms of the GNU General Public License version 2 as
219 + * published by the Free Software Foundation.
220 + *
221 + * This program is distributed in the hope that it will be useful,
222 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
223 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
224 + * GNU General Public License for more details.
225 + *
226 + * You should have received a copy of the GNU General Public License
227 + * along with this program; if not, write to the Free Software
228 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
229 + */
230 +
231 +/* Supports:
232 + * SDHCI platform device - Arasan SD controller in BCM2708
233 + *
234 + * Inspired by sdhci-pci.c, by Pierre Ossman
235 + */
236 +
237 +#include <linux/delay.h>
238 +#include <linux/highmem.h>
239 +#include <linux/platform_device.h>
240 +#include <linux/module.h>
241 +#include <linux/mmc/mmc.h>
242 +#include <linux/mmc/host.h>
243 +#include <linux/mmc/sd.h>
244 +
245 +#include <linux/io.h>
246 +#include <linux/dma-mapping.h>
247 +#include <mach/dma.h>
248 +
249 +#include "sdhci.h"
250 +
251 +/*****************************************************************************\
252 + * *
253 + * Configuration *
254 + * *
255 +\*****************************************************************************/
256 +
257 +#define DRIVER_NAME "bcm2708_sdhci"
258 +
259 +/* for the time being insist on DMA mode - PIO seems not to work */
260 +#ifndef CONFIG_MMC_SDHCI_BCM2708_DMA
261 +#warning Non-DMA (PIO) version of this driver currently unavailable
262 +#endif
263 +#undef CONFIG_MMC_SDHCI_BCM2708_DMA
264 +#define CONFIG_MMC_SDHCI_BCM2708_DMA y
265 +
266 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
267 +/* #define CHECK_DMA_USE */
268 +#endif
269 +//#define LOG_REGISTERS
270 +
271 +#define USE_SCHED_TIME
272 +#define USE_SPACED_WRITES_2CLK 1 /* space consecutive register writes */
273 +#define USE_SOFTWARE_TIMEOUTS 1 /* not hardware timeouts */
274 +#define SOFTWARE_ERASE_TIMEOUT_SEC 30
275 +
276 +#define SDHCI_BCM_DMA_CHAN 4 /* this default is normally overridden */
277 +#define SDHCI_BCM_DMA_WAITS 0 /* delays slowing DMA transfers: 0-31 */
278 +/* We are worried that SD card DMA use may be blocking the AXI bus for others */
279 +
280 +/*! TODO: obtain these from the physical address */
281 +#define DMA_SDHCI_BASE 0x7e300000 /* EMMC register block on Videocore */
282 +#define DMA_SDHCI_BUFFER (DMA_SDHCI_BASE + SDHCI_BUFFER)
283 +
284 +#define BCM2708_SDHCI_SLEEP_TIMEOUT 1000 /* msecs */
285 +
286 +/* Mhz clock that the EMMC core is running at. Should match the platform clockman settings */
287 +#define BCM2708_EMMC_CLOCK_FREQ 50000000
288 +
289 +#define REG_EXRDFIFO_EN 0x80
290 +#define REG_EXRDFIFO_CFG 0x84
291 +
292 +int cycle_delay=2;
293 +
294 +/*****************************************************************************\
295 + * *
296 + * Debug *
297 + * *
298 +\*****************************************************************************/
299 +
300 +
301 +
302 +#define DBG(f, x...) \
303 + pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
304 +// printk(KERN_INFO DRIVER_NAME " [%s()]: " f, __func__,## x)//GRAYG
305 +
306 +
307 +/*****************************************************************************\
308 + * *
309 + * High Precision Time *
310 + * *
311 +\*****************************************************************************/
312 +
313 +#ifdef USE_SCHED_TIME
314 +
315 +#include <mach/frc.h>
316 +
317 +typedef unsigned long hptime_t;
318 +
319 +#define FMT_HPT "lu"
320 +
321 +static inline hptime_t hptime(void)
322 +{
323 + return frc_clock_ticks32();
324 +}
325 +
326 +#define HPTIME_CLK_NS 1000ul
327 +
328 +#else
329 +
330 +typedef unsigned long hptime_t;
331 +
332 +#define FMT_HPT "lu"
333 +
334 +static inline hptime_t hptime(void)
335 +{
336 + return jiffies;
337 +}
338 +
339 +#define HPTIME_CLK_NS (1000000000ul/HZ)
340 +
341 +#endif
342 +
343 +static inline unsigned long int since_ns(hptime_t t)
344 +{
345 + return (unsigned long)((hptime() - t) * HPTIME_CLK_NS);
346 +}
347 +
348 +static bool allow_highspeed = 1;
349 +static int emmc_clock_freq = BCM2708_EMMC_CLOCK_FREQ;
350 +static bool sync_after_dma = 1;
351 +static bool missing_status = 1;
352 +static bool spurious_crc_acmd51 = 0;
353 +bool enable_llm = 1;
354 +bool extra_messages = 0;
355 +
356 +#if 0
357 +static void hptime_test(void)
358 +{
359 + hptime_t now;
360 + hptime_t later;
361 +
362 + now = hptime();
363 + msleep(10);
364 + later = hptime();
365 +
366 + printk(KERN_INFO DRIVER_NAME": 10ms = %"FMT_HPT" clks "
367 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
368 + later-now, now, later,
369 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
370 +
371 + now = hptime();
372 + msleep(1000);
373 + later = hptime();
374 +
375 + printk(KERN_INFO DRIVER_NAME": 1s = %"FMT_HPT" clks "
376 + "(from %"FMT_HPT" to %"FMT_HPT") = %luns\n",
377 + later-now, now, later,
378 + (unsigned long)(HPTIME_CLK_NS * (later - now)));
379 +}
380 +#endif
381 +
382 +/*****************************************************************************\
383 + * *
384 + * SDHCI core callbacks *
385 + * *
386 +\*****************************************************************************/
387 +
388 +
389 +#ifdef CHECK_DMA_USE
390 +/*#define CHECK_DMA_REG_USE*/
391 +#endif
392 +
393 +#ifdef CHECK_DMA_REG_USE
394 +/* we don't expect anything to be using these registers during a
395 + DMA (except the IRQ status) - so check */
396 +static void check_dma_reg_use(struct sdhci_host *host, int reg);
397 +#else
398 +#define check_dma_reg_use(host, reg)
399 +#endif
400 +
401 +
402 +static inline u32 sdhci_bcm2708_raw_readl(struct sdhci_host *host, int reg)
403 +{
404 + return readl(host->ioaddr + reg);
405 +}
406 +
407 +u32 sdhci_bcm2708_readl(struct sdhci_host *host, int reg)
408 +{
409 + u32 l = sdhci_bcm2708_raw_readl(host, reg);
410 +
411 +#ifdef LOG_REGISTERS
412 + printk(KERN_ERR "%s: readl from 0x%02x, value 0x%08x\n",
413 + mmc_hostname(host->mmc), reg, l);
414 +#endif
415 + check_dma_reg_use(host, reg);
416 +
417 + return l;
418 +}
419 +
420 +u16 sdhci_bcm2708_readw(struct sdhci_host *host, int reg)
421 +{
422 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
423 + u32 w = l >> (reg << 3 & 0x18) & 0xffff;
424 +
425 +#ifdef LOG_REGISTERS
426 + printk(KERN_ERR "%s: readw from 0x%02x, value 0x%04x\n",
427 + mmc_hostname(host->mmc), reg, w);
428 +#endif
429 + check_dma_reg_use(host, reg);
430 +
431 + return (u16)w;
432 +}
433 +
434 +u8 sdhci_bcm2708_readb(struct sdhci_host *host, int reg)
435 +{
436 + u32 l = sdhci_bcm2708_raw_readl(host, reg & ~3);
437 + u32 b = l >> (reg << 3 & 0x18) & 0xff;
438 +
439 +#ifdef LOG_REGISTERS
440 + printk(KERN_ERR "%s: readb from 0x%02x, value 0x%02x\n",
441 + mmc_hostname(host->mmc), reg, b);
442 +#endif
443 + check_dma_reg_use(host, reg);
444 +
445 + return (u8)b;
446 +}
447 +
448 +
449 +static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
450 +{
451 + u32 ier;
452 +
453 +#if USE_SPACED_WRITES_2CLK
454 + static bool timeout_disabled = false;
455 + unsigned int ns_2clk = 0;
456 +
457 + /* The Arasan has a bugette whereby it may lose the content of
458 + * successive writes to registers that are within two SD-card clock
459 + * cycles of each other (a clock domain crossing problem).
460 + * It seems, however, that the data register does not have this problem.
461 + * (Which is just as well - otherwise we'd have to nobble the DMA engine
462 + * too)
463 + */
464 + if (reg != SDHCI_BUFFER && host->clock != 0) {
465 + /* host->clock is the clock freq in Hz */
466 + static hptime_t last_write_hpt;
467 + hptime_t now = hptime();
468 + ns_2clk = cycle_delay*1000000/(host->clock/1000);
469 +
470 + if (now == last_write_hpt || now == last_write_hpt+1) {
471 + /* we can't guarantee any significant time has
472 + * passed - we'll have to wait anyway ! */
473 + ndelay(ns_2clk);
474 + } else
475 + {
476 + /* we must have waited at least this many ns: */
477 + unsigned int ns_wait = HPTIME_CLK_NS *
478 + (last_write_hpt - now - 1);
479 + if (ns_wait < ns_2clk)
480 + ndelay(ns_2clk - ns_wait);
481 + }
482 + last_write_hpt = now;
483 + }
484 +#if USE_SOFTWARE_TIMEOUTS
485 + /* The Arasan is clocked for timeouts using the SD clock which is too
486 + * fast for ERASE commands and causes issues. So we disable timeouts
487 + * for ERASE */
488 + if (host->cmd != NULL && host->cmd->opcode == MMC_ERASE &&
489 + reg == (SDHCI_COMMAND & ~3)) {
490 + mod_timer(&host->timer,
491 + jiffies + SOFTWARE_ERASE_TIMEOUT_SEC * HZ);
492 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
493 + ier &= ~SDHCI_INT_DATA_TIMEOUT;
494 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
495 + timeout_disabled = true;
496 + ndelay(ns_2clk);
497 + } else if (timeout_disabled) {
498 + ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
499 + ier |= SDHCI_INT_DATA_TIMEOUT;
500 + writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
501 + timeout_disabled = false;
502 + ndelay(ns_2clk);
503 + }
504 +#endif
505 + writel(val, host->ioaddr + reg);
506 +#else
507 + void __iomem * regaddr = host->ioaddr + reg;
508 +
509 + writel(val, regaddr);
510 +
511 + if (reg != SDHCI_BUFFER && reg != SDHCI_INT_STATUS && host->clock != 0)
512 + {
513 + int timeout = 100000;
514 + while (val != readl(regaddr) && --timeout > 0)
515 + continue;
516 +
517 + if (timeout <= 0)
518 + printk(KERN_ERR "%s: writing 0x%X to reg 0x%X "
519 + "always gives 0x%X\n",
520 + mmc_hostname(host->mmc),
521 + val, reg, readl(regaddr));
522 + BUG_ON(timeout <= 0);
523 + }
524 +#endif
525 +}
526 +
527 +
528 +void sdhci_bcm2708_writel(struct sdhci_host *host, u32 val, int reg)
529 +{
530 +#ifdef LOG_REGISTERS
531 + printk(KERN_ERR "%s: writel to 0x%02x, value 0x%08x\n",
532 + mmc_hostname(host->mmc), reg, val);
533 +#endif
534 + check_dma_reg_use(host, reg);
535 +
536 + sdhci_bcm2708_raw_writel(host, val, reg);
537 +}
538 +
539 +void sdhci_bcm2708_writew(struct sdhci_host *host, u16 val, int reg)
540 +{
541 + static u32 shadow = 0;
542 +
543 + u32 p = reg == SDHCI_COMMAND ? shadow :
544 + sdhci_bcm2708_raw_readl(host, reg & ~3);
545 + u32 s = reg << 3 & 0x18;
546 + u32 l = val << s;
547 + u32 m = 0xffff << s;
548 +
549 +#ifdef LOG_REGISTERS
550 + printk(KERN_ERR "%s: writew to 0x%02x, value 0x%04x\n",
551 + mmc_hostname(host->mmc), reg, val);
552 +#endif
553 +
554 + if (reg == SDHCI_TRANSFER_MODE)
555 + shadow = (p & ~m) | l;
556 + else {
557 + check_dma_reg_use(host, reg);
558 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
559 + }
560 +}
561 +
562 +void sdhci_bcm2708_writeb(struct sdhci_host *host, u8 val, int reg)
563 +{
564 + u32 p = sdhci_bcm2708_raw_readl(host, reg & ~3);
565 + u32 s = reg << 3 & 0x18;
566 + u32 l = val << s;
567 + u32 m = 0xff << s;
568 +
569 +#ifdef LOG_REGISTERS
570 + printk(KERN_ERR "%s: writeb to 0x%02x, value 0x%02x\n",
571 + mmc_hostname(host->mmc), reg, val);
572 +#endif
573 +
574 + check_dma_reg_use(host, reg);
575 + sdhci_bcm2708_raw_writel(host, (p & ~m) | l, reg & ~3);
576 +}
577 +
578 +static unsigned int sdhci_bcm2708_get_max_clock(struct sdhci_host *host)
579 +{
580 + return emmc_clock_freq;
581 +}
582 +
583 +/*****************************************************************************\
584 + * *
585 + * DMA Operation *
586 + * *
587 +\*****************************************************************************/
588 +
589 +struct sdhci_bcm2708_priv {
590 + int dma_chan;
591 + int dma_irq;
592 + void __iomem *dma_chan_base;
593 + struct bcm2708_dma_cb *cb_base; /* DMA control blocks */
594 + dma_addr_t cb_handle;
595 + /* tracking scatter gather progress */
596 + unsigned sg_ix; /* scatter gather list index */
597 + unsigned sg_done; /* bytes in current sg_ix done */
598 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
599 + unsigned char dma_wanted; /* DMA transfer requested */
600 + unsigned char dma_waits; /* wait states in DMAs */
601 +#ifdef CHECK_DMA_USE
602 + unsigned char dmas_pending; /* no of unfinished DMAs */
603 + hptime_t when_started;
604 + hptime_t when_reset;
605 + hptime_t when_stopped;
606 +#endif
607 +#endif
608 + /* signalling the end of a transfer */
609 + void (*complete)(struct sdhci_host *);
610 +};
611 +
612 +#define SDHCI_HOST_PRIV(host) \
613 + (struct sdhci_bcm2708_priv *)((struct sdhci_host *)(host)+1)
614 +
615 +
616 +
617 +#ifdef CHECK_DMA_REG_USE
618 +static void check_dma_reg_use(struct sdhci_host *host, int reg)
619 +{
620 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
621 + if (host_priv->dma_wanted && reg != SDHCI_INT_STATUS) {
622 + printk(KERN_INFO"%s: accessing register 0x%x during DMA\n",
623 + mmc_hostname(host->mmc), reg);
624 + }
625 +}
626 +#endif
627 +
628 +
629 +
630 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
631 +
632 +static void sdhci_clear_set_irqgen(struct sdhci_host *host, u32 clear, u32 set)
633 +{
634 + u32 ier;
635 +
636 + ier = sdhci_bcm2708_raw_readl(host, SDHCI_SIGNAL_ENABLE);
637 + ier &= ~clear;
638 + ier |= set;
639 + /* change which requests generate IRQs - makes no difference to
640 + the content of SDHCI_INT_STATUS, or the need to acknowledge IRQs */
641 + sdhci_bcm2708_raw_writel(host, ier, SDHCI_SIGNAL_ENABLE);
642 +}
643 +
644 +static void sdhci_signal_irqs(struct sdhci_host *host, u32 irqs)
645 +{
646 + sdhci_clear_set_irqgen(host, 0, irqs);
647 +}
648 +
649 +static void sdhci_unsignal_irqs(struct sdhci_host *host, u32 irqs)
650 +{
651 + sdhci_clear_set_irqgen(host, irqs, 0);
652 +}
653 +
654 +
655 +
656 +static void schci_bcm2708_cb_read(struct sdhci_bcm2708_priv *host,
657 + int ix,
658 + dma_addr_t dma_addr, unsigned len,
659 + int /*bool*/ is_last)
660 +{
661 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
662 + unsigned char dmawaits = host->dma_waits;
663 +
664 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
665 + BCM2708_DMA_WAITS(dmawaits) |
666 + BCM2708_DMA_S_DREQ |
667 + BCM2708_DMA_D_WIDTH |
668 + BCM2708_DMA_D_INC;
669 + cb->src = DMA_SDHCI_BUFFER; /* DATA register DMA address */
670 + cb->dst = dma_addr;
671 + cb->length = len;
672 + cb->stride = 0;
673 +
674 + if (is_last) {
675 + cb->info |= BCM2708_DMA_INT_EN |
676 + BCM2708_DMA_WAIT_RESP;
677 + cb->next = 0;
678 + } else
679 + cb->next = host->cb_handle +
680 + (ix+1)*sizeof(struct bcm2708_dma_cb);
681 +
682 + cb->pad[0] = 0;
683 + cb->pad[1] = 0;
684 +}
685 +
686 +static void schci_bcm2708_cb_write(struct sdhci_bcm2708_priv *host,
687 + int ix,
688 + dma_addr_t dma_addr, unsigned len,
689 + int /*bool*/ is_last)
690 +{
691 + struct bcm2708_dma_cb *cb = &host->cb_base[ix];
692 + unsigned char dmawaits = host->dma_waits;
693 +
694 + /* We can make arbitrarily large writes as long as we specify DREQ to
695 + pace the delivery of bytes to the Arasan hardware */
696 + cb->info = BCM2708_DMA_PER_MAP(BCM2708_DMA_DREQ_EMMC) |
697 + BCM2708_DMA_WAITS(dmawaits) |
698 + BCM2708_DMA_D_DREQ |
699 + BCM2708_DMA_S_WIDTH |
700 + BCM2708_DMA_S_INC;
701 + cb->src = dma_addr;
702 + cb->dst = DMA_SDHCI_BUFFER; /* DATA register DMA address */
703 + cb->length = len;
704 + cb->stride = 0;
705 +
706 + if (is_last) {
707 + cb->info |= BCM2708_DMA_INT_EN |
708 + BCM2708_DMA_WAIT_RESP;
709 + cb->next = 0;
710 + } else
711 + cb->next = host->cb_handle +
712 + (ix+1)*sizeof(struct bcm2708_dma_cb);
713 +
714 + cb->pad[0] = 0;
715 + cb->pad[1] = 0;
716 +}
717 +
718 +
719 +static void schci_bcm2708_dma_go(struct sdhci_host *host)
720 +{
721 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
722 + void __iomem *dma_chan_base = host_priv->dma_chan_base;
723 +
724 + BUG_ON(host_priv->dma_wanted);
725 +#ifdef CHECK_DMA_USE
726 + if (host_priv->dma_wanted)
727 + printk(KERN_ERR "%s: DMA already in progress - "
728 + "now %"FMT_HPT", last started %lu "
729 + "reset %lu stopped %lu\n",
730 + mmc_hostname(host->mmc),
731 + hptime(), since_ns(host_priv->when_started),
732 + since_ns(host_priv->when_reset),
733 + since_ns(host_priv->when_stopped));
734 + else if (host_priv->dmas_pending > 0)
735 + printk(KERN_INFO "%s: note - new DMA when %d reset DMAs "
736 + "already in progress - "
737 + "now %"FMT_HPT", started %lu reset %lu stopped %lu\n",
738 + mmc_hostname(host->mmc),
739 + host_priv->dmas_pending,
740 + hptime(), since_ns(host_priv->when_started),
741 + since_ns(host_priv->when_reset),
742 + since_ns(host_priv->when_stopped));
743 + host_priv->dmas_pending += 1;
744 + host_priv->when_started = hptime();
745 +#endif
746 + host_priv->dma_wanted = 1;
747 + DBG("PDMA go - base %p handle %08X\n", dma_chan_base,
748 + host_priv->cb_handle);
749 + bcm_dma_start(dma_chan_base, host_priv->cb_handle);
750 +}
751 +
752 +
753 +static void
754 +sdhci_platdma_read(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
755 +{
756 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
757 +
758 + DBG("PDMA to read %d bytes\n", len);
759 + host_priv->sg_done += len;
760 + schci_bcm2708_cb_read(host_priv, 0, dma_addr, len, 1/*TRUE*/);
761 + schci_bcm2708_dma_go(host);
762 +}
763 +
764 +
765 +static void
766 +sdhci_platdma_write(struct sdhci_host *host, dma_addr_t dma_addr, size_t len)
767 +{
768 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
769 +
770 + DBG("PDMA to write %d bytes\n", len);
771 + //BUG_ON(0 != (len & 0x1ff));
772 +
773 + host_priv->sg_done += len;
774 + schci_bcm2708_cb_write(host_priv, 0, dma_addr, len, 1/*TRUE*/);
775 + schci_bcm2708_dma_go(host);
776 +}
777 +
778 +/*! space is available to receive into or data is available to write
779 + Platform DMA exported function
780 +*/
781 +void
782 +sdhci_bcm2708_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
783 + void(*completion_callback)(struct sdhci_host *host))
784 +{
785 + struct mmc_data *data = host->data;
786 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
787 + int sg_ix;
788 + size_t bytes;
789 + dma_addr_t addr;
790 +
791 + BUG_ON(NULL == data);
792 + BUG_ON(0 == data->blksz);
793 +
794 + host_priv->complete = completion_callback;
795 +
796 + sg_ix = host_priv->sg_ix;
797 + BUG_ON(sg_ix >= data->sg_len);
798 +
799 + /* we can DMA blocks larger than blksz - it may hang the DMA
800 + channel but we are its only user */
801 + bytes = sg_dma_len(&data->sg[sg_ix]) - host_priv->sg_done;
802 + addr = sg_dma_address(&data->sg[sg_ix]) + host_priv->sg_done;
803 +
804 + if (bytes > 0) {
805 + /* We're going to poll for read/write available state until
806 + we finish this DMA
807 + */
808 +
809 + if (data->flags & MMC_DATA_READ) {
810 + if (*ref_intmask & SDHCI_INT_DATA_AVAIL) {
811 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
812 + SDHCI_INT_SPACE_AVAIL);
813 + sdhci_platdma_read(host, addr, bytes);
814 + }
815 + } else {
816 + if (*ref_intmask & SDHCI_INT_SPACE_AVAIL) {
817 + sdhci_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL |
818 + SDHCI_INT_SPACE_AVAIL);
819 + sdhci_platdma_write(host, addr, bytes);
820 + }
821 + }
822 + }
823 + /* else:
824 + we have run out of bytes that need transferring (e.g. we may be in
825 + the middle of the last DMA transfer), or
826 + it is also possible that we've been called when another IRQ is
827 + signalled, even though we've turned off signalling of our own IRQ */
828 +
829 + *ref_intmask &= ~SDHCI_INT_DATA_END;
830 + /* don't let the main sdhci driver act on this .. we'll deal with it
831 + when we respond to the DMA - if one is currently in progress */
832 +}
833 +
834 +/* is it possible to DMA the given mmc_data structure?
835 + Platform DMA exported function
836 +*/
837 +int /*bool*/
838 +sdhci_bcm2708_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
839 +{
840 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
841 + int ok = bcm_sg_suitable_for_dma(data->sg, data->sg_len);
842 +
843 + if (!ok)
844 + DBG("Reverting to PIO - bad cache alignment\n");
845 +
846 + else {
847 + host_priv->sg_ix = 0; /* first SG index */
848 + host_priv->sg_done = 0; /* no bytes done */
849 + }
850 +
851 + return ok;
852 +}
853 +
854 +#include <mach/arm_control.h> //GRAYG
855 +/*! the current SD transaction has been abandoned
856 + We need to tidy up if we were in the middle of a DMA
857 + Platform DMA exported function
858 +*/
859 +void
860 +sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
861 +{
862 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
863 +// unsigned long flags;
864 +
865 + BUG_ON(NULL == host);
866 +
867 +// spin_lock_irqsave(&host->lock, flags);
868 +
869 + if (host_priv->dma_wanted) {
870 + if (NULL == data) {
871 + printk(KERN_ERR "%s: ongoing DMA reset - no data!\n",
872 + mmc_hostname(host->mmc));
873 + BUG_ON(NULL == data);
874 + } else {
875 + struct scatterlist *sg;
876 + int sg_len;
877 + int sg_todo;
878 + int rc;
879 + unsigned long cs;
880 +
881 + sg = data->sg;
882 + sg_len = data->sg_len;
883 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
884 +
885 + cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
886 +
887 + if (!(BCM2708_DMA_ACTIVE & cs))
888 + {
889 + if (extra_messages)
890 + printk(KERN_INFO "%s: missed completion of "
891 + "cmd %d DMA (%d/%d [%d]/[%d]) - "
892 + "ignoring it\n",
893 + mmc_hostname(host->mmc),
894 + host->last_cmdop,
895 + host_priv->sg_done, sg_todo,
896 + host_priv->sg_ix+1, sg_len);
897 + }
898 + else
899 + printk(KERN_INFO "%s: resetting ongoing cmd %d"
900 + "DMA before %d/%d [%d]/[%d] complete\n",
901 + mmc_hostname(host->mmc),
902 + host->last_cmdop,
903 + host_priv->sg_done, sg_todo,
904 + host_priv->sg_ix+1, sg_len);
905 +#ifdef CHECK_DMA_USE
906 + printk(KERN_INFO "%s: now %"FMT_HPT" started %lu "
907 + "last reset %lu last stopped %lu\n",
908 + mmc_hostname(host->mmc),
909 + hptime(), since_ns(host_priv->when_started),
910 + since_ns(host_priv->when_reset),
911 + since_ns(host_priv->when_stopped));
912 + { unsigned long info, debug;
913 + void __iomem *base;
914 + unsigned long pend0, pend1, pend2;
915 +
916 + base = host_priv->dma_chan_base;
917 + cs = readl(base + BCM2708_DMA_CS);
918 + info = readl(base + BCM2708_DMA_INFO);
919 + debug = readl(base + BCM2708_DMA_DEBUG);
920 + printk(KERN_INFO "%s: DMA%d CS=%08lX TI=%08lX "
921 + "DEBUG=%08lX\n",
922 + mmc_hostname(host->mmc),
923 + host_priv->dma_chan,
924 + cs, info, debug);
925 + pend0 = readl(__io_address(ARM_IRQ_PEND0));
926 + pend1 = readl(__io_address(ARM_IRQ_PEND1));
927 + pend2 = readl(__io_address(ARM_IRQ_PEND2));
928 +
929 + printk(KERN_INFO "%s: PEND0=%08lX "
930 + "PEND1=%08lX PEND2=%08lX\n",
931 + mmc_hostname(host->mmc),
932 + pend0, pend1, pend2);
933 +
934 + //gintsts = readl(__io_address(GINTSTS));
935 + //gintmsk = readl(__io_address(GINTMSK));
936 + //printk(KERN_INFO "%s: USB GINTSTS=%08lX"
937 + // "GINTMSK=%08lX\n",
938 + // mmc_hostname(host->mmc), gintsts, gintmsk);
939 + }
940 +#endif
941 + rc = bcm_dma_abort(host_priv->dma_chan_base);
942 + BUG_ON(rc != 0);
943 + }
944 + host_priv->dma_wanted = 0;
945 +#ifdef CHECK_DMA_USE
946 + host_priv->when_reset = hptime();
947 +#endif
948 + }
949 +
950 +// spin_unlock_irqrestore(&host->lock, flags);
951 +}
952 +
953 +
954 +static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
955 + u32 dma_cs)
956 +{
957 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
958 + struct mmc_data *data;
959 + struct scatterlist *sg;
960 + int sg_len;
961 + int sg_ix;
962 + int sg_todo;
963 +// unsigned long flags;
964 +
965 + BUG_ON(NULL == host);
966 +
967 +// spin_lock_irqsave(&host->lock, flags);
968 + data = host->data;
969 +
970 +#ifdef CHECK_DMA_USE
971 + if (host_priv->dmas_pending <= 0)
972 + DBG("on completion no DMA in progress - "
973 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
974 + hptime(), since_ns(host_priv->when_started),
975 + since_ns(host_priv->when_reset),
976 + since_ns(host_priv->when_stopped));
977 + else if (host_priv->dmas_pending > 1)
978 + DBG("still %d DMA in progress after completion - "
979 + "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
980 + host_priv->dmas_pending - 1,
981 + hptime(), since_ns(host_priv->when_started),
982 + since_ns(host_priv->when_reset),
983 + since_ns(host_priv->when_stopped));
984 + BUG_ON(host_priv->dmas_pending <= 0);
985 + host_priv->dmas_pending -= 1;
986 + host_priv->when_stopped = hptime();
987 +#endif
988 + host_priv->dma_wanted = 0;
989 +
990 + if (NULL == data) {
991 + DBG("PDMA unused completion - status 0x%X\n", dma_cs);
992 +// spin_unlock_irqrestore(&host->lock, flags);
993 + return;
994 + }
995 + sg = data->sg;
996 + sg_len = data->sg_len;
997 + sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);
998 +
999 + DBG("PDMA complete %d/%d [%d]/[%d]..\n",
1000 + host_priv->sg_done, sg_todo,
1001 + host_priv->sg_ix+1, sg_len);
1002 +
1003 + BUG_ON(host_priv->sg_done > sg_todo);
1004 +
1005 + if (host_priv->sg_done >= sg_todo) {
1006 + host_priv->sg_ix++;
1007 + host_priv->sg_done = 0;
1008 + }
1009 +
1010 + sg_ix = host_priv->sg_ix;
1011 + if (sg_ix < sg_len) {
1012 + u32 irq_mask;
1013 + /* Set off next DMA if we've got the capacity */
1014 +
1015 + if (data->flags & MMC_DATA_READ)
1016 + irq_mask = SDHCI_INT_DATA_AVAIL;
1017 + else
1018 + irq_mask = SDHCI_INT_SPACE_AVAIL;
1019 +
1020 + /* We have to use the interrupt status register on the BCM2708
1021 + rather than the SDHCI_PRESENT_STATE register because latency
1022 + in the glue logic means that the information retrieved from
1023 + the latter is not always up-to-date w.r.t the DMA engine -
1024 + it may not indicate that a read or a write is ready yet */
1025 + if (sdhci_bcm2708_raw_readl(host, SDHCI_INT_STATUS) &
1026 + irq_mask) {
1027 + size_t bytes = sg_dma_len(&sg[sg_ix]) -
1028 + host_priv->sg_done;
1029 + dma_addr_t addr = sg_dma_address(&data->sg[sg_ix]) +
1030 + host_priv->sg_done;
1031 +
1032 + /* acknowledge interrupt */
1033 + sdhci_bcm2708_raw_writel(host, irq_mask,
1034 + SDHCI_INT_STATUS);
1035 +
1036 + BUG_ON(0 == bytes);
1037 +
1038 + if (data->flags & MMC_DATA_READ)
1039 + sdhci_platdma_read(host, addr, bytes);
1040 + else
1041 + sdhci_platdma_write(host, addr, bytes);
1042 + } else {
1043 + DBG("PDMA - wait avail\n");
1044 + /* may generate an IRQ if already present */
1045 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1046 + SDHCI_INT_SPACE_AVAIL);
1047 + }
1048 + } else {
1049 + if (sync_after_dma) {
1050 + /* On the Arasan controller the stop command (which will be
1051 + scheduled after this completes) does not seem to work
1052 + properly if we allow it to be issued when we are
1053 + transferring data to/from the SD card.
1054 + We get CRC and DEND errors unless we wait for
1055 + the SD controller to finish reading/writing to the card. */
1056 + u32 state_mask;
1057 + int timeout=30*5000;
1058 +
1059 + DBG("PDMA over - sync card\n");
1060 + if (data->flags & MMC_DATA_READ)
1061 + state_mask = SDHCI_DOING_READ;
1062 + else
1063 + state_mask = SDHCI_DOING_WRITE;
1064 +
1065 + while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE)
1066 + & state_mask) && --timeout > 0)
1067 + {
1068 + udelay(1);
1069 + continue;
1070 + }
1071 + if (timeout <= 0)
1072 + printk(KERN_ERR"%s: final %s to SD card still "
1073 + "running\n",
1074 + mmc_hostname(host->mmc),
1075 + data->flags & MMC_DATA_READ? "read": "write");
1076 + }
1077 + if (host_priv->complete) {
1078 + (*host_priv->complete)(host);
1079 + DBG("PDMA %s complete\n",
1080 + data->flags & MMC_DATA_READ?"read":"write");
1081 + sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
1082 + SDHCI_INT_SPACE_AVAIL);
1083 + }
1084 + }
1085 +// spin_unlock_irqrestore(&host->lock, flags);
1086 +}
1087 +
1088 +static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
1089 +{
1090 + irqreturn_t result = IRQ_NONE;
1091 + struct sdhci_host *host = dev_id;
1092 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1093 + u32 dma_cs; /* control and status register */
1094 +
1095 + BUG_ON(NULL == dev_id);
1096 + BUG_ON(NULL == host_priv->dma_chan_base);
1097 +
1098 + sdhci_spin_lock(host);
1099 +
1100 + dma_cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
1101 +
1102 + if (dma_cs & BCM2708_DMA_ERR) {
1103 + unsigned long debug;
1104 + debug = readl(host_priv->dma_chan_base +
1105 + BCM2708_DMA_DEBUG);
1106 + printk(KERN_ERR "%s: DMA error - CS %lX DEBUG %lX\n",
1107 + mmc_hostname(host->mmc), (unsigned long)dma_cs,
1108 + (unsigned long)debug);
1109 + /* reset error */
1110 + writel(debug, host_priv->dma_chan_base +
1111 + BCM2708_DMA_DEBUG);
1112 + }
1113 + if (dma_cs & BCM2708_DMA_INT) {
1114 + /* acknowledge interrupt */
1115 + writel(BCM2708_DMA_INT,
1116 + host_priv->dma_chan_base + BCM2708_DMA_CS);
1117 +
1118 + dsb(); /* ARM data synchronization (push) operation */
1119 +
1120 + if (!host_priv->dma_wanted) {
1121 + /* ignore this interrupt - it was reset */
1122 + if (extra_messages)
1123 + printk(KERN_INFO "%s: DMA IRQ %X ignored - "
1124 + "results were reset\n",
1125 + mmc_hostname(host->mmc), dma_cs);
1126 +#ifdef CHECK_DMA_USE
1127 + printk(KERN_INFO "%s: now %"FMT_HPT
1128 + " started %lu reset %lu stopped %lu\n",
1129 + mmc_hostname(host->mmc), hptime(),
1130 + since_ns(host_priv->when_started),
1131 + since_ns(host_priv->when_reset),
1132 + since_ns(host_priv->when_stopped));
1133 + host_priv->dmas_pending--;
1134 +#endif
1135 + } else
1136 + sdhci_bcm2708_dma_complete_irq(host, dma_cs);
1137 +
1138 + result = IRQ_HANDLED;
1139 + }
1140 + sdhci_spin_unlock(host);
1141 +
1142 + return result;
1143 +}
1144 +#endif /* CONFIG_MMC_SDHCI_BCM2708_DMA */
1145 +
1146 +
1147 +/***************************************************************************** \
1148 + * *
1149 + * Device Attributes *
1150 + * *
1151 +\*****************************************************************************/
1152 +
1153 +
1154 +/**
1155 + * Show the DMA-using status
1156 + */
1157 +static ssize_t attr_dma_show(struct device *_dev,
1158 + struct device_attribute *attr, char *buf)
1159 +{
1160 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1161 +
1162 + if (host) {
1163 + int use_dma = (host->flags & SDHCI_USE_PLATDMA? 1:0);
1164 + return sprintf(buf, "%d\n", use_dma);
1165 + } else
1166 + return -EINVAL;
1167 +}
1168 +
1169 +/**
1170 + * Set the DMA-using status
1171 + */
1172 +static ssize_t attr_dma_store(struct device *_dev,
1173 + struct device_attribute *attr,
1174 + const char *buf, size_t count)
1175 +{
1176 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1177 +
1178 + if (host) {
1179 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1180 + int on = simple_strtol(buf, NULL, 0);
1181 + if (on) {
1182 + host->flags |= SDHCI_USE_PLATDMA;
1183 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1184 + printk(KERN_INFO "%s: DMA enabled\n",
1185 + mmc_hostname(host->mmc));
1186 + } else {
1187 + host->flags &= ~(SDHCI_USE_PLATDMA | SDHCI_REQ_USE_DMA);
1188 + sdhci_bcm2708_writel(host, 0, REG_EXRDFIFO_EN);
1189 + printk(KERN_INFO "%s: DMA disabled\n",
1190 + mmc_hostname(host->mmc));
1191 + }
1192 +#endif
1193 + return count;
1194 + } else
1195 + return -EINVAL;
1196 +}
1197 +
1198 +static DEVICE_ATTR(use_dma, S_IRUGO | S_IWUGO, attr_dma_show, attr_dma_store);
1199 +
1200 +
1201 +/**
1202 + * Show the DMA wait states used
1203 + */
1204 +static ssize_t attr_dmawait_show(struct device *_dev,
1205 + struct device_attribute *attr, char *buf)
1206 +{
1207 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1208 +
1209 + if (host) {
1210 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1211 + int dmawait = host_priv->dma_waits;
1212 + return sprintf(buf, "%d\n", dmawait);
1213 + } else
1214 + return -EINVAL;
1215 +}
1216 +
1217 +/**
1218 + * Set the DMA wait state used
1219 + */
1220 +static ssize_t attr_dmawait_store(struct device *_dev,
1221 + struct device_attribute *attr,
1222 + const char *buf, size_t count)
1223 +{
1224 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1225 +
1226 + if (host) {
1227 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1228 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1229 + int dma_waits = simple_strtol(buf, NULL, 0);
1230 + if (dma_waits >= 0 && dma_waits < 32)
1231 + host_priv->dma_waits = dma_waits;
1232 + else
1233 + printk(KERN_ERR "%s: illegal dma_waits value - %d",
1234 + mmc_hostname(host->mmc), dma_waits);
1235 +#endif
1236 + return count;
1237 + } else
1238 + return -EINVAL;
1239 +}
1240 +
1241 +static DEVICE_ATTR(dma_wait, S_IRUGO | S_IWUGO,
1242 + attr_dmawait_show, attr_dmawait_store);
1243 +
1244 +
1245 +/**
1246 + * Show the SDHCI host status (power, clock and DMA configuration)
1247 + */
1248 +static ssize_t attr_status_show(struct device *_dev,
1249 + struct device_attribute *attr, char *buf)
1250 +{
1251 + struct sdhci_host *host = (struct sdhci_host *)dev_get_drvdata(_dev);
1252 +
1253 + if (host) {
1254 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1255 + return sprintf(buf,
1256 + "present: yes\n"
1257 + "power: %s\n"
1258 + "clock: %u Hz\n"
1259 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1260 + "dma: %s (%d waits)\n",
1261 +#else
1262 + "dma: unconfigured\n",
1263 +#endif
1264 + "always on",
1265 + host->clock
1266 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1267 + , (host->flags & SDHCI_USE_PLATDMA)? "on": "off"
1268 + , host_priv->dma_waits
1269 +#endif
1270 + );
1271 + } else
1272 + return -EINVAL;
1273 +}
1274 +
1275 +static DEVICE_ATTR(status, S_IRUGO, attr_status_show, NULL);
1276 +
1277 +/***************************************************************************** \
1278 + * *
1279 + * Power Management *
1280 + * *
1281 +\*****************************************************************************/
1282 +
1283 +
1284 +#ifdef CONFIG_PM
1285 +static int sdhci_bcm2708_suspend(struct platform_device *dev, pm_message_t state)
1286 +{
1287 + struct sdhci_host *host = (struct sdhci_host *)
1288 + platform_get_drvdata(dev);
1289 + int ret = 0;
1290 +
1291 + if (host->mmc) {
1292 + ret = mmc_suspend_host(host->mmc);
1293 + }
1294 +
1295 + return ret;
1296 +}
1297 +
1298 +static int sdhci_bcm2708_resume(struct platform_device *dev)
1299 +{
1300 + struct sdhci_host *host = (struct sdhci_host *)
1301 + platform_get_drvdata(dev);
1302 + int ret = 0;
1303 +
1304 + if (host->mmc) {
1305 + ret = mmc_resume_host(host->mmc);
1306 + }
1307 +
1308 + return ret;
1309 +}
1310 +#endif
1311 +
1312 +
1313 +/*****************************************************************************\
1314 + * *
1315 + * Device quirk functions. Implemented as local ops because the flags *
1316 + * field is out of space with newer kernels. This implementation can be *
1317 + * back ported to older kernels as well. *
1318 +\****************************************************************************/
1319 +static unsigned int sdhci_bcm2708_quirk_extra_ints(struct sdhci_host *host)
1320 +{
1321 + return 1;
1322 +}
1323 +
1324 +static unsigned int sdhci_bcm2708_quirk_spurious_crc_acmd51(struct sdhci_host *host)
1325 +{
1326 + return 1;
1327 +}
1328 +
1329 +static unsigned int sdhci_bcm2708_quirk_voltage_broken(struct sdhci_host *host)
1330 +{
1331 + return 1;
1332 +}
1333 +
1334 +static unsigned int sdhci_bcm2708_uhs_broken(struct sdhci_host *host)
1335 +{
1336 + return 1;
1337 +}
1338 +
1339 +static unsigned int sdhci_bcm2708_missing_status(struct sdhci_host *host)
1340 +{
1341 + return 1;
1342 +}
1343 +
1344 +/***************************************************************************** \
1345 + * *
1346 + * Device ops *
1347 + * *
1348 +\*****************************************************************************/
1349 +
1350 +static struct sdhci_ops sdhci_bcm2708_ops = {
1351 +#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
1352 + .read_l = sdhci_bcm2708_readl,
1353 + .read_w = sdhci_bcm2708_readw,
1354 + .read_b = sdhci_bcm2708_readb,
1355 + .write_l = sdhci_bcm2708_writel,
1356 + .write_w = sdhci_bcm2708_writew,
1357 + .write_b = sdhci_bcm2708_writeb,
1358 +#else
1359 +#error The BCM2708 SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set
1360 +#endif
1361 + .get_max_clock = sdhci_bcm2708_get_max_clock,
1362 +
1363 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1364 + // Platform DMA operations
1365 + .pdma_able = sdhci_bcm2708_platdma_dmaable,
1366 + .pdma_avail = sdhci_bcm2708_platdma_avail,
1367 + .pdma_reset = sdhci_bcm2708_platdma_reset,
1368 +#endif
1369 + .extra_ints = sdhci_bcm2708_quirk_extra_ints,
1370 + .voltage_broken = sdhci_bcm2708_quirk_voltage_broken,
1371 + .uhs_broken = sdhci_bcm2708_uhs_broken,
1372 +};
1373 +
1374 +/*****************************************************************************\
1375 + * *
1376 + * Device probing/removal *
1377 + * *
1378 +\*****************************************************************************/
1379 +
1380 +static int sdhci_bcm2708_probe(struct platform_device *pdev)
1381 +{
1382 + struct sdhci_host *host;
1383 + struct resource *iomem;
1384 + struct sdhci_bcm2708_priv *host_priv;
1385 + int ret;
1386 +
1387 + BUG_ON(pdev == NULL);
1388 +
1389 + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1390 + if (!iomem) {
1391 + ret = -ENOMEM;
1392 + goto err;
1393 + }
1394 +
1395 + if (resource_size(iomem) != 0x100)
1396 + dev_err(&pdev->dev, "Invalid iomem size. You may "
1397 + "experience problems.\n");
1398 +
1399 + if (pdev->dev.parent)
1400 + host = sdhci_alloc_host(pdev->dev.parent,
1401 + sizeof(struct sdhci_bcm2708_priv));
1402 + else
1403 + host = sdhci_alloc_host(&pdev->dev,
1404 + sizeof(struct sdhci_bcm2708_priv));
1405 +
1406 + if (IS_ERR(host)) {
1407 + ret = PTR_ERR(host);
1408 + goto err;
1409 + }
1410 + if (missing_status) {
1411 + sdhci_bcm2708_ops.missing_status = sdhci_bcm2708_missing_status;
1412 + }
1413 +
1414 + if( spurious_crc_acmd51 ) {
1415 + sdhci_bcm2708_ops.spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc_acmd51;
1416 + }
1417 +
1418 +
1419 + printk("sdhci: %s low-latency mode\n",enable_llm?"Enable":"Disable");
1420 +
1421 + host->hw_name = "BCM2708_Arasan";
1422 + host->ops = &sdhci_bcm2708_ops;
1423 + host->irq = platform_get_irq(pdev, 0);
1424 + host->second_irq = 0;
1425 +
1426 + host->quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
1427 + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1428 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1429 + SDHCI_QUIRK_MISSING_CAPS |
1430 + SDHCI_QUIRK_NO_HISPD_BIT |
1431 + (sync_after_dma ? 0:SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12);
1432 +
1433 +
1434 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1435 + host->flags = SDHCI_USE_PLATDMA;
1436 +#endif
1437 +
1438 + if (!request_mem_region(iomem->start, resource_size(iomem),
1439 + mmc_hostname(host->mmc))) {
1440 + dev_err(&pdev->dev, "cannot request region\n");
1441 + ret = -EBUSY;
1442 + goto err_request;
1443 + }
1444 +
1445 + host->ioaddr = ioremap(iomem->start, resource_size(iomem));
1446 + if (!host->ioaddr) {
1447 + dev_err(&pdev->dev, "failed to remap registers\n");
1448 + ret = -ENOMEM;
1449 + goto err_remap;
1450 + }
1451 +
1452 + host_priv = SDHCI_HOST_PRIV(host);
1453 +
1454 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1455 + host_priv->dma_wanted = 0;
1456 +#ifdef CHECK_DMA_USE
1457 + host_priv->dmas_pending = 0;
1458 + host_priv->when_started = 0;
1459 + host_priv->when_reset = 0;
1460 + host_priv->when_stopped = 0;
1461 +#endif
1462 + host_priv->sg_ix = 0;
1463 + host_priv->sg_done = 0;
1464 + host_priv->complete = NULL;
1465 + host_priv->dma_waits = SDHCI_BCM_DMA_WAITS;
1466 +
1467 + host_priv->cb_base = dma_alloc_writecombine(&pdev->dev, SZ_4K,
1468 + &host_priv->cb_handle,
1469 + GFP_KERNEL);
1470 + if (!host_priv->cb_base) {
1471 + dev_err(&pdev->dev, "cannot allocate DMA CBs\n");
1472 + ret = -ENOMEM;
1473 + goto err_alloc_cb;
1474 + }
1475 +
1476 + ret = bcm_dma_chan_alloc(BCM_DMA_FEATURE_FAST,
1477 + &host_priv->dma_chan_base,
1478 + &host_priv->dma_irq);
1479 + if (ret < 0) {
1480 + dev_err(&pdev->dev, "couldn't allocate a DMA channel\n");
1481 + goto err_add_dma;
1482 + }
1483 + host_priv->dma_chan = ret;
1484 +
1485 + ret = request_irq(host_priv->dma_irq, sdhci_bcm2708_dma_irq,0,//IRQF_SHARED,
1486 + DRIVER_NAME " (dma)", host);
1487 + if (ret) {
1488 + dev_err(&pdev->dev, "cannot set DMA IRQ\n");
1489 + goto err_add_dma_irq;
1490 + }
1491 + host->second_irq = host_priv->dma_irq;
1492 + DBG("DMA CBs %p handle %08X DMA%d %p DMA IRQ %d\n",
1493 + host_priv->cb_base, (unsigned)host_priv->cb_handle,
1494 + host_priv->dma_chan, host_priv->dma_chan_base,
1495 + host_priv->dma_irq);
1496 +
1497 + if (allow_highspeed)
1498 + host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1499 +
1500 + /* single block writes cause data loss with some SD cards! */
1501 + host->mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;
1502 +#endif
1503 +
1504 + ret = sdhci_add_host(host);
1505 + if (ret)
1506 + goto err_add_host;
1507 +
1508 + platform_set_drvdata(pdev, host);
1509 + ret = device_create_file(&pdev->dev, &dev_attr_use_dma);
1510 + ret = device_create_file(&pdev->dev, &dev_attr_dma_wait);
1511 + ret = device_create_file(&pdev->dev, &dev_attr_status);
1512 +
1513 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1514 + /* enable extension fifo for paced DMA transfers */
1515 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
1516 + sdhci_bcm2708_writel(host, 4, REG_EXRDFIFO_CFG);
1517 +#endif
1518 +
1519 + printk(KERN_INFO "%s: BCM2708 SDHC host at 0x%08llx DMA %d IRQ %d\n",
1520 + mmc_hostname(host->mmc), (unsigned long long)iomem->start,
1521 + host_priv->dma_chan, host_priv->dma_irq);
1522 +
1523 + return 0;
1524 +
1525 +err_add_host:
1526 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1527 + free_irq(host_priv->dma_irq, host);
1528 +err_add_dma_irq:
1529 + bcm_dma_chan_free(host_priv->dma_chan);
1530 +err_add_dma:
1531 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1532 + host_priv->cb_handle);
1533 +err_alloc_cb:
1534 +#endif
1535 + iounmap(host->ioaddr);
1536 +err_remap:
1537 + release_mem_region(iomem->start, resource_size(iomem));
1538 +err_request:
1539 + sdhci_free_host(host);
1540 +err:
1541 + dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1542 + return ret;
1543 +}
1544 +
1545 +static int sdhci_bcm2708_remove(struct platform_device *pdev)
1546 +{
1547 + struct sdhci_host *host = platform_get_drvdata(pdev);
1548 + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1549 + struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
1550 + int dead;
1551 + u32 scratch;
1552 +
1553 + dead = 0;
1554 + scratch = sdhci_bcm2708_readl(host, SDHCI_INT_STATUS);
1555 + if (scratch == (u32)-1)
1556 + dead = 1;
1557 +
1558 + device_remove_file(&pdev->dev, &dev_attr_status);
1559 + device_remove_file(&pdev->dev, &dev_attr_dma_wait);
1560 + device_remove_file(&pdev->dev, &dev_attr_use_dma);
1561 +
1562 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
1563 + free_irq(host_priv->dma_irq, host);
1564 + dma_free_writecombine(&pdev->dev, SZ_4K, host_priv->cb_base,
1565 + host_priv->cb_handle);
1566 +#endif
1567 + sdhci_remove_host(host, dead);
1568 + iounmap(host->ioaddr);
1569 + release_mem_region(iomem->start, resource_size(iomem));
1570 + sdhci_free_host(host);
1571 + platform_set_drvdata(pdev, NULL);
1572 +
1573 + return 0;
1574 +}
1575 +
1576 +static struct platform_driver sdhci_bcm2708_driver = {
1577 + .driver = {
1578 + .name = DRIVER_NAME,
1579 + .owner = THIS_MODULE,
1580 + },
1581 + .probe = sdhci_bcm2708_probe,
1582 + .remove = sdhci_bcm2708_remove,
1583 +
1584 +#ifdef CONFIG_PM
1585 + .suspend = sdhci_bcm2708_suspend,
1586 + .resume = sdhci_bcm2708_resume,
1587 +#endif
1588 +
1589 +};
1590 +
1591 +/*****************************************************************************\
1592 + * *
1593 + * Driver init/exit *
1594 + * *
1595 +\*****************************************************************************/
1596 +
1597 +static int __init sdhci_drv_init(void)
1598 +{
1599 + return platform_driver_register(&sdhci_bcm2708_driver);
1600 +}
1601 +
1602 +static void __exit sdhci_drv_exit(void)
1603 +{
1604 + platform_driver_unregister(&sdhci_bcm2708_driver);
1605 +}
1606 +
1607 +module_init(sdhci_drv_init);
1608 +module_exit(sdhci_drv_exit);
1609 +
1610 +module_param(allow_highspeed, bool, 0444);
1611 +module_param(emmc_clock_freq, int, 0444);
1612 +module_param(sync_after_dma, bool, 0444);
1613 +module_param(missing_status, bool, 0444);
1614 +module_param(spurious_crc_acmd51, bool, 0444);
1615 +module_param(enable_llm, bool, 0444);
1616 +module_param(cycle_delay, int, 0444);
1617 +module_param(extra_messages, bool, 0444);
1618 +
1619 +MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
1620 +MODULE_AUTHOR("Broadcom <info@broadcom.com>");
1621 +MODULE_LICENSE("GPL v2");
1622 +MODULE_ALIAS("platform:"DRIVER_NAME);
1623 +
1624 +MODULE_PARM_DESC(allow_highspeed, "Allow high speed transfers modes");
1625 +MODULE_PARM_DESC(emmc_clock_freq, "Specify the speed of emmc clock");
1626 +MODULE_PARM_DESC(sync_after_dma, "Block in driver until dma complete");
1627 +MODULE_PARM_DESC(missing_status, "Use the missing status quirk");
1628 +MODULE_PARM_DESC(spurious_crc_acmd51, "Use the spurious crc quirk for reading SCR (ACMD51)");
1629 +MODULE_PARM_DESC(enable_llm, "Enable low-latency mode");
1630 +MODULE_PARM_DESC(extra_messages, "Enable more sdcard warning messages");
1631 +
1632 +
1633 diff -urwN linux-3.10/drivers/mmc/host/sdhci.c linux-rpi-3.10.y/drivers/mmc/host/sdhci.c
1634 --- linux-3.10/drivers/mmc/host/sdhci.c 2013-06-30 23:13:29.000000000 +0100
1635 +++ linux-rpi-3.10.y/drivers/mmc/host/sdhci.c 2013-07-06 15:25:50.000000000 +0100
1636 @@ -28,6 +28,7 @@
1637 #include <linux/mmc/mmc.h>
1638 #include <linux/mmc/host.h>
1639 #include <linux/mmc/card.h>
1640 +#include <linux/mmc/sd.h>
1641 #include <linux/mmc/slot-gpio.h>
1642
1643 #include "sdhci.h"
1644 @@ -123,6 +124,91 @@
1645 * Low level functions *
1646 * *
1647 \*****************************************************************************/
1648 +extern bool enable_llm;
1649 +static int sdhci_locked=0;
1650 +void sdhci_spin_lock(struct sdhci_host *host)
1651 +{
1652 + spin_lock(&host->lock);
1653 +#ifdef CONFIG_PREEMPT
1654 + if(enable_llm)
1655 + {
1656 + disable_irq_nosync(host->irq);
1657 + if(host->second_irq)
1658 + disable_irq_nosync(host->second_irq);
1659 + local_irq_enable();
1660 + }
1661 +#endif
1662 +}
1663 +
1664 +void sdhci_spin_unlock(struct sdhci_host *host)
1665 +{
1666 +#ifdef CONFIG_PREEMPT
1667 + if(enable_llm)
1668 + {
1669 + local_irq_disable();
1670 + if(host->second_irq)
1671 + enable_irq(host->second_irq);
1672 + enable_irq(host->irq);
1673 + }
1674 +#endif
1675 + spin_unlock(&host->lock);
1676 +}
1677 +
1678 +void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags)
1679 +{
1680 +#ifdef CONFIG_PREEMPT
1681 + if(enable_llm)
1682 + {
1683 + while(sdhci_locked)
1684 + {
1685 + preempt_schedule();
1686 + }
1687 + spin_lock_irqsave(&host->lock,*flags);
1688 + disable_irq(host->irq);
1689 + if(host->second_irq)
1690 + disable_irq(host->second_irq);
1691 + local_irq_enable();
1692 + }
1693 + else
1694 +#endif
1695 + spin_lock_irqsave(&host->lock,*flags);
1696 +}
1697 +
1698 +void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags)
1699 +{
1700 +#ifdef CONFIG_PREEMPT
1701 + if(enable_llm)
1702 + {
1703 + local_irq_disable();
1704 + if(host->second_irq)
1705 + enable_irq(host->second_irq);
1706 + enable_irq(host->irq);
1707 + }
1708 +#endif
1709 + spin_unlock_irqrestore(&host->lock,flags);
1710 +}
1711 +
1712 +static void sdhci_spin_enable_schedule(struct sdhci_host *host)
1713 +{
1714 +#ifdef CONFIG_PREEMPT
1715 + if(enable_llm)
1716 + {
1717 + sdhci_locked = 1;
1718 + preempt_enable();
1719 + }
1720 +#endif
1721 +}
1722 +
1723 +static void sdhci_spin_disable_schedule(struct sdhci_host *host)
1724 +{
1725 +#ifdef CONFIG_PREEMPT
1726 + if(enable_llm)
1727 + {
1728 + preempt_disable();
1729 + sdhci_locked = 0;
1730 + }
1731 +#endif
1732 +}
1733
1734 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
1735 {
1736 @@ -288,7 +374,7 @@
1737 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
1738 unsigned long flags;
1739
1740 - spin_lock_irqsave(&host->lock, flags);
1741 + sdhci_spin_lock_irqsave(host, &flags);
1742
1743 if (host->runtime_suspended)
1744 goto out;
1745 @@ -298,7 +384,7 @@
1746 else
1747 sdhci_activate_led(host);
1748 out:
1749 - spin_unlock_irqrestore(&host->lock, flags);
1750 + sdhci_spin_unlock_irqrestore(host, flags);
1751 }
1752 #endif
1753
1754 @@ -315,7 +401,7 @@
1755 u32 uninitialized_var(scratch);
1756 u8 *buf;
1757
1758 - DBG("PIO reading\n");
1759 + DBG("PIO reading %db\n", host->data->blksz);
1760
1761 blksize = host->data->blksz;
1762 chunk = 0;
1763 @@ -360,7 +446,7 @@
1764 u32 scratch;
1765 u8 *buf;
1766
1767 - DBG("PIO writing\n");
1768 + DBG("PIO writing %db\n", host->data->blksz);
1769
1770 blksize = host->data->blksz;
1771 chunk = 0;
1772 @@ -399,19 +485,28 @@
1773 local_irq_restore(flags);
1774 }
1775
1776 -static void sdhci_transfer_pio(struct sdhci_host *host)
1777 +static void sdhci_transfer_pio(struct sdhci_host *host, u32 intstate)
1778 {
1779 u32 mask;
1780 + u32 state = 0;
1781 + u32 intmask;
1782 + int available;
1783
1784 BUG_ON(!host->data);
1785
1786 if (host->blocks == 0)
1787 return;
1788
1789 - if (host->data->flags & MMC_DATA_READ)
1790 + if (host->data->flags & MMC_DATA_READ) {
1791 mask = SDHCI_DATA_AVAILABLE;
1792 - else
1793 + intmask = SDHCI_INT_DATA_AVAIL;
1794 + } else {
1795 mask = SDHCI_SPACE_AVAILABLE;
1796 + intmask = SDHCI_INT_SPACE_AVAIL;
1797 + }
1798 +
1799 +	/* initially we can see whether we can proceed using intstate */
1800 + available = (intstate & intmask);
1801
1802 /*
1803 * Some controllers (JMicron JMB38x) mess up the buffer bits
1804 @@ -422,7 +517,7 @@
1805 (host->data->blocks == 1))
1806 mask = ~0;
1807
1808 - while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1809 + while (available) {
1810 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
1811 udelay(100);
1812
1813 @@ -434,9 +529,11 @@
1814 host->blocks--;
1815 if (host->blocks == 0)
1816 break;
1817 + state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1818 + available = state & mask;
1819 }
1820
1821 - DBG("PIO transfer complete.\n");
1822 + DBG("PIO transfer complete - %d blocks left.\n", host->blocks);
1823 }
1824
1825 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
1826 @@ -709,7 +806,9 @@
1827 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1828 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1829
1830 - if (host->flags & SDHCI_REQ_USE_DMA)
1831 + /* platform DMA will begin on receipt of PIO irqs */
1832 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1833 + !(host->flags & SDHCI_USE_PLATDMA))
1834 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
1835 else
1836 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
1837 @@ -741,44 +840,25 @@
1838 host->data_early = 0;
1839 host->data->bytes_xfered = 0;
1840
1841 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
1842 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA | SDHCI_USE_PLATDMA))
1843 host->flags |= SDHCI_REQ_USE_DMA;
1844
1845 /*
1846 * FIXME: This doesn't account for merging when mapping the
1847 * scatterlist.
1848 */
1849 - if (host->flags & SDHCI_REQ_USE_DMA) {
1850 - int broken, i;
1851 - struct scatterlist *sg;
1852 -
1853 - broken = 0;
1854 - if (host->flags & SDHCI_USE_ADMA) {
1855 - if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
1856 - broken = 1;
1857 - } else {
1858 - if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1859 - broken = 1;
1860 - }
1861 -
1862 - if (unlikely(broken)) {
1863 - for_each_sg(data->sg, sg, data->sg_len, i) {
1864 - if (sg->length & 0x3) {
1865 - DBG("Reverting to PIO because of "
1866 - "transfer size (%d)\n",
1867 - sg->length);
1868 - host->flags &= ~SDHCI_REQ_USE_DMA;
1869 - break;
1870 - }
1871 - }
1872 - }
1873 - }
1874
1875 /*
1876 * The assumption here being that alignment is the same after
1877 * translation to device address space.
1878 */
1879 - if (host->flags & SDHCI_REQ_USE_DMA) {
1880 + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) ==
1881 + (SDHCI_REQ_USE_DMA | SDHCI_USE_PLATDMA)) {
1882 +
1883 + if (! sdhci_platdma_dmaable(host, data))
1884 + host->flags &= ~SDHCI_REQ_USE_DMA;
1885 +
1886 + } else if (host->flags & SDHCI_REQ_USE_DMA) {
1887 int broken, i;
1888 struct scatterlist *sg;
1889
1890 @@ -837,7 +917,8 @@
1891 */
1892 WARN_ON(1);
1893 host->flags &= ~SDHCI_REQ_USE_DMA;
1894 - } else {
1895 + } else
1896 + if (!(host->flags & SDHCI_USE_PLATDMA)) {
1897 WARN_ON(sg_cnt != 1);
1898 sdhci_writel(host, sg_dma_address(data->sg),
1899 SDHCI_DMA_ADDRESS);
1900 @@ -853,11 +934,13 @@
1901 if (host->version >= SDHCI_SPEC_200) {
1902 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1903 ctrl &= ~SDHCI_CTRL_DMA_MASK;
1904 + if (! (host->flags & SDHCI_USE_PLATDMA)) {
1905 if ((host->flags & SDHCI_REQ_USE_DMA) &&
1906 (host->flags & SDHCI_USE_ADMA))
1907 ctrl |= SDHCI_CTRL_ADMA32;
1908 else
1909 ctrl |= SDHCI_CTRL_SDMA;
1910 + }
1911 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1912 }
1913
1914 @@ -909,7 +992,8 @@
1915
1916 if (data->flags & MMC_DATA_READ)
1917 mode |= SDHCI_TRNS_READ;
1918 - if (host->flags & SDHCI_REQ_USE_DMA)
1919 + if ((host->flags & SDHCI_REQ_USE_DMA) &&
1920 + !(host->flags & SDHCI_USE_PLATDMA))
1921 mode |= SDHCI_TRNS_DMA;
1922
1923 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1924 @@ -925,13 +1009,16 @@
1925 host->data = NULL;
1926
1927 if (host->flags & SDHCI_REQ_USE_DMA) {
1928 - if (host->flags & SDHCI_USE_ADMA)
1929 - sdhci_adma_table_post(host, data);
1930 - else {
1931 + /* we may have to abandon an ongoing platform DMA */
1932 + if (host->flags & SDHCI_USE_PLATDMA)
1933 + sdhci_platdma_reset(host, data);
1934 +
1935 + if (host->flags & (SDHCI_USE_PLATDMA | SDHCI_USE_SDMA)) {
1936 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1937 data->sg_len, (data->flags & MMC_DATA_READ) ?
1938 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1939 - }
1940 + } else if (host->flags & SDHCI_USE_ADMA)
1941 + sdhci_adma_table_post(host, data);
1942 }
1943
1944 /*
1945 @@ -984,6 +1071,12 @@
1946 if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1947 mask |= SDHCI_DATA_INHIBIT;
1948
1949 + if(host->ops->missing_status && (cmd->opcode == MMC_SEND_STATUS)) {
1950 + timeout = 5000; // Really obscenely large delay to send the status, due to bug in controller
1951 + // which might cause the STATUS command to get stuck when a data operation is in flow
1952 + mask |= SDHCI_DATA_INHIBIT;
1953 + }
1954 +
1955 /* We shouldn't wait for data inihibit for stop commands, even
1956 though they might use busy signaling */
1957 if (host->mrq->data && (cmd == host->mrq->data->stop))
1958 @@ -999,12 +1092,20 @@
1959 return;
1960 }
1961 timeout--;
1962 + sdhci_spin_enable_schedule(host);
1963 mdelay(1);
1964 + sdhci_spin_disable_schedule(host);
1965 }
1966 + DBG("send cmd %d - wait 0x%X irq 0x%x\n", cmd->opcode, mask,
1967 + sdhci_readl(host, SDHCI_INT_STATUS));
1968
1969 mod_timer(&host->timer, jiffies + 10 * HZ);
1970
1971 host->cmd = cmd;
1972 + if (host->last_cmdop == MMC_APP_CMD)
1973 + host->last_cmdop = -cmd->opcode;
1974 + else
1975 + host->last_cmdop = cmd->opcode;
1976
1977 sdhci_prepare_data(host, cmd);
1978
1979 @@ -1220,7 +1321,9 @@
1980 return;
1981 }
1982 timeout--;
1983 + sdhci_spin_enable_schedule(host);
1984 mdelay(1);
1985 + sdhci_spin_disable_schedule(host);
1986 }
1987
1988 clk |= SDHCI_CLOCK_CARD_EN;
1989 @@ -1316,7 +1419,7 @@
1990
1991 sdhci_runtime_pm_get(host);
1992
1993 - spin_lock_irqsave(&host->lock, flags);
1994 + sdhci_spin_lock_irqsave(host, &flags);
1995
1996 WARN_ON(host->mrq != NULL);
1997
1998 @@ -1374,9 +1477,9 @@
1999 mmc->card->type == MMC_TYPE_MMC ?
2000 MMC_SEND_TUNING_BLOCK_HS200 :
2001 MMC_SEND_TUNING_BLOCK;
2002 - spin_unlock_irqrestore(&host->lock, flags);
2003 + sdhci_spin_unlock_irqrestore(host, flags);
2004 sdhci_execute_tuning(mmc, tuning_opcode);
2005 - spin_lock_irqsave(&host->lock, flags);
2006 + sdhci_spin_lock_irqsave(host, &flags);
2007
2008 /* Restore original mmc_request structure */
2009 host->mrq = mrq;
2010 @@ -1390,7 +1493,7 @@
2011 }
2012
2013 mmiowb();
2014 - spin_unlock_irqrestore(&host->lock, flags);
2015 + sdhci_spin_unlock_irqrestore(host, flags);
2016 }
2017
2018 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2019 @@ -1399,10 +1502,10 @@
2020 int vdd_bit = -1;
2021 u8 ctrl;
2022
2023 - spin_lock_irqsave(&host->lock, flags);
2024 + sdhci_spin_lock_irqsave(host, &flags);
2025
2026 if (host->flags & SDHCI_DEVICE_DEAD) {
2027 - spin_unlock_irqrestore(&host->lock, flags);
2028 + sdhci_spin_unlock_irqrestore(host, flags);
2029 if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
2030 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
2031 return;
2032 @@ -1429,9 +1532,9 @@
2033 vdd_bit = sdhci_set_power(host, ios->vdd);
2034
2035 if (host->vmmc && vdd_bit != -1) {
2036 - spin_unlock_irqrestore(&host->lock, flags);
2037 + sdhci_spin_unlock_irqrestore(host, flags);
2038 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
2039 - spin_lock_irqsave(&host->lock, flags);
2040 + sdhci_spin_lock_irqsave(host, &flags);
2041 }
2042
2043 if (host->ops->platform_send_init_74_clocks)
2044 @@ -1470,7 +1573,7 @@
2045 else
2046 ctrl &= ~SDHCI_CTRL_HISPD;
2047
2048 - if (host->version >= SDHCI_SPEC_300) {
2049 + if (host->version >= SDHCI_SPEC_300 && !(host->ops->uhs_broken)) {
2050 u16 clk, ctrl_2;
2051
2052 /* In case of UHS-I modes, set High Speed Enable */
2053 @@ -1569,7 +1672,7 @@
2054 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2055
2056 mmiowb();
2057 - spin_unlock_irqrestore(&host->lock, flags);
2058 + sdhci_spin_unlock_irqrestore(host, flags);
2059 }
2060
2061 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2062 @@ -1617,7 +1720,7 @@
2063 unsigned long flags;
2064 int is_readonly;
2065
2066 - spin_lock_irqsave(&host->lock, flags);
2067 + sdhci_spin_lock_irqsave(host, &flags);
2068
2069 if (host->flags & SDHCI_DEVICE_DEAD)
2070 is_readonly = 0;
2071 @@ -1627,7 +1730,7 @@
2072 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2073 & SDHCI_WRITE_PROTECT);
2074
2075 - spin_unlock_irqrestore(&host->lock, flags);
2076 + sdhci_spin_unlock_irqrestore(host, flags);
2077
2078 /* This quirk needs to be replaced by a callback-function later */
2079 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2080 @@ -1700,9 +1803,9 @@
2081 struct sdhci_host *host = mmc_priv(mmc);
2082 unsigned long flags;
2083
2084 - spin_lock_irqsave(&host->lock, flags);
2085 + sdhci_spin_lock_irqsave(host, &flags);
2086 sdhci_enable_sdio_irq_nolock(host, enable);
2087 - spin_unlock_irqrestore(&host->lock, flags);
2088 + sdhci_spin_unlock_irqrestore(host, flags);
2089 }
2090
2091 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
2092 @@ -2046,7 +2149,7 @@
2093 struct sdhci_host *host = mmc_priv(mmc);
2094 unsigned long flags;
2095
2096 - spin_lock_irqsave(&host->lock, flags);
2097 + sdhci_spin_lock_irqsave(host, &flags);
2098
2099 /* Check host->mrq first in case we are runtime suspended */
2100 if (host->mrq &&
2101 @@ -2063,7 +2166,7 @@
2102 tasklet_schedule(&host->finish_tasklet);
2103 }
2104
2105 - spin_unlock_irqrestore(&host->lock, flags);
2106 + sdhci_spin_unlock_irqrestore(host, flags);
2107 }
2108
2109 static const struct mmc_host_ops sdhci_ops = {
2110 @@ -2102,14 +2205,14 @@
2111
2112 host = (struct sdhci_host*)param;
2113
2114 - spin_lock_irqsave(&host->lock, flags);
2115 + sdhci_spin_lock_irqsave(host, &flags);
2116
2117 /*
2118 * If this tasklet gets rescheduled while running, it will
2119 * be run again afterwards but without any active request.
2120 */
2121 if (!host->mrq) {
2122 - spin_unlock_irqrestore(&host->lock, flags);
2123 + sdhci_spin_unlock_irqrestore(host, flags);
2124 return;
2125 }
2126
2127 @@ -2147,7 +2250,7 @@
2128 #endif
2129
2130 mmiowb();
2131 - spin_unlock_irqrestore(&host->lock, flags);
2132 + sdhci_spin_unlock_irqrestore(host, flags);
2133
2134 mmc_request_done(host->mmc, mrq);
2135 sdhci_runtime_pm_put(host);
2136 @@ -2160,11 +2263,11 @@
2137
2138 host = (struct sdhci_host*)data;
2139
2140 - spin_lock_irqsave(&host->lock, flags);
2141 + sdhci_spin_lock_irqsave(host, &flags);
2142
2143 if (host->mrq) {
2144 pr_err("%s: Timeout waiting for hardware "
2145 - "interrupt.\n", mmc_hostname(host->mmc));
2146 + "interrupt - cmd%d.\n", mmc_hostname(host->mmc), host->last_cmdop);
2147 sdhci_dumpregs(host);
2148
2149 if (host->data) {
2150 @@ -2181,7 +2284,7 @@
2151 }
2152
2153 mmiowb();
2154 - spin_unlock_irqrestore(&host->lock, flags);
2155 + sdhci_spin_unlock_irqrestore(host, flags);
2156 }
2157
2158 static void sdhci_tuning_timer(unsigned long data)
2159 @@ -2191,11 +2294,11 @@
2160
2161 host = (struct sdhci_host *)data;
2162
2163 - spin_lock_irqsave(&host->lock, flags);
2164 + sdhci_spin_lock_irqsave(host, &flags);
2165
2166 host->flags |= SDHCI_NEEDS_RETUNING;
2167
2168 - spin_unlock_irqrestore(&host->lock, flags);
2169 + sdhci_spin_unlock_irqrestore(host, flags);
2170 }
2171
2172 /*****************************************************************************\
2173 @@ -2209,10 +2312,13 @@
2174 BUG_ON(intmask == 0);
2175
2176 if (!host->cmd) {
2177 + if (!(host->ops->extra_ints)) {
2178 pr_err("%s: Got command interrupt 0x%08x even "
2179 "though no command operation was in progress.\n",
2180 mmc_hostname(host->mmc), (unsigned)intmask);
2181 sdhci_dumpregs(host);
2182 + } else
2183 + DBG("cmd irq 0x%08x cmd complete\n", (unsigned)intmask);
2184 return;
2185 }
2186
2187 @@ -2282,6 +2388,19 @@
2188 static void sdhci_show_adma_error(struct sdhci_host *host) { }
2189 #endif
2190
2191 +static void sdhci_data_end(struct sdhci_host *host)
2192 +{
2193 + if (host->cmd) {
2194 + /*
2195 + * Data managed to finish before the
2196 + * command completed. Make sure we do
2197 + * things in the proper order.
2198 + */
2199 + host->data_early = 1;
2200 + } else
2201 + sdhci_finish_data(host);
2202 +}
2203 +
2204 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2205 {
2206 u32 command;
2207 @@ -2311,23 +2430,39 @@
2208 }
2209 }
2210
2211 + if (!(host->ops->extra_ints)) {
2212 pr_err("%s: Got data interrupt 0x%08x even "
2213 "though no data operation was in progress.\n",
2214 mmc_hostname(host->mmc), (unsigned)intmask);
2215 sdhci_dumpregs(host);
2216 + } else
2217 + DBG("data irq 0x%08x but no data\n", (unsigned)intmask);
2218
2219 return;
2220 }
2221
2222 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2223 host->data->error = -ETIMEDOUT;
2224 - else if (intmask & SDHCI_INT_DATA_END_BIT)
2225 + else if (intmask & SDHCI_INT_DATA_END_BIT) {
2226 + DBG("end error in cmd %d\n", host->last_cmdop);
2227 + if (host->ops->spurious_crc_acmd51 &&
2228 + host->last_cmdop == -SD_APP_SEND_SCR) {
2229 + DBG("ignoring spurious data_end_bit error\n");
2230 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2231 + } else
2232 host->data->error = -EILSEQ;
2233 - else if ((intmask & SDHCI_INT_DATA_CRC) &&
2234 + } else if ((intmask & SDHCI_INT_DATA_CRC) &&
2235 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2236 - != MMC_BUS_TEST_R)
2237 + != MMC_BUS_TEST_R) {
2238 + DBG("crc error in cmd %d\n", host->last_cmdop);
2239 + if (host->ops->spurious_crc_acmd51 &&
2240 + host->last_cmdop == -SD_APP_SEND_SCR) {
2241 + DBG("ignoring spurious data_crc_bit error\n");
2242 + intmask = SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END;
2243 + } else {
2244 host->data->error = -EILSEQ;
2245 - else if (intmask & SDHCI_INT_ADMA_ERROR) {
2246 + }
2247 + } else if (intmask & SDHCI_INT_ADMA_ERROR) {
2248 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2249 sdhci_show_adma_error(host);
2250 host->data->error = -EIO;
2251 @@ -2335,11 +2470,18 @@
2252 host->ops->adma_workaround(host, intmask);
2253 }
2254
2255 - if (host->data->error)
2256 + if (host->data->error) {
2257 + DBG("finish request early on error %d\n", host->data->error);
2258 sdhci_finish_data(host);
2259 - else {
2260 - if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2261 - sdhci_transfer_pio(host);
2262 + } else {
2263 + if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
2264 + if (host->flags & SDHCI_REQ_USE_DMA) {
2265 + /* possible only in PLATDMA mode */
2266 + sdhci_platdma_avail(host, &intmask,
2267 + &sdhci_data_end);
2268 + } else
2269 + sdhci_transfer_pio(host, intmask);
2270 + }
2271
2272 /*
2273 * We currently don't do anything fancy with DMA
2274 @@ -2368,18 +2510,8 @@
2275 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2276 }
2277
2278 - if (intmask & SDHCI_INT_DATA_END) {
2279 - if (host->cmd) {
2280 - /*
2281 - * Data managed to finish before the
2282 - * command completed. Make sure we do
2283 - * things in the proper order.
2284 - */
2285 - host->data_early = 1;
2286 - } else {
2287 - sdhci_finish_data(host);
2288 - }
2289 - }
2290 + if (intmask & SDHCI_INT_DATA_END)
2291 + sdhci_data_end(host);
2292 }
2293 }
2294
2295 @@ -2390,10 +2522,10 @@
2296 u32 intmask, unexpected = 0;
2297 int cardint = 0, max_loops = 16;
2298
2299 - spin_lock(&host->lock);
2300 + sdhci_spin_lock(host);
2301
2302 if (host->runtime_suspended) {
2303 - spin_unlock(&host->lock);
2304 + sdhci_spin_unlock(host);
2305 pr_warning("%s: got irq while runtime suspended\n",
2306 mmc_hostname(host->mmc));
2307 return IRQ_HANDLED;
2308 @@ -2435,6 +2567,22 @@
2309 tasklet_schedule(&host->card_tasklet);
2310 }
2311
2312 + if (intmask & SDHCI_INT_ERROR_MASK & ~SDHCI_INT_ERROR)
2313 + DBG("controller reports error 0x%x -"
2314 + "%s%s%s%s%s%s%s%s%s%s",
2315 + intmask,
2316 + intmask & SDHCI_INT_TIMEOUT? " timeout": "",
2317 + intmask & SDHCI_INT_CRC ? " crc": "",
2318 + intmask & SDHCI_INT_END_BIT? " endbit": "",
2319 + intmask & SDHCI_INT_INDEX? " index": "",
2320 + intmask & SDHCI_INT_DATA_TIMEOUT? " data_timeout": "",
2321 + intmask & SDHCI_INT_DATA_CRC? " data_crc": "",
2322 + intmask & SDHCI_INT_DATA_END_BIT? " data_endbit": "",
2323 + intmask & SDHCI_INT_BUS_POWER? " buspower": "",
2324 + intmask & SDHCI_INT_ACMD12ERR? " acmd12": "",
2325 + intmask & SDHCI_INT_ADMA_ERROR? " adma": ""
2326 + );
2327 +
2328 if (intmask & SDHCI_INT_CMD_MASK) {
2329 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
2330 SDHCI_INT_STATUS);
2331 @@ -2449,7 +2597,13 @@
2332
2333 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
2334
2335 - intmask &= ~SDHCI_INT_ERROR;
2336 + if (intmask & SDHCI_INT_ERROR_MASK) {
2337 + /* collect any uncovered errors */
2338 + sdhci_writel(host, intmask & SDHCI_INT_ERROR_MASK,
2339 + SDHCI_INT_STATUS);
2340 + }
2341 +
2342 + intmask &= ~SDHCI_INT_ERROR_MASK;
2343
2344 if (intmask & SDHCI_INT_BUS_POWER) {
2345 pr_err("%s: Card is consuming too much power!\n",
2346 @@ -2475,7 +2629,7 @@
2347 if (intmask && --max_loops)
2348 goto again;
2349 out:
2350 - spin_unlock(&host->lock);
2351 + sdhci_spin_unlock(host);
2352
2353 if (unexpected) {
2354 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2355 @@ -2569,7 +2723,8 @@
2356 {
2357 int ret;
2358
2359 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2360 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2361 + SDHCI_USE_PLATDMA)) {
2362 if (host->ops->enable_dma)
2363 host->ops->enable_dma(host);
2364 }
2365 @@ -2636,15 +2791,15 @@
2366 host->flags &= ~SDHCI_NEEDS_RETUNING;
2367 }
2368
2369 - spin_lock_irqsave(&host->lock, flags);
2370 + sdhci_spin_lock_irqsave(host, &flags);
2371 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2372 - spin_unlock_irqrestore(&host->lock, flags);
2373 + sdhci_spin_unlock_irqrestore(host, flags);
2374
2375 synchronize_irq(host->irq);
2376
2377 - spin_lock_irqsave(&host->lock, flags);
2378 + sdhci_spin_lock_irqsave(host, &flags);
2379 host->runtime_suspended = true;
2380 - spin_unlock_irqrestore(&host->lock, flags);
2381 + sdhci_spin_unlock_irqrestore(host, flags);
2382
2383 return ret;
2384 }
2385 @@ -2670,16 +2825,16 @@
2386 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2387 if ((host_flags & SDHCI_PV_ENABLED) &&
2388 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2389 - spin_lock_irqsave(&host->lock, flags);
2390 + sdhci_spin_lock_irqsave(host, &flags);
2391 sdhci_enable_preset_value(host, true);
2392 - spin_unlock_irqrestore(&host->lock, flags);
2393 + sdhci_spin_unlock_irqrestore(host, flags);
2394 }
2395
2396 /* Set the re-tuning expiration flag */
2397 if (host->flags & SDHCI_USING_RETUNING_TIMER)
2398 host->flags |= SDHCI_NEEDS_RETUNING;
2399
2400 - spin_lock_irqsave(&host->lock, flags);
2401 + sdhci_spin_lock_irqsave(host, &flags);
2402
2403 host->runtime_suspended = false;
2404
2405 @@ -2690,7 +2845,7 @@
2406 /* Enable Card Detection */
2407 sdhci_enable_card_detection(host);
2408
2409 - spin_unlock_irqrestore(&host->lock, flags);
2410 + sdhci_spin_unlock_irqrestore(host, flags);
2411
2412 return ret;
2413 }
2414 @@ -2785,14 +2940,16 @@
2415 host->flags &= ~SDHCI_USE_ADMA;
2416 }
2417
2418 - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2419 + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2420 + SDHCI_USE_PLATDMA)) {
2421 if (host->ops->enable_dma) {
2422 if (host->ops->enable_dma(host)) {
2423 pr_warning("%s: No suitable DMA "
2424 "available. Falling back to PIO.\n",
2425 mmc_hostname(mmc));
2426 host->flags &=
2427 - ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2428 + ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA |
2429 + SDHCI_USE_PLATDMA);
2430 }
2431 }
2432 }
2433 @@ -3080,6 +3237,12 @@
2434 SDHCI_MAX_CURRENT_MULTIPLIER;
2435 }
2436
2437 + if(host->ops->voltage_broken) {
2438 + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2439 + // Cannot support UHS modes if we are stuck at 3.3V
2440 + mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50);
2441 + }
2442 +
2443 mmc->ocr_avail = ocr_avail;
2444 mmc->ocr_avail_sdio = ocr_avail;
2445 if (host->ocr_avail_sdio)
2446 @@ -3174,7 +3337,7 @@
2447 host->tuning_timer.function = sdhci_tuning_timer;
2448 }
2449
2450 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2451 + ret = request_irq(host->irq, sdhci_irq, 0,//IRQF_SHARED,
2452 mmc_hostname(mmc), host);
2453 if (ret) {
2454 pr_err("%s: Failed to request IRQ %d: %d\n",
2455 @@ -3210,6 +3373,7 @@
2456
2457 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
2458 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
2459 + (host->flags & SDHCI_USE_PLATDMA) ? "platform's DMA" :
2460 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
2461 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
2462
2463 @@ -3237,7 +3401,7 @@
2464 unsigned long flags;
2465
2466 if (dead) {
2467 - spin_lock_irqsave(&host->lock, flags);
2468 + sdhci_spin_lock_irqsave(host, &flags);
2469
2470 host->flags |= SDHCI_DEVICE_DEAD;
2471
2472 @@ -3249,7 +3413,7 @@
2473 tasklet_schedule(&host->finish_tasklet);
2474 }
2475
2476 - spin_unlock_irqrestore(&host->lock, flags);
2477 + sdhci_spin_unlock_irqrestore(host, flags);
2478 }
2479
2480 sdhci_disable_card_detection(host);
2481 diff -urwN linux-3.10/drivers/mmc/host/sdhci.h linux-rpi-3.10.y/drivers/mmc/host/sdhci.h
2482 --- linux-3.10/drivers/mmc/host/sdhci.h 2013-06-30 23:13:29.000000000 +0100
2483 +++ linux-rpi-3.10.y/drivers/mmc/host/sdhci.h 2013-07-06 15:25:50.000000000 +0100
2484 @@ -289,6 +289,20 @@
2485 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
2486 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
2487 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
2488 +
2489 + int (*pdma_able)(struct sdhci_host *host,
2490 + struct mmc_data *data);
2491 + void (*pdma_avail)(struct sdhci_host *host,
2492 + unsigned int *ref_intmask,
2493 + void(*complete)(struct sdhci_host *));
2494 + void (*pdma_reset)(struct sdhci_host *host,
2495 + struct mmc_data *data);
2496 + unsigned int (*extra_ints)(struct sdhci_host *host);
2497 + unsigned int (*spurious_crc_acmd51)(struct sdhci_host *host);
2498 + unsigned int (*voltage_broken)(struct sdhci_host *host);
2499 + unsigned int (*uhs_broken)(struct sdhci_host *host);
2500 + unsigned int (*missing_status)(struct sdhci_host *host);
2501 +
2502 void (*hw_reset)(struct sdhci_host *host);
2503 void (*platform_suspend)(struct sdhci_host *host);
2504 void (*platform_resume)(struct sdhci_host *host);
2505 @@ -399,9 +413,38 @@
2506 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
2507 #endif
2508
2509 +static inline int /*bool*/
2510 +sdhci_platdma_dmaable(struct sdhci_host *host, struct mmc_data *data)
2511 +{
2512 + if (host->ops->pdma_able)
2513 + return host->ops->pdma_able(host, data);
2514 + else
2515 + return 1;
2516 +}
2517 +static inline void
2518 +sdhci_platdma_avail(struct sdhci_host *host, unsigned int *ref_intmask,
2519 + void(*completion_callback)(struct sdhci_host *))
2520 +{
2521 + if (host->ops->pdma_avail)
2522 + host->ops->pdma_avail(host, ref_intmask, completion_callback);
2523 +}
2524 +
2525 +static inline void
2526 +sdhci_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
2527 +{
2528 + if (host->ops->pdma_reset)
2529 + host->ops->pdma_reset(host, data);
2530 +}
2531 +
2532 #ifdef CONFIG_PM_RUNTIME
2533 extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
2534 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
2535 #endif
2536
2537 +extern void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags);
2538 +extern void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags);
2539 +extern void sdhci_spin_lock(struct sdhci_host *host);
2540 +extern void sdhci_spin_unlock(struct sdhci_host *host);
2541 +
2542 +
2543 #endif /* __SDHCI_HW_H */
2544 diff -urN linux-3.10/include/linux/mmc/host.h linux-rpi-3.10.y/include/linux/mmc/host.h
2545 --- linux-3.10/include/linux/mmc/host.h 2013-06-30 23:13:29.000000000 +0100
2546 +++ linux-rpi-3.10.y/include/linux/mmc/host.h 2013-07-06 15:25:50.000000000 +0100
2547 @@ -281,6 +281,7 @@
2548 #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
2549 MMC_CAP2_PACKED_WR)
2550 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
2551 +#define MMC_CAP2_FORCE_MULTIBLOCK (1 << 31) /* Always use multiblock transfers */
2552
2553 mmc_pm_flag_t pm_caps; /* supported pm features */
2554
2555 diff -urN linux-3.10/include/linux/mmc/sdhci.h linux-rpi-3.10.y/include/linux/mmc/sdhci.h
2556 --- linux-3.10/include/linux/mmc/sdhci.h 2013-06-30 23:13:29.000000000 +0100
2557 +++ linux-rpi-3.10.y/include/linux/mmc/sdhci.h 2013-07-06 15:25:50.000000000 +0100
2558 @@ -97,6 +97,7 @@
2559 #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
2560
2561 int irq; /* Device IRQ */
2562 + int second_irq; /* Additional IRQ to disable/enable in low-latency mode */
2563 void __iomem *ioaddr; /* Mapped address */
2564
2565 const struct sdhci_ops *ops; /* Low level hw interface */
2566 @@ -128,6 +129,7 @@
2567 #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
2568 #define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
2569 #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
2570 +#define SDHCI_USE_PLATDMA (1<<12) /* Host uses 3rd party DMA */
2571
2572 unsigned int version; /* SDHCI spec. version */
2573
2574 @@ -142,6 +144,7 @@
2575
2576 struct mmc_request *mrq; /* Current request */
2577 struct mmc_command *cmd; /* Current command */
2578 + int last_cmdop; /* Opcode of last cmd sent */
2579 struct mmc_data *data; /* Current data request */
2580 unsigned int data_early:1; /* Data finished before cmd */
2581