d59df5b28b7e364be4e0f4851c43f8a4817bf630
[openwrt/openwrt.git] / target/linux/brcm2708/patches-3.14/0013-Fixes-for-sdhci-bcm2708.patch
1 From 58a6eba44c8b8a13ff99a7696519437c0abeb3aa Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 8 May 2012 23:12:13 +0100
4 Subject: [PATCH 13/54] Fixes for sdhci-bcm2708
5
6 possible fix for sdcard missing status. Thanks naren
7
8 sdcard patch improvements from naren
9
10 sdhci-bcm2708: speed up DMA sync
11
12 Experiments show that it doesn't really take that long to sync, so we
13 can reduce the poll interval slightly. Might improve performance a bit.
14
15 sdhci-bcm2708: remove custom clock handling
16
17 The custom clock handling code is redundant and buggy. The MMC/SDHCI
18 subsystem does a better job, so remove it for good.
19
20 sdhci-bcm2708: add additional quirks
21
22 Some additional quirks are needed for correct operation.
23 There's no SDHCI capabilities register documented, and it always reads
24 zero, so add SDHCI_QUIRK_MISSING_CAPS. Apparently
25 SDHCI_QUIRK_NO_HISPD_BIT is needed for many cards to work correctly in
26 high-speed mode, so add it as well.
27
28 sdhci-bcm2708: add allow_highspeed parameter
29
30 Add a parameter to disable high-speed mode for the few cards that
31 still might have problems. High-speed mode is enabled by default.
32
33 sdhci-bcm2708: assume 50 MHz eMMC clock
34
35 An 80 MHz clock is not well suited to being divided down to SD clocks of 25
36 MHz (default mode) or 50 MHz (high-speed mode). A 50 MHz clock is perfect for
37 driving the SD interface at its ideal frequencies.
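
A minimal sketch of the divider arithmetic behind this change (it mirrors the
even-divisor search in the sdhci_bcm2708_set_clock code removed further down;
pick_div and the literal 2046 standing in for SDHCI_MAX_DIV_SPEC_300 are
illustrative assumptions, not driver code):

    /* Achievable SD clocks are base/div, where div is 1 or an even number:
     *   base = 80 MHz: 80, 40, 20, ... MHz  -> neither 25 nor 50 MHz
     *   base = 50 MHz: 50, 25, ...     MHz  -> exactly the ideal SD rates
     */
    static unsigned int pick_div(unsigned int base, unsigned int want)
    {
            unsigned int div;

            if (base <= want)
                    return 1;
            for (div = 2; div < 2046; div += 2)
                    if (base / div <= want)
                            break;
            return div;
    }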
38
39 Allow the eMMC clock to be specified as a command-line parameter
40
41 sdhci-bcm2708: raise DMA sync timeout
42
43 Commit d64b84c accidentally reduced the maximum overall DMA sync
44 timeout from 100ms to 30ms, which isn't enough for many cards.
45 Increase it to 150ms, just to be extra safe. According to commit
46 872a8ff in the MMC subsystem, some cards require crazy long
47 timeouts (3s), but as we're busy-waiting and shouldn't delay for
48 such a long time, let's hope 150ms will be enough for most
49 cards.
50
51 Use ndelay rather than udelay. Thanks lb
52
53 Add sync_after_dma module parameter
54
55 sdhci-bcm2708: use extension FIFO to buffer DMA transfers
56
57 The additional FIFO might speed up transfers in some cases.
58
59 sdhci-bcm2708: use multiblock-type transfers for single blocks
60
61 There are issues with both single-block reads (missed completion)
62 and writes (data loss in some cases!). Stop issuing single-block
63 transfers altogether and treat them as multiblock transfers. This
64 adds a quirk for this behaviour and uses it.
65
66 Add a module parameter for the missing_status quirk; sdhci-bcm2708.missing_status=0 may improve interrupt latency
67
68 Fix spinlock recursion in sdhci-bcm2708.c
69
70 mmc: Report 3.3V support in caps
71
72 sdhci: Use macros for our spin lock/unlock functions to reduce diffs with upstream code
73
74 sdhci: sdhci_bcm2708_quirk_voltage_broken appears to be a no-op
75
76 sdhci: sdhci_bcm2708_uhs_broken should be handled through caps reported
77
78 Add low-latency mode to sdcard driver. Disable with sdhci-bcm2708.enable_llm=0. Thanks ddv2005.
79
80 Allow the number of cycles of delay between sdcard peripheral writes to be specified on the command line with sdhci-bcm2708.cycle_delay
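
For reference, the parameters added by this series (see the module_param()
calls further down) can be combined on the kernel command line; the values
below are purely illustrative, not recommendations:

    sdhci-bcm2708.enable_llm=0 sdhci-bcm2708.cycle_delay=10
    sdhci-bcm2708.missing_status=0 sdhci-bcm2708.sync_after_dma=1
    sdhci-bcm2708.allow_highspeed=1 sdhci-bcm2708.emmc_clock_freq=50000000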
81
82 Lazy CRC quirk: Implemented retry mechanisms for reading the SD SSR and SCR, and disabled the missing_status and spurious CRC ACMD51 quirks by default (these should now be covered by the retry mechanism)
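
The retry policy added to drivers/mmc/core/sd.c below boils down to the
following (a condensed sketch of the loop introduced by this patch, with the
inner jiffies-based delay loop simplified to a single mdelay):

    /* retry for up to retry_timeout_ms (10 s), at least retry_min_tries (10)
     * times, pausing retry_delay_ms (10 ms) between attempts */
    timeout_at = jiffies + msecs_to_jiffies(retry_timeout_ms);
    tries = 0;
    while (tries < retry_min_tries || time_before(jiffies, timeout_at)) {
            tries++;
            err = mmc_app_send_scr(card, card->raw_scr);
            if (!err)
                    break;              /* success */
            touch_nmi_watchdog();       /* we are still alive! */
            mdelay(retry_delay_ms);     /* simplified from the patch */
    }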
83
84 mmc: suppress sdcard warnings we are happy to ignore by default
85
86 sdhci-bcm2708: Increase sync_after_dma timeout
87
88 The current timeout is being hit by some cards that complete successfully when given a longer timeout.
89 The timeout is not handled well, and that code path is believed to cause corruption.
90 Commit 872a8ff suggests that crappy cards can take up to 3 seconds to respond.
91
92 remove suspend/resume
93
94 fix sign in sdhci_bcm2708_raw_writel wait calculation
95
96 The ns_wait variable is intended to hold a lower bound on the number of nanoseconds that have elapsed since the last sdhci register write. However, the actual calculation of it was incorrect, as the subtraction was inverted. This commit fixes the calculation.
97
98 Note that this correction has no effect with the default cycle_delay of 2 and the default clock rate of 50 MHz: in that case ns_2clk is 40 nanoseconds, and ns_wait (whether the subtraction is done correctly or not) cannot be less than 40, except during the one-microsecond window just before the tick counter wraps around to meet last_write_hpt (i.e., approximately 4295 seconds after the preceding sdhci register write). The correction only comes into play when ns_2clk > 1000, which requires a cycle_delay of 51 or greater at the default clock rate. Under those conditions, sdhci_bcm2708_raw_writel will no longer wait for the full cycle_delay count if at least 1000 nanoseconds have already elapsed since the last register write.
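
As a worked example of the corrected path (quoting the new code in the hunk
below, and taking HPTIME_CLK_NS to be 1000, i.e. a 1 MHz tick counter, as the
one-microsecond and ~4295-second figures above imply):

    /* defaults: cycle_delay = 2, host->clock = 50 MHz */
    ns_2clk = cycle_delay * 1000000 / (host->clock / 1000);   /* = 40 ns */

    /* this branch only runs when now - last_write_hpt >= 2, so with the
     * corrected sign ns_wait is at least one full tick */
    ns_wait = HPTIME_CLK_NS * (now - last_write_hpt - 1);     /* >= 1000 ns */

    if (ns_wait < ns_2clk)             /* only possible once ns_2clk > 1000, */
            ndelay(ns_2clk - ns_wait); /* i.e. cycle_delay >= 51 at 50 MHz   */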
99
100 sdhci: Only do one iteration of PIO reading loop
101
102 Changed the wording of the logging. Previously, we received messages like this:
103 mmc0: could read SD Status register (SSR) at the 3th attempt
104 A more sensible message is now printed.
105 A typo in the comments was also fixed.
106 ---
107 drivers/mmc/card/block.c | 2 +-
108 drivers/mmc/core/sd.c | 110 +++++++++++++++--
109 drivers/mmc/host/sdhci-bcm2708.c | 254 ++++++++++++++++++---------------------
110 drivers/mmc/host/sdhci.c | 180 ++++++++++++++++++++-------
111 drivers/mmc/host/sdhci.h | 8 +-
112 include/linux/mmc/host.h | 1 +
113 include/linux/mmc/sdhci.h | 1 +
114 7 files changed, 365 insertions(+), 191 deletions(-)
115
116 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
117 index 7b5424f..687cccb 100644
118 --- a/drivers/mmc/card/block.c
119 +++ b/drivers/mmc/card/block.c
120 @@ -1361,7 +1361,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
121 brq->data.blocks = 1;
122 }
123
124 - if (brq->data.blocks > 1 || do_rel_wr) {
125 + if (brq->data.blocks > 1 || do_rel_wr || card->host->caps2 & MMC_CAP2_FORCE_MULTIBLOCK) {
126 /* SPI multiblock writes terminate using a special
127 * token, not a STOP_TRANSMISSION request.
128 */
129 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
130 index 692fdb1..ea11f9c 100644
131 --- a/drivers/mmc/core/sd.c
132 +++ b/drivers/mmc/core/sd.c
133 @@ -15,6 +15,8 @@
134 #include <linux/slab.h>
135 #include <linux/stat.h>
136 #include <linux/pm_runtime.h>
137 +#include <linux/jiffies.h>
138 +#include <linux/nmi.h>
139
140 #include <linux/mmc/host.h>
141 #include <linux/mmc/card.h>
142 @@ -67,6 +69,15 @@ static const unsigned int sd_au_size[] = {
143 __res & __mask; \
144 })
145
146 +// timeout for tries
147 +static const unsigned long retry_timeout_ms= 10*1000;
148 +
149 +// try at least 10 times, even if timeout is reached
150 +static const int retry_min_tries= 10;
151 +
152 +// delay between tries
153 +static const unsigned long retry_delay_ms= 10;
154 +
155 /*
156 * Given the decoded CSD structure, decode the raw CID to our CID structure.
157 */
158 @@ -219,12 +230,63 @@ static int mmc_decode_scr(struct mmc_card *card)
159 }
160
161 /*
162 - * Fetch and process SD Status register.
163 + * Fetch and process SD Configuration Register.
164 + */
165 +static int mmc_read_scr(struct mmc_card *card)
166 +{
167 + unsigned long timeout_at;
168 + int err, tries;
169 +
170 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
171 + tries= 0;
172 +
173 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
174 + {
175 + unsigned long delay_at;
176 + tries++;
177 +
178 + err = mmc_app_send_scr(card, card->raw_scr);
179 + if( !err )
180 + break; // success!!!
181 +
182 + touch_nmi_watchdog(); // we are still alive!
183 +
184 + // delay
185 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
186 + while( time_before( jiffies, delay_at ) )
187 + {
188 + mdelay( 1 );
189 + touch_nmi_watchdog(); // we are still alive!
190 + }
191 + }
192 +
193 + if( err)
194 + {
195 + pr_err("%s: failed to read SD Configuration register (SCR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
196 + return err;
197 + }
198 +
199 + if( tries > 1 )
200 + {
201 + pr_info("%s: read SD Configuration register (SCR) after %d attempts\n", mmc_hostname(card->host), tries );
202 + }
203 +
204 + err = mmc_decode_scr(card);
205 + if (err)
206 + return err;
207 +
208 + return err;
209 +}
210 +
211 +/*
212 + * Fetch and process SD Status Register.
213 */
214 static int mmc_read_ssr(struct mmc_card *card)
215 {
216 + unsigned long timeout_at;
217 unsigned int au, es, et, eo;
218 int err, i;
219 + int tries;
220 u32 *ssr;
221
222 if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
223 @@ -237,14 +299,40 @@ static int mmc_read_ssr(struct mmc_card *card)
224 if (!ssr)
225 return -ENOMEM;
226
227 - err = mmc_app_sd_status(card, ssr);
228 - if (err) {
229 - pr_warning("%s: problem reading SD Status "
230 - "register.\n", mmc_hostname(card->host));
231 - err = 0;
232 + timeout_at= jiffies + msecs_to_jiffies( retry_timeout_ms );
233 + tries= 0;
234 +
235 + while( tries < retry_min_tries || time_before( jiffies, timeout_at ) )
236 + {
237 + unsigned long delay_at;
238 + tries++;
239 +
240 + err= mmc_app_sd_status(card, ssr);
241 + if( !err )
242 + break; // success!!!
243 +
244 + touch_nmi_watchdog(); // we are still alive!
245 +
246 + // delay
247 + delay_at= jiffies + msecs_to_jiffies( retry_delay_ms );
248 + while( time_before( jiffies, delay_at ) )
249 + {
250 + mdelay( 1 );
251 + touch_nmi_watchdog(); // we are still alive!
252 + }
253 + }
254 +
255 + if( err)
256 + {
257 + pr_err("%s: failed to read SD Status register (SSR) after %d tries during %lu ms, error %d\n", mmc_hostname(card->host), tries, retry_timeout_ms, err );
258 goto out;
259 }
260
261 + if( tries > 1 )
262 + {
263 + pr_info("%s: read SD Status register (SSR) after %d attempts\n", mmc_hostname(card->host), tries );
264 + }
265 +
266 for (i = 0; i < 16; i++)
267 ssr[i] = be32_to_cpu(ssr[i]);
268
269 @@ -826,14 +914,10 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
270
271 if (!reinit) {
272 /*
273 - * Fetch SCR from card.
274 + * Fetch and decode SD Configuration register.
275 */
276 - err = mmc_app_send_scr(card, card->raw_scr);
277 - if (err)
278 - return err;
279 -
280 - err = mmc_decode_scr(card);
281 - if (err)
282 + err = mmc_read_scr(card);
283 + if( err )
284 return err;
285
286 /*
287 diff --git a/drivers/mmc/host/sdhci-bcm2708.c b/drivers/mmc/host/sdhci-bcm2708.c
288 index d8ef77c..3173c18 100644
289 --- a/drivers/mmc/host/sdhci-bcm2708.c
290 +++ b/drivers/mmc/host/sdhci-bcm2708.c
291 @@ -51,7 +51,6 @@
292 #undef CONFIG_MMC_SDHCI_BCM2708_DMA
293 #define CONFIG_MMC_SDHCI_BCM2708_DMA y
294
295 -#define USE_SYNC_AFTER_DMA
296 #ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
297 /* #define CHECK_DMA_USE */
298 #endif
299 @@ -73,7 +72,12 @@
300 #define BCM2708_SDHCI_SLEEP_TIMEOUT 1000 /* msecs */
301
302 /* Mhz clock that the EMMC core is running at. Should match the platform clockman settings */
303 -#define BCM2708_EMMC_CLOCK_FREQ 80000000
304 +#define BCM2708_EMMC_CLOCK_FREQ 50000000
305 +
306 +#define REG_EXRDFIFO_EN 0x80
307 +#define REG_EXRDFIFO_CFG 0x84
308 +
309 +int cycle_delay=2;
310
311 /*****************************************************************************\
312 * *
313 @@ -129,6 +133,14 @@ static inline unsigned long int since_ns(hptime_t t)
314 return (unsigned long)((hptime() - t) * HPTIME_CLK_NS);
315 }
316
317 +static bool allow_highspeed = 1;
318 +static int emmc_clock_freq = BCM2708_EMMC_CLOCK_FREQ;
319 +static bool sync_after_dma = 1;
320 +static bool missing_status = 1;
321 +static bool spurious_crc_acmd51 = 0;
322 +bool enable_llm = 1;
323 +bool extra_messages = 0;
324 +
325 #if 0
326 static void hptime_test(void)
327 {
328 @@ -241,19 +253,19 @@ static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
329 /* host->clock is the clock freq in Hz */
330 static hptime_t last_write_hpt;
331 hptime_t now = hptime();
332 - ns_2clk = 2000000000/host->clock;
333 + ns_2clk = cycle_delay*1000000/(host->clock/1000);
334
335 if (now == last_write_hpt || now == last_write_hpt+1) {
336 /* we can't guarantee any significant time has
337 * passed - we'll have to wait anyway ! */
338 - udelay((ns_2clk+1000-1)/1000);
339 + ndelay(ns_2clk);
340 } else
341 {
342 /* we must have waited at least this many ns: */
343 unsigned int ns_wait = HPTIME_CLK_NS *
344 - (last_write_hpt - now - 1);
345 + (now - last_write_hpt - 1);
346 if (ns_wait < ns_2clk)
347 - udelay((ns_2clk-ns_wait+500)/1000);
348 + ndelay(ns_2clk - ns_wait);
349 }
350 last_write_hpt = now;
351 }
352 @@ -269,13 +281,13 @@ static void sdhci_bcm2708_raw_writel(struct sdhci_host *host, u32 val, int reg)
353 ier &= ~SDHCI_INT_DATA_TIMEOUT;
354 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
355 timeout_disabled = true;
356 - udelay((ns_2clk+1000-1)/1000);
357 + ndelay(ns_2clk);
358 } else if (timeout_disabled) {
359 ier = readl(host->ioaddr + SDHCI_SIGNAL_ENABLE);
360 ier |= SDHCI_INT_DATA_TIMEOUT;
361 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
362 timeout_disabled = false;
363 - udelay((ns_2clk+1000-1)/1000);
364 + ndelay(ns_2clk);
365 }
366 #endif
367 writel(val, host->ioaddr + reg);
368 @@ -353,68 +365,9 @@ void sdhci_bcm2708_writeb(struct sdhci_host *host, u8 val, int reg)
369
370 static unsigned int sdhci_bcm2708_get_max_clock(struct sdhci_host *host)
371 {
372 - return 20000000; // this value is in Hz (20MHz)
373 -}
374 -
375 -static unsigned int sdhci_bcm2708_get_timeout_clock(struct sdhci_host *host)
376 -{
377 - if(host->clock)
378 - return (host->clock / 1000); // this value is in kHz (100MHz)
379 - else
380 - return (sdhci_bcm2708_get_max_clock(host) / 1000);
381 + return emmc_clock_freq;
382 }
383
384 -static void sdhci_bcm2708_set_clock(struct sdhci_host *host, unsigned int clock)
385 -{
386 - int div = 0;
387 - u16 clk = 0;
388 - unsigned long timeout;
389 -
390 - if (clock == host->clock)
391 - return;
392 -
393 - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
394 -
395 - if (clock == 0)
396 - goto out;
397 -
398 - if (BCM2708_EMMC_CLOCK_FREQ <= clock)
399 - div = 1;
400 - else {
401 - for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
402 - if ((BCM2708_EMMC_CLOCK_FREQ / div) <= clock)
403 - break;
404 - }
405 - }
406 -
407 - DBG( "desired SD clock: %d, actual: %d\n",
408 - clock, BCM2708_EMMC_CLOCK_FREQ / div);
409 -
410 - clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
411 - clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
412 - << SDHCI_DIVIDER_HI_SHIFT;
413 - clk |= SDHCI_CLOCK_INT_EN;
414 -
415 - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
416 -
417 - timeout = 20;
418 - while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
419 - & SDHCI_CLOCK_INT_STABLE)) {
420 - if (timeout == 0) {
421 - printk(KERN_ERR "%s: Internal clock never "
422 - "stabilised.\n", mmc_hostname(host->mmc));
423 - return;
424 - }
425 - timeout--;
426 - mdelay(1);
427 - }
428 -
429 - clk |= SDHCI_CLOCK_CARD_EN;
430 - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
431 -out:
432 - host->clock = clock;
433 - }
434 -
435 /*****************************************************************************\
436 * *
437 * DMA Operation *
438 @@ -695,11 +648,11 @@ void
439 sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
440 {
441 struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
442 - unsigned long flags;
443 +// unsigned long flags;
444
445 BUG_ON(NULL == host);
446
447 - spin_lock_irqsave(&host->lock, flags);
448 +// spin_lock_irqsave(&host->lock, flags);
449
450 if (host_priv->dma_wanted) {
451 if (NULL == data) {
452 @@ -720,13 +673,16 @@ sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
453 cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
454
455 if (!(BCM2708_DMA_ACTIVE & cs))
456 - printk(KERN_INFO "%s: missed completion of "
457 + {
458 + if (extra_messages)
459 + printk(KERN_INFO "%s: missed completion of "
460 "cmd %d DMA (%d/%d [%d]/[%d]) - "
461 "ignoring it\n",
462 mmc_hostname(host->mmc),
463 host->last_cmdop,
464 host_priv->sg_done, sg_todo,
465 host_priv->sg_ix+1, sg_len);
466 + }
467 else
468 printk(KERN_INFO "%s: resetting ongoing cmd %d"
469 "DMA before %d/%d [%d]/[%d] complete\n",
470 @@ -779,7 +735,7 @@ sdhci_bcm2708_platdma_reset(struct sdhci_host *host, struct mmc_data *data)
471 #endif
472 }
473
474 - spin_unlock_irqrestore(&host->lock, flags);
475 +// spin_unlock_irqrestore(&host->lock, flags);
476 }
477
478
479 @@ -792,11 +748,11 @@ static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
480 int sg_len;
481 int sg_ix;
482 int sg_todo;
483 - unsigned long flags;
484 +// unsigned long flags;
485
486 BUG_ON(NULL == host);
487
488 - spin_lock_irqsave(&host->lock, flags);
489 +// spin_lock_irqsave(&host->lock, flags);
490 data = host->data;
491
492 #ifdef CHECK_DMA_USE
493 @@ -821,7 +777,7 @@ static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
494
495 if (NULL == data) {
496 DBG("PDMA unused completion - status 0x%X\n", dma_cs);
497 - spin_unlock_irqrestore(&host->lock, flags);
498 +// spin_unlock_irqrestore(&host->lock, flags);
499 return;
500 }
501 sg = data->sg;
502 @@ -878,40 +834,34 @@ static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
503 SDHCI_INT_SPACE_AVAIL);
504 }
505 } else {
506 -#ifdef USE_SYNC_AFTER_DMA
507 - /* On the Arasan controller the stop command (which will be
508 - scheduled after this completes) does not seem to work
509 - properly if we allow it to be issued when we are
510 - transferring data to/from the SD card.
511 - We get CRC and DEND errors unless we wait for
512 - the SD controller to finish reading/writing to the card. */
513 - u32 state_mask;
514 - int timeout=1000000;
515 - hptime_t now = hptime();
516 -
517 - DBG("PDMA over - sync card\n");
518 - if (data->flags & MMC_DATA_READ)
519 - state_mask = SDHCI_DOING_READ;
520 - else
521 - state_mask = SDHCI_DOING_WRITE;
522 -
523 - while (0 != (sdhci_bcm2708_raw_readl(host,
524 - SDHCI_PRESENT_STATE) &
525 - state_mask) && --timeout > 0)
526 - continue;
527 + if (sync_after_dma) {
528 + /* On the Arasan controller the stop command (which will be
529 + scheduled after this completes) does not seem to work
530 + properly if we allow it to be issued when we are
531 + transferring data to/from the SD card.
532 + We get CRC and DEND errors unless we wait for
533 + the SD controller to finish reading/writing to the card. */
534 + u32 state_mask;
535 + int timeout=3*1000*1000;
536 +
537 + DBG("PDMA over - sync card\n");
538 + if (data->flags & MMC_DATA_READ)
539 + state_mask = SDHCI_DOING_READ;
540 + else
541 + state_mask = SDHCI_DOING_WRITE;
542
543 - if (1000000-timeout > 4000) /*ave. is about 3250*/
544 - DBG("%s: note - long %s sync %luns - "
545 - "%d its.\n",
546 - mmc_hostname(host->mmc),
547 - data->flags & MMC_DATA_READ? "read": "write",
548 - since_ns(now), 1000000-timeout);
549 - if (timeout <= 0)
550 - printk(KERN_ERR"%s: final %s to SD card still "
551 - "running\n",
552 - mmc_hostname(host->mmc),
553 - data->flags & MMC_DATA_READ? "read": "write");
554 -#endif
555 + while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE)
556 + & state_mask) && --timeout > 0)
557 + {
558 + udelay(1);
559 + continue;
560 + }
561 + if (timeout <= 0)
562 + printk(KERN_ERR"%s: final %s to SD card still "
563 + "running\n",
564 + mmc_hostname(host->mmc),
565 + data->flags & MMC_DATA_READ? "read": "write");
566 + }
567 if (host_priv->complete) {
568 (*host_priv->complete)(host);
569 DBG("PDMA %s complete\n",
570 @@ -920,7 +870,7 @@ static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
571 SDHCI_INT_SPACE_AVAIL);
572 }
573 }
574 - spin_unlock_irqrestore(&host->lock, flags);
575 +// spin_unlock_irqrestore(&host->lock, flags);
576 }
577
578 static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
579 @@ -929,12 +879,11 @@ static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
580 struct sdhci_host *host = dev_id;
581 struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
582 u32 dma_cs; /* control and status register */
583 - unsigned long flags;
584
585 BUG_ON(NULL == dev_id);
586 BUG_ON(NULL == host_priv->dma_chan_base);
587
588 - spin_lock_irqsave(&host->lock, flags);
589 + sdhci_spin_lock(host);
590
591 dma_cs = readl(host_priv->dma_chan_base + BCM2708_DMA_CS);
592
593 @@ -958,7 +907,8 @@ static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
594
595 if (!host_priv->dma_wanted) {
596 /* ignore this interrupt - it was reset */
597 - printk(KERN_INFO "%s: DMA IRQ %X ignored - "
598 + if (extra_messages)
599 + printk(KERN_INFO "%s: DMA IRQ %X ignored - "
600 "results were reset\n",
601 mmc_hostname(host->mmc), dma_cs);
602 #ifdef CHECK_DMA_USE
603 @@ -975,8 +925,7 @@ static irqreturn_t sdhci_bcm2708_dma_irq(int irq, void *dev_id)
604
605 result = IRQ_HANDLED;
606 }
607 -
608 - spin_unlock_irqrestore(&host->lock, flags);
609 + sdhci_spin_unlock(host);
610
611 return result;
612 }
613 @@ -1019,10 +968,12 @@ static ssize_t attr_dma_store(struct device *_dev,
614 int on = simple_strtol(buf, NULL, 0);
615 if (on) {
616 host->flags |= SDHCI_USE_PLATDMA;
617 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
618 printk(KERN_INFO "%s: DMA enabled\n",
619 mmc_hostname(host->mmc));
620 } else {
621 host->flags &= ~(SDHCI_USE_PLATDMA | SDHCI_REQ_USE_DMA);
622 + sdhci_bcm2708_writel(host, 0, REG_EXRDFIFO_EN);
623 printk(KERN_INFO "%s: DMA disabled\n",
624 mmc_hostname(host->mmc));
625 }
626 @@ -1126,7 +1077,7 @@ static int sdhci_bcm2708_suspend(struct platform_device *dev, pm_message_t state
627 int ret = 0;
628
629 if (host->mmc) {
630 - ret = mmc_suspend_host(host->mmc);
631 + //ret = mmc_suspend_host(host->mmc);
632 }
633
634 return ret;
635 @@ -1139,7 +1090,7 @@ static int sdhci_bcm2708_resume(struct platform_device *dev)
636 int ret = 0;
637
638 if (host->mmc) {
639 - ret = mmc_resume_host(host->mmc);
640 + //ret = mmc_resume_host(host->mmc);
641 }
642
643 return ret;
644 @@ -1158,19 +1109,14 @@ static unsigned int sdhci_bcm2708_quirk_extra_ints(struct sdhci_host *host)
645 return 1;
646 }
647
648 -static unsigned int sdhci_bcm2708_quirk_spurious_crc(struct sdhci_host *host)
649 +static unsigned int sdhci_bcm2708_quirk_spurious_crc_acmd51(struct sdhci_host *host)
650 {
651 return 1;
652 }
653
654 -static unsigned int sdhci_bcm2708_quirk_voltage_broken(struct sdhci_host *host)
655 +static unsigned int sdhci_bcm2708_missing_status(struct sdhci_host *host)
656 {
657 - return 1;
658 -}
659 -
660 -static unsigned int sdhci_bcm2708_uhs_broken(struct sdhci_host *host)
661 -{
662 - return 1;
663 + return 1;
664 }
665
666 /***************************************************************************** \
667 @@ -1190,11 +1136,7 @@ static struct sdhci_ops sdhci_bcm2708_ops = {
668 #else
669 #error The BCM2708 SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set
670 #endif
671 - //.enable_dma = NULL,
672 - .set_clock = sdhci_bcm2708_set_clock,
673 .get_max_clock = sdhci_bcm2708_get_max_clock,
674 - //.get_min_clock = NULL,
675 - .get_timeout_clock = sdhci_bcm2708_get_timeout_clock,
676
677 #ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
678 // Platform DMA operations
679 @@ -1203,9 +1145,6 @@ static struct sdhci_ops sdhci_bcm2708_ops = {
680 .pdma_reset = sdhci_bcm2708_platdma_reset,
681 #endif
682 .extra_ints = sdhci_bcm2708_quirk_extra_ints,
683 - .spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc,
684 - .voltage_broken = sdhci_bcm2708_quirk_voltage_broken,
685 - .uhs_broken = sdhci_bcm2708_uhs_broken,
686 };
687
688 /*****************************************************************************\
689 @@ -1244,15 +1183,30 @@ static int sdhci_bcm2708_probe(struct platform_device *pdev)
690 ret = PTR_ERR(host);
691 goto err;
692 }
693 + if (missing_status) {
694 + sdhci_bcm2708_ops.missing_status = sdhci_bcm2708_missing_status;
695 + }
696 +
697 + if( spurious_crc_acmd51 ) {
698 + sdhci_bcm2708_ops.spurious_crc_acmd51 = sdhci_bcm2708_quirk_spurious_crc_acmd51;
699 + }
700 +
701 +
702 + printk("sdhci: %s low-latency mode\n",enable_llm?"Enable":"Disable");
703
704 host->hw_name = "BCM2708_Arasan";
705 host->ops = &sdhci_bcm2708_ops;
706 host->irq = platform_get_irq(pdev, 0);
707 + host->second_irq = 0;
708
709 host->quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
710 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
711 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
712 - SDHCI_QUIRK_NONSTANDARD_CLOCK;
713 + SDHCI_QUIRK_MISSING_CAPS |
714 + SDHCI_QUIRK_NO_HISPD_BIT |
715 + (sync_after_dma ? 0:SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12);
716 +
717 +
718 #ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
719 host->flags = SDHCI_USE_PLATDMA;
720 #endif
721 @@ -1305,17 +1259,24 @@ static int sdhci_bcm2708_probe(struct platform_device *pdev)
722 host_priv->dma_chan = ret;
723
724 ret = request_irq(host_priv->dma_irq, sdhci_bcm2708_dma_irq,
725 - IRQF_SHARED, DRIVER_NAME " (dma)", host);
726 + 0 /*IRQF_SHARED*/, DRIVER_NAME " (dma)", host);
727 if (ret) {
728 dev_err(&pdev->dev, "cannot set DMA IRQ\n");
729 goto err_add_dma_irq;
730 }
731 + host->second_irq = host_priv->dma_irq;
732 DBG("DMA CBs %p handle %08X DMA%d %p DMA IRQ %d\n",
733 host_priv->cb_base, (unsigned)host_priv->cb_handle,
734 host_priv->dma_chan, host_priv->dma_chan_base,
735 host_priv->dma_irq);
736
737 - host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
738 + // we support 3.3V
739 + host->caps |= SDHCI_CAN_VDD_330;
740 + if (allow_highspeed)
741 + host->mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
742 +
743 + /* single block writes cause data loss with some SD cards! */
744 + host->mmc->caps2 |= MMC_CAP2_FORCE_MULTIBLOCK;
745 #endif
746
747 ret = sdhci_add_host(host);
748 @@ -1327,6 +1288,12 @@ static int sdhci_bcm2708_probe(struct platform_device *pdev)
749 ret = device_create_file(&pdev->dev, &dev_attr_dma_wait);
750 ret = device_create_file(&pdev->dev, &dev_attr_status);
751
752 +#ifdef CONFIG_MMC_SDHCI_BCM2708_DMA
753 + /* enable extension fifo for paced DMA transfers */
754 + sdhci_bcm2708_writel(host, 1, REG_EXRDFIFO_EN);
755 + sdhci_bcm2708_writel(host, 4, REG_EXRDFIFO_CFG);
756 +#endif
757 +
758 printk(KERN_INFO "%s: BCM2708 SDHC host at 0x%08llx DMA %d IRQ %d\n",
759 mmc_hostname(host->mmc), (unsigned long long)iomem->start,
760 host_priv->dma_chan, host_priv->dma_irq);
761 @@ -1418,7 +1385,26 @@ static void __exit sdhci_drv_exit(void)
762 module_init(sdhci_drv_init);
763 module_exit(sdhci_drv_exit);
764
765 +module_param(allow_highspeed, bool, 0444);
766 +module_param(emmc_clock_freq, int, 0444);
767 +module_param(sync_after_dma, bool, 0444);
768 +module_param(missing_status, bool, 0444);
769 +module_param(spurious_crc_acmd51, bool, 0444);
770 +module_param(enable_llm, bool, 0444);
771 +module_param(cycle_delay, int, 0444);
772 +module_param(extra_messages, bool, 0444);
773 +
774 MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
775 MODULE_AUTHOR("Broadcom <info@broadcom.com>");
776 MODULE_LICENSE("GPL v2");
777 MODULE_ALIAS("platform:"DRIVER_NAME);
778 +
779 +MODULE_PARM_DESC(allow_highspeed, "Allow high speed transfers modes");
780 +MODULE_PARM_DESC(emmc_clock_freq, "Specify the speed of emmc clock");
781 +MODULE_PARM_DESC(sync_after_dma, "Block in driver until dma complete");
782 +MODULE_PARM_DESC(missing_status, "Use the missing status quirk");
783 +MODULE_PARM_DESC(spurious_crc_acmd51, "Use the spurious crc quirk for reading SCR (ACMD51)");
784 +MODULE_PARM_DESC(enable_llm, "Enable low-latency mode");
785 +MODULE_PARM_DESC(extra_messages, "Enable more sdcard warning messages");
786 +
787 +
788 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
789 index b78afa2..db67be8 100644
790 --- a/drivers/mmc/host/sdhci.c
791 +++ b/drivers/mmc/host/sdhci.c
792 @@ -131,6 +131,99 @@ static void sdhci_dumpregs(struct sdhci_host *host)
793 * Low level functions *
794 * *
795 \*****************************************************************************/
796 +extern bool enable_llm;
797 +static int sdhci_locked=0;
798 +void sdhci_spin_lock(struct sdhci_host *host)
799 +{
800 + spin_lock(&host->lock);
801 +#ifdef CONFIG_PREEMPT
802 + if(enable_llm)
803 + {
804 + disable_irq_nosync(host->irq);
805 + if(host->second_irq)
806 + disable_irq_nosync(host->second_irq);
807 + local_irq_enable();
808 + }
809 +#endif
810 +}
811 +
812 +void sdhci_spin_unlock(struct sdhci_host *host)
813 +{
814 +#ifdef CONFIG_PREEMPT
815 + if(enable_llm)
816 + {
817 + local_irq_disable();
818 + if(host->second_irq)
819 + enable_irq(host->second_irq);
820 + enable_irq(host->irq);
821 + }
822 +#endif
823 + spin_unlock(&host->lock);
824 +}
825 +
826 +void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags)
827 +{
828 +#ifdef CONFIG_PREEMPT
829 + if(enable_llm)
830 + {
831 + while(sdhci_locked)
832 + {
833 + preempt_schedule();
834 + }
835 + spin_lock_irqsave(&host->lock,*flags);
836 + disable_irq(host->irq);
837 + if(host->second_irq)
838 + disable_irq(host->second_irq);
839 + local_irq_enable();
840 + }
841 + else
842 +#endif
843 + spin_lock_irqsave(&host->lock,*flags);
844 +}
845 +
846 +void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags)
847 +{
848 +#ifdef CONFIG_PREEMPT
849 + if(enable_llm)
850 + {
851 + local_irq_disable();
852 + if(host->second_irq)
853 + enable_irq(host->second_irq);
854 + enable_irq(host->irq);
855 + }
856 +#endif
857 + spin_unlock_irqrestore(&host->lock,flags);
858 +}
859 +
860 +static void sdhci_spin_enable_schedule(struct sdhci_host *host)
861 +{
862 +#ifdef CONFIG_PREEMPT
863 + if(enable_llm)
864 + {
865 + sdhci_locked = 1;
866 + preempt_enable();
867 + }
868 +#endif
869 +}
870 +
871 +static void sdhci_spin_disable_schedule(struct sdhci_host *host)
872 +{
873 +#ifdef CONFIG_PREEMPT
874 + if(enable_llm)
875 + {
876 + preempt_disable();
877 + sdhci_locked = 0;
878 + }
879 +#endif
880 +}
881 +
882 +
883 +#undef spin_lock_irqsave
884 +#define spin_lock_irqsave(host_lock, flags) sdhci_spin_lock_irqsave(container_of(host_lock, struct sdhci_host, lock), &flags)
885 +#define spin_unlock_irqrestore(host_lock, flags) sdhci_spin_unlock_irqrestore(container_of(host_lock, struct sdhci_host, lock), flags)
886 +
887 +#define spin_lock(host_lock) sdhci_spin_lock(container_of(host_lock, struct sdhci_host, lock))
888 +#define spin_unlock(host_lock) sdhci_spin_unlock(container_of(host_lock, struct sdhci_host, lock))
889
890 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
891 {
892 @@ -300,7 +393,7 @@ static void sdhci_led_control(struct led_classdev *led,
893 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
894 unsigned long flags;
895
896 - spin_lock_irqsave(&host->lock, flags);
897 + sdhci_spin_lock_irqsave(host, &flags);
898
899 if (host->runtime_suspended)
900 goto out;
901 @@ -310,7 +403,7 @@ static void sdhci_led_control(struct led_classdev *led,
902 else
903 sdhci_activate_led(host);
904 out:
905 - spin_unlock_irqrestore(&host->lock, flags);
906 + sdhci_spin_unlock_irqrestore(host, flags);
907 }
908 #endif
909
910 @@ -457,6 +550,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host, u32 intstate)
911 break;
912 state = sdhci_readl(host, SDHCI_PRESENT_STATE);
913 available = state & mask;
914 + break;
915 }
916
917 DBG("PIO transfer complete - %d blocks left.\n", host->blocks);
918 @@ -1023,7 +1117,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
919 return;
920 }
921 timeout--;
922 + sdhci_spin_enable_schedule(host);
923 mdelay(1);
924 + sdhci_spin_disable_schedule(host);
925 }
926 DBG("send cmd %d - wait 0x%X irq 0x%x\n", cmd->opcode, mask,
927 sdhci_readl(host, SDHCI_INT_STATUS));
928 @@ -1256,7 +1352,9 @@ clock_set:
929 return;
930 }
931 timeout--;
932 + sdhci_spin_enable_schedule(host);
933 mdelay(1);
934 + sdhci_spin_disable_schedule(host);
935 }
936
937 clk |= SDHCI_CLOCK_CARD_EN;
938 @@ -1357,7 +1455,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
939
940 sdhci_runtime_pm_get(host);
941
942 - spin_lock_irqsave(&host->lock, flags);
943 + sdhci_spin_lock_irqsave(host, &flags);
944
945 WARN_ON(host->mrq != NULL);
946
947 @@ -1422,9 +1520,9 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
948 */
949 host->mrq = NULL;
950
951 - spin_unlock_irqrestore(&host->lock, flags);
952 + sdhci_spin_unlock_irqrestore(host, flags);
953 sdhci_execute_tuning(mmc, tuning_opcode);
954 - spin_lock_irqsave(&host->lock, flags);
955 + sdhci_spin_lock_irqsave(host, &flags);
956
957 /* Restore original mmc_request structure */
958 host->mrq = mrq;
959 @@ -1438,7 +1536,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
960 }
961
962 mmiowb();
963 - spin_unlock_irqrestore(&host->lock, flags);
964 + sdhci_spin_unlock_irqrestore(host, flags);
965 }
966
967 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
968 @@ -1447,10 +1545,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
969 int vdd_bit = -1;
970 u8 ctrl;
971
972 - spin_lock_irqsave(&host->lock, flags);
973 + sdhci_spin_lock_irqsave(host, &flags);
974
975 if (host->flags & SDHCI_DEVICE_DEAD) {
976 - spin_unlock_irqrestore(&host->lock, flags);
977 + sdhci_spin_unlock_irqrestore(host, flags);
978 if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
979 mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
980 return;
981 @@ -1478,9 +1576,9 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
982 vdd_bit = sdhci_set_power(host, ios->vdd);
983
984 if (host->vmmc && vdd_bit != -1) {
985 - spin_unlock_irqrestore(&host->lock, flags);
986 + sdhci_spin_unlock_irqrestore(host, flags);
987 mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
988 - spin_lock_irqsave(&host->lock, flags);
989 + sdhci_spin_lock_irqsave(host, &flags);
990 }
991
992 if (host->ops->platform_send_init_74_clocks)
993 @@ -1519,7 +1617,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
994 else
995 ctrl &= ~SDHCI_CTRL_HISPD;
996
997 - if (host->version >= SDHCI_SPEC_300 && !(host->ops->uhs_broken)) {
998 + if (host->version >= SDHCI_SPEC_300) {
999 u16 clk, ctrl_2;
1000
1001 /* In case of UHS-I modes, set High Speed Enable */
1002 @@ -1617,7 +1715,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1003 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1004
1005 mmiowb();
1006 - spin_unlock_irqrestore(&host->lock, flags);
1007 + sdhci_spin_unlock_irqrestore(host, flags);
1008 }
1009
1010 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1011 @@ -1665,7 +1763,7 @@ static int sdhci_check_ro(struct sdhci_host *host)
1012 unsigned long flags;
1013 int is_readonly;
1014
1015 - spin_lock_irqsave(&host->lock, flags);
1016 + sdhci_spin_lock_irqsave(host, &flags);
1017
1018 if (host->flags & SDHCI_DEVICE_DEAD)
1019 is_readonly = 0;
1020 @@ -1675,7 +1773,7 @@ static int sdhci_check_ro(struct sdhci_host *host)
1021 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1022 & SDHCI_WRITE_PROTECT);
1023
1024 - spin_unlock_irqrestore(&host->lock, flags);
1025 + sdhci_spin_unlock_irqrestore(host, flags);
1026
1027 /* This quirk needs to be replaced by a callback-function later */
1028 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1029 @@ -1748,9 +1846,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1030 struct sdhci_host *host = mmc_priv(mmc);
1031 unsigned long flags;
1032
1033 - spin_lock_irqsave(&host->lock, flags);
1034 + sdhci_spin_lock_irqsave(host, &flags);
1035 sdhci_enable_sdio_irq_nolock(host, enable);
1036 - spin_unlock_irqrestore(&host->lock, flags);
1037 + sdhci_spin_unlock_irqrestore(host, flags);
1038 }
1039
1040 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1041 @@ -2101,7 +2199,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
1042 if (host->ops->card_event)
1043 host->ops->card_event(host);
1044
1045 - spin_lock_irqsave(&host->lock, flags);
1046 + sdhci_spin_lock_irqsave(host, &flags);
1047
1048 /* Check host->mrq first in case we are runtime suspended */
1049 if (host->mrq && !sdhci_do_get_cd(host)) {
1050 @@ -2117,7 +2215,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
1051 tasklet_schedule(&host->finish_tasklet);
1052 }
1053
1054 - spin_unlock_irqrestore(&host->lock, flags);
1055 + sdhci_spin_unlock_irqrestore(host, flags);
1056 }
1057
1058 static const struct mmc_host_ops sdhci_ops = {
1059 @@ -2156,14 +2254,14 @@ static void sdhci_tasklet_finish(unsigned long param)
1060
1061 host = (struct sdhci_host*)param;
1062
1063 - spin_lock_irqsave(&host->lock, flags);
1064 + sdhci_spin_lock_irqsave(host, &flags);
1065
1066 /*
1067 * If this tasklet gets rescheduled while running, it will
1068 * be run again afterwards but without any active request.
1069 */
1070 if (!host->mrq) {
1071 - spin_unlock_irqrestore(&host->lock, flags);
1072 + sdhci_spin_unlock_irqrestore(host, flags);
1073 return;
1074 }
1075
1076 @@ -2201,7 +2299,7 @@ static void sdhci_tasklet_finish(unsigned long param)
1077 #endif
1078
1079 mmiowb();
1080 - spin_unlock_irqrestore(&host->lock, flags);
1081 + sdhci_spin_unlock_irqrestore(host, flags);
1082
1083 mmc_request_done(host->mmc, mrq);
1084 sdhci_runtime_pm_put(host);
1085 @@ -2214,7 +2312,7 @@ static void sdhci_timeout_timer(unsigned long data)
1086
1087 host = (struct sdhci_host*)data;
1088
1089 - spin_lock_irqsave(&host->lock, flags);
1090 + sdhci_spin_lock_irqsave(host, &flags);
1091
1092 if (host->mrq) {
1093 pr_err("%s: Timeout waiting for hardware "
1094 @@ -2235,7 +2333,7 @@ static void sdhci_timeout_timer(unsigned long data)
1095 }
1096
1097 mmiowb();
1098 - spin_unlock_irqrestore(&host->lock, flags);
1099 + sdhci_spin_unlock_irqrestore(host, flags);
1100 }
1101
1102 static void sdhci_tuning_timer(unsigned long data)
1103 @@ -2245,11 +2343,11 @@ static void sdhci_tuning_timer(unsigned long data)
1104
1105 host = (struct sdhci_host *)data;
1106
1107 - spin_lock_irqsave(&host->lock, flags);
1108 + sdhci_spin_lock_irqsave(host, &flags);
1109
1110 host->flags |= SDHCI_NEEDS_RETUNING;
1111
1112 - spin_unlock_irqrestore(&host->lock, flags);
1113 + sdhci_spin_unlock_irqrestore(host, flags);
1114 }
1115
1116 /*****************************************************************************\
1117 @@ -2473,10 +2571,10 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1118 u32 intmask, unexpected = 0;
1119 int cardint = 0, max_loops = 16;
1120
1121 - spin_lock(&host->lock);
1122 + sdhci_spin_lock(host);
1123
1124 if (host->runtime_suspended) {
1125 - spin_unlock(&host->lock);
1126 + sdhci_spin_unlock(host);
1127 pr_warning("%s: got irq while runtime suspended\n",
1128 mmc_hostname(host->mmc));
1129 return IRQ_HANDLED;
1130 @@ -2588,7 +2686,7 @@ again:
1131 if (intmask && --max_loops)
1132 goto again;
1133 out:
1134 - spin_unlock(&host->lock);
1135 + sdhci_spin_unlock(host);
1136
1137 if (unexpected) {
1138 pr_err("%s: Unexpected interrupt 0x%08x.\n",
1139 @@ -2674,7 +2772,7 @@ int sdhci_resume_host(struct sdhci_host *host)
1140 }
1141
1142 if (!device_may_wakeup(mmc_dev(host->mmc))) {
1143 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1144 + ret = request_irq(host->irq, sdhci_irq, 0 /*IRQF_SHARED*/,
1145 mmc_hostname(host->mmc), host);
1146 if (ret)
1147 return ret;
1148 @@ -2750,15 +2848,15 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
1149 host->flags &= ~SDHCI_NEEDS_RETUNING;
1150 }
1151
1152 - spin_lock_irqsave(&host->lock, flags);
1153 + sdhci_spin_lock_irqsave(host, &flags);
1154 sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
1155 - spin_unlock_irqrestore(&host->lock, flags);
1156 + sdhci_spin_unlock_irqrestore(host, flags);
1157
1158 synchronize_irq(host->irq);
1159
1160 - spin_lock_irqsave(&host->lock, flags);
1161 + sdhci_spin_lock_irqsave(host, &flags);
1162 host->runtime_suspended = true;
1163 - spin_unlock_irqrestore(&host->lock, flags);
1164 + sdhci_spin_unlock_irqrestore(host, flags);
1165
1166 return ret;
1167 }
1168 @@ -2784,16 +2882,16 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
1169 sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
1170 if ((host_flags & SDHCI_PV_ENABLED) &&
1171 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
1172 - spin_lock_irqsave(&host->lock, flags);
1173 + sdhci_spin_lock_irqsave(host, &flags);
1174 sdhci_enable_preset_value(host, true);
1175 - spin_unlock_irqrestore(&host->lock, flags);
1176 + sdhci_spin_unlock_irqrestore(host, flags);
1177 }
1178
1179 /* Set the re-tuning expiration flag */
1180 if (host->flags & SDHCI_USING_RETUNING_TIMER)
1181 host->flags |= SDHCI_NEEDS_RETUNING;
1182
1183 - spin_lock_irqsave(&host->lock, flags);
1184 + sdhci_spin_lock_irqsave(host, &flags);
1185
1186 host->runtime_suspended = false;
1187
1188 @@ -2804,7 +2902,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
1189 /* Enable Card Detection */
1190 sdhci_enable_card_detection(host);
1191
1192 - spin_unlock_irqrestore(&host->lock, flags);
1193 + sdhci_spin_unlock_irqrestore(host, flags);
1194
1195 return ret;
1196 }
1197 @@ -3300,8 +3398,8 @@ int sdhci_add_host(struct sdhci_host *host)
1198
1199 sdhci_init(host, 0);
1200
1201 - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1202 - mmc_hostname(mmc), host);
1203 + ret = request_irq(host->irq, sdhci_irq, 0 /*IRQF_SHARED*/,
1204 + mmc_hostname(mmc), host);
1205 if (ret) {
1206 pr_err("%s: Failed to request IRQ %d: %d\n",
1207 mmc_hostname(mmc), host->irq, ret);
1208 @@ -3362,7 +3460,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
1209 unsigned long flags;
1210
1211 if (dead) {
1212 - spin_lock_irqsave(&host->lock, flags);
1213 + sdhci_spin_lock_irqsave(host, &flags);
1214
1215 host->flags |= SDHCI_DEVICE_DEAD;
1216
1217 @@ -3374,7 +3472,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
1218 tasklet_schedule(&host->finish_tasklet);
1219 }
1220
1221 - spin_unlock_irqrestore(&host->lock, flags);
1222 + sdhci_spin_unlock_irqrestore(host, flags);
1223 }
1224
1225 sdhci_disable_card_detection(host);
1226 diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
1227 index 6857875..649f3cf 100644
1228 --- a/drivers/mmc/host/sdhci.h
1229 +++ b/drivers/mmc/host/sdhci.h
1230 @@ -300,8 +300,6 @@ struct sdhci_ops {
1231 struct mmc_data *data);
1232 unsigned int (*extra_ints)(struct sdhci_host *host);
1233 unsigned int (*spurious_crc_acmd51)(struct sdhci_host *host);
1234 - unsigned int (*voltage_broken)(struct sdhci_host *host);
1235 - unsigned int (*uhs_broken)(struct sdhci_host *host);
1236 unsigned int (*missing_status)(struct sdhci_host *host);
1237
1238 void (*hw_reset)(struct sdhci_host *host);
1239 @@ -445,4 +443,10 @@ extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
1240 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
1241 #endif
1242
1243 +extern void sdhci_spin_lock_irqsave(struct sdhci_host *host,unsigned long *flags);
1244 +extern void sdhci_spin_unlock_irqrestore(struct sdhci_host *host,unsigned long flags);
1245 +extern void sdhci_spin_lock(struct sdhci_host *host);
1246 +extern void sdhci_spin_unlock(struct sdhci_host *host);
1247 +
1248 +
1249 #endif /* __SDHCI_HW_H */
1250 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
1251 index 99f5709..cd6f996 100644
1252 --- a/include/linux/mmc/host.h
1253 +++ b/include/linux/mmc/host.h
1254 @@ -282,6 +282,7 @@ struct mmc_host {
1255 MMC_CAP2_PACKED_WR)
1256 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
1257 #define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */
1258 +#define MMC_CAP2_FORCE_MULTIBLOCK (1 << 31) /* Always use multiblock transfers */
1259
1260 mmc_pm_flag_t pm_caps; /* supported pm features */
1261
1262 diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
1263 index e23fffb..b1b6537 100644
1264 --- a/include/linux/mmc/sdhci.h
1265 +++ b/include/linux/mmc/sdhci.h
1266 @@ -102,6 +102,7 @@ struct sdhci_host {
1267 #define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
1268
1269 int irq; /* Device IRQ */
1270 + int second_irq; /* Additional IRQ to disable/enable in low-latency mode */
1271 void __iomem *ioaddr; /* Mapped address */
1272
1273 const struct sdhci_ops *ops; /* Low level hw interface */
1274 --
1275 1.9.1
1276