add the 'goldfish' target, useful for experimenting with virtual phone hardware ...
[openwrt/svn-archive/archive.git] / target / linux / goldfish / patches-2.6.30 / 0124--ARM-goldfish-mmc-goldfish-MMC-driver-building-an.patch
1 From 2eccfcf4c5c50b412a0859a345d8d28fc043956b Mon Sep 17 00:00:00 2001
2 From: Mike Lockwood <lockwood@android.com>
3 Date: Thu, 7 Feb 2008 07:47:30 -0500
4 Subject: [PATCH 124/134] [ARM] goldfish: mmc: goldfish MMC driver building and running in 2.6.27.
5
6 Signed-off-by: Mike A. Chan <mikechan@google.com>
7 ---
8 drivers/mmc/host/Kconfig | 8 +
9 drivers/mmc/host/Makefile | 1 +
10 drivers/mmc/host/goldfish.c | 583 +++++++++++++++++++++++++++++++++++++++++++
11 3 files changed, 592 insertions(+), 0 deletions(-)
12 create mode 100644 drivers/mmc/host/goldfish.c
13
14 --- a/drivers/mmc/host/Kconfig
15 +++ b/drivers/mmc/host/Kconfig
16 @@ -200,6 +200,14 @@ config MMC_MVSDIO
17 To compile this driver as a module, choose M here: the
18 module will be called mvsdio.
19
20 +config MMC_GOLDFISH
21 + tristate "goldfish qemu Multimedia Card Interface support"
22 + depends on ARCH_GOLDFISH
23 + help
24 +	  This selects the Goldfish Multimedia Card Interface emulation.
25 +
26 + If unsure, say N.
27 +
28 config MMC_SPI
29 tristate "MMC/SD/SDIO over SPI"
30 depends on SPI_MASTER && !HIGHMEM && HAS_DMA
31 --- a/drivers/mmc/host/Makefile
32 +++ b/drivers/mmc/host/Makefile
33 @@ -26,6 +26,7 @@ obj-$(CONFIG_MMC_SPI) += mmc_spi.o
34 ifeq ($(CONFIG_OF),y)
35 obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
36 endif
37 +obj-$(CONFIG_MMC_GOLDFISH) += goldfish.o
38 obj-$(CONFIG_MMC_S3C) += s3cmci.o
39 obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
40 obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
41 --- /dev/null
42 +++ b/drivers/mmc/host/goldfish.c
43 @@ -0,0 +1,583 @@
44 +/*
45 + *  linux/drivers/mmc/host/goldfish.c
46 + *
47 + * Copyright 2007, Google Inc.
48 + *
49 + * based on omap.c driver, which was
50 + * Copyright (C) 2004 Nokia Corporation
51 + *	Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
52 + * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
53 + * Other hacks (DMA, SD, etc) by David Brownell
54 + *
55 + * This program is free software; you can redistribute it and/or modify
56 + * it under the terms of the GNU General Public License version 2 as
57 + * published by the Free Software Foundation.
58 + */
59 +
60 +#include <linux/module.h>
61 +#include <linux/platform_device.h>
62 +#include <linux/major.h>
63 +
64 +#include <linux/types.h>
65 +#include <linux/pci.h>
66 +#include <linux/interrupt.h>
67 +
68 +#include <linux/kernel.h>
69 +#include <linux/fs.h>
70 +#include <linux/errno.h>
71 +#include <linux/hdreg.h>
72 +#include <linux/kdev_t.h>
73 +#include <linux/blkdev.h>
74 +#include <linux/mutex.h>
75 +#include <linux/scatterlist.h>
76 +#include <linux/mmc/mmc.h>
77 +#include <linux/mmc/sdio.h>
78 +#include <linux/mmc/host.h>
79 +#include <linux/mmc/card.h>
80 +
81 +#include <linux/moduleparam.h>
82 +#include <linux/init.h>
83 +#include <linux/ioport.h>
84 +#include <linux/dma-mapping.h>
85 +#include <linux/delay.h>
86 +#include <linux/spinlock.h>
87 +#include <linux/timer.h>
88 +#include <linux/clk.h>
89 +
90 +#include <asm/io.h>
91 +#include <asm/irq.h>
92 +#include <asm/scatterlist.h>
93 +#include <asm/mach-types.h>
94 +
95 +
96 +#include <asm/types.h>
97 +#include <asm/io.h>
98 +#include <asm/uaccess.h>
99 +
100 +#define DRIVER_NAME "goldfish_mmc"
101 +
102 +#define BUFFER_SIZE 16384
103 +
104 +#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr))
105 +#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
106 +
107 +
108 +enum {
109 + /* status register */
110 + MMC_INT_STATUS = 0x00,
111 + /* set this to enable IRQ */
112 + MMC_INT_ENABLE = 0x04,
113 + /* set this to specify buffer address */
114 + MMC_SET_BUFFER = 0x08,
115 +
116 + /* MMC command number */
117 + MMC_CMD = 0x0C,
118 +
119 + /* MMC argument */
120 + MMC_ARG = 0x10,
121 +
122 + /* MMC response (or R2 bits 0 - 31) */
123 + MMC_RESP_0 = 0x14,
124 +
125 + /* MMC R2 response bits 32 - 63 */
126 + MMC_RESP_1 = 0x18,
127 +
128 + /* MMC R2 response bits 64 - 95 */
129 + MMC_RESP_2 = 0x1C,
130 +
131 + /* MMC R2 response bits 96 - 127 */
132 + MMC_RESP_3 = 0x20,
133 +
134 + MMC_BLOCK_LENGTH = 0x24,
135 + MMC_BLOCK_COUNT = 0x28,
136 +
137 + /* MMC state flags */
138 + MMC_STATE = 0x2C,
139 +
140 + /* MMC_INT_STATUS bits */
141 +
142 + MMC_STAT_END_OF_CMD = 1U << 0,
143 + MMC_STAT_END_OF_DATA = 1U << 1,
144 + MMC_STAT_STATE_CHANGE = 1U << 2,
145 +
146 + /* MMC_STATE bits */
147 + MMC_STATE_INSERTED = 1U << 0,
148 + MMC_STATE_READ_ONLY = 1U << 1,
149 +};
150 +
151 +/*
152 + * Command types
153 + */
154 +#define OMAP_MMC_CMDTYPE_BC 0
155 +#define OMAP_MMC_CMDTYPE_BCR 1
156 +#define OMAP_MMC_CMDTYPE_AC 2
157 +#define OMAP_MMC_CMDTYPE_ADTC 3
158 +
159 +
160 +struct goldfish_mmc_host {
161 + struct mmc_request * mrq;
162 + struct mmc_command * cmd;
163 + struct mmc_data * data;
164 + struct mmc_host * mmc;
165 + struct device * dev;
166 + unsigned char id; /* 16xx chips have 2 MMC blocks */
167 + void __iomem *virt_base;
168 + unsigned int phys_base;
169 + int irq;
170 + unsigned char bus_mode;
171 + unsigned char hw_bus_mode;
172 +
173 + unsigned int sg_len;
174 + unsigned dma_done:1;
175 + unsigned dma_in_use:1;
176 +
177 + struct work_struct switch_work;
178 + int switch_last_state;
179 +
180 + uint32_t reg_base;
181 +};
182 +
183 +static inline int
184 +goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
185 +{
186 + return 0;
187 +}
188 +
189 +static ssize_t
190 +goldfish_mmc_show_cover_switch(struct device *dev,
191 + struct device_attribute *attr, char *buf)
192 +{
193 + struct goldfish_mmc_host *host = dev_get_drvdata(dev);
194 +
195 + return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
196 + "closed");
197 +}
198 +
199 +static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
200 +
201 +static void
202 +goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
203 +{
204 + u32 cmdreg;
205 + u32 resptype;
206 + u32 cmdtype;
207 +
208 + host->cmd = cmd;
209 +
210 + resptype = 0;
211 + cmdtype = 0;
212 +
213 + /* Our hardware needs to know exact type */
214 + switch (mmc_resp_type(cmd)) {
215 + case MMC_RSP_NONE:
216 + break;
217 + case MMC_RSP_R1:
218 + case MMC_RSP_R1B:
219 + /* resp 1, 1b, 6, 7 */
220 + resptype = 1;
221 + break;
222 + case MMC_RSP_R2:
223 + resptype = 2;
224 + break;
225 + case MMC_RSP_R3:
226 + resptype = 3;
227 + break;
228 + default:
229 + dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
230 + break;
231 + }
232 +
233 + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
234 + cmdtype = OMAP_MMC_CMDTYPE_ADTC;
235 + } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
236 + cmdtype = OMAP_MMC_CMDTYPE_BC;
237 + } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
238 + cmdtype = OMAP_MMC_CMDTYPE_BCR;
239 + } else {
240 + cmdtype = OMAP_MMC_CMDTYPE_AC;
241 + }
242 +
243 + cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
244 +
245 + if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
246 + cmdreg |= 1 << 6;
247 +
248 + if (cmd->flags & MMC_RSP_BUSY)
249 + cmdreg |= 1 << 11;
250 +
251 + if (host->data && !(host->data->flags & MMC_DATA_WRITE))
252 + cmdreg |= 1 << 15;
253 +
254 + GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
255 + GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
256 +}
257 +
258 +static void
259 +goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, struct mmc_data *data)
260 +{
261 + if (host->dma_in_use) {
262 + enum dma_data_direction dma_data_dir;
263 +
264 + if (data->flags & MMC_DATA_WRITE)
265 + dma_data_dir = DMA_TO_DEVICE;
266 + else
267 + dma_data_dir = DMA_FROM_DEVICE;
268 +
269 + if (dma_data_dir == DMA_FROM_DEVICE) {
270 + // we don't really have DMA, so we need to copy from our platform driver buffer
271 + uint8_t* dest = (uint8_t *)sg_virt(data->sg);
272 + memcpy(dest, host->virt_base, data->sg->length);
273 + }
274 +
275 + host->data->bytes_xfered += data->sg->length;
276 +
277 + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, dma_data_dir);
278 + }
279 +
280 + host->data = NULL;
281 + host->sg_len = 0;
282 +
283 + /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
284 + * dozens of requests until the card finishes writing data.
285 + * It'd be cheaper to just wait till an EOFB interrupt arrives...
286 + */
287 +
288 + if (!data->stop) {
289 + host->mrq = NULL;
290 + mmc_request_done(host->mmc, data->mrq);
291 + return;
292 + }
293 +
294 + goldfish_mmc_start_command(host, data->stop);
295 +}
296 +
297 +static void
298 +goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, struct mmc_data *data)
299 +{
300 + if (!host->dma_in_use) {
301 + goldfish_mmc_xfer_done(host, data);
302 + return;
303 + }
304 + if (host->dma_done)
305 + goldfish_mmc_xfer_done(host, data);
306 +}
307 +
308 +static void
309 +goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, struct mmc_command *cmd)
310 +{
311 + host->cmd = NULL;
312 + if (cmd->flags & MMC_RSP_PRESENT) {
313 + if (cmd->flags & MMC_RSP_136) {
314 + /* response type 2 */
315 + cmd->resp[3] =
316 + GOLDFISH_MMC_READ(host, MMC_RESP_0);
317 + cmd->resp[2] =
318 + GOLDFISH_MMC_READ(host, MMC_RESP_1);
319 + cmd->resp[1] =
320 + GOLDFISH_MMC_READ(host, MMC_RESP_2);
321 + cmd->resp[0] =
322 + GOLDFISH_MMC_READ(host, MMC_RESP_3);
323 + } else {
324 + /* response types 1, 1b, 3, 4, 5, 6 */
325 + cmd->resp[0] =
326 + GOLDFISH_MMC_READ(host, MMC_RESP_0);
327 + }
328 + }
329 +
330 + if (host->data == NULL || cmd->error) {
331 + host->mrq = NULL;
332 + mmc_request_done(host->mmc, cmd->mrq);
333 + }
334 +}
335 +
336 +static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
337 +{
338 + struct goldfish_mmc_host * host = (struct goldfish_mmc_host *)dev_id;
339 + u16 status;
340 + int end_command;
341 + int end_transfer;
342 + int transfer_error;
343 + int state_changed;
344 +
345 + if (host->cmd == NULL && host->data == NULL) {
346 + status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
347 + dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
348 + if (status != 0) {
349 + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
350 + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
351 + }
352 + return IRQ_HANDLED;
353 + }
354 +
355 + end_command = 0;
356 + end_transfer = 0;
357 + transfer_error = 0;
358 + state_changed = 0;
359 +
360 + while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
361 + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
362 +
363 + if (status & MMC_STAT_END_OF_CMD) {
364 + end_command = 1;
365 + }
366 +
367 + if (status & MMC_STAT_END_OF_DATA) {
368 + end_transfer = 1;
369 + }
370 + if (status & MMC_STAT_STATE_CHANGE) {
371 + state_changed = 1;
372 + }
373 + }
374 +
375 + if (end_command) {
376 + goldfish_mmc_cmd_done(host, host->cmd);
377 + }
378 + if (transfer_error)
379 + goldfish_mmc_xfer_done(host, host->data);
380 + else if (end_transfer) {
381 + host->dma_done = 1;
382 + goldfish_mmc_end_of_data(host, host->data);
383 + }
384 + if (state_changed) {
385 + schedule_work(&host->switch_work);
386 + }
387 +
388 + return IRQ_HANDLED;
389 +}
390 +
391 +
392 +static void goldfish_mmc_switch_handler(struct work_struct *work)
393 +{
394 +/*
395 + struct goldfish_mmc_host *host = container_of(work, struct goldfish_mmc_host, switch_work);
396 + struct mmc_card *card;
397 + static int complained = 0;
398 + int cards = 0, cover_open;
399 +
400 + cover_open = goldfish_mmc_cover_is_open(host);
401 + if (cover_open != host->switch_last_state) {
402 + kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
403 + host->switch_last_state = cover_open;
404 + }
405 + mmc_detect_change(host->mmc, 0);
406 + list_for_each_entry(card, &host->mmc->cards, node) {
407 + if (mmc_card_present(card))
408 + cards++;
409 + }
410 + if (goldfish_mmc_cover_is_open(host)) {
411 + if (!complained) {
412 + dev_info(mmc_dev(host->mmc), "cover is open\n");
413 + complained = 1;
414 + }
415 + } else {
416 + complained = 0;
417 + }
418 +*/
419 +}
420 +
421 +
422 +static void
423 +goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, struct mmc_request *req)
424 +{
425 + struct mmc_data *data = req->data;
426 + int block_size;
427 + unsigned sg_len;
428 + enum dma_data_direction dma_data_dir;
429 +
430 + host->data = data;
431 + if (data == NULL) {
432 + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
433 + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
434 + host->dma_in_use = 0;
435 + return;
436 + }
437 +
438 + block_size = data->blksz;
439 +
440 + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
441 + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);
442 +
443 + /* cope with calling layer confusion; it issues "single
444 + * block" writes using multi-block scatterlists.
445 + */
446 + sg_len = (data->blocks == 1) ? 1 : data->sg_len;
447 +
448 + if (data->flags & MMC_DATA_WRITE)
449 + dma_data_dir = DMA_TO_DEVICE;
450 + else
451 + dma_data_dir = DMA_FROM_DEVICE;
452 +
453 + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
454 + sg_len, dma_data_dir);
455 + host->dma_done = 0;
456 + host->dma_in_use = 1;
457 +
458 + if (dma_data_dir == DMA_TO_DEVICE) {
459 + // we don't really have DMA, so we need to copy to our platform driver buffer
460 + const uint8_t* src = (uint8_t *)sg_virt(data->sg);
461 + memcpy(host->virt_base, src, data->sg->length);
462 + }
463 +}
464 +
465 +static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
466 +{
467 + struct goldfish_mmc_host *host = mmc_priv(mmc);
468 +
469 + WARN_ON(host->mrq != NULL);
470 +
471 + host->mrq = req;
472 + goldfish_mmc_prepare_data(host, req);
473 + goldfish_mmc_start_command(host, req->cmd);
474 +
475 + /* this is to avoid accidentally being detected as an SDIO card in mmc_attach_sdio() */
476 + if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
477 + req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR)) {
478 + req->cmd->error = -EINVAL;
479 + }
480 +}
481 +
482 +static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
483 +{
484 + struct goldfish_mmc_host *host = mmc_priv(mmc);
485 +
486 + host->bus_mode = ios->bus_mode;
487 + host->hw_bus_mode = host->bus_mode;
488 +}
489 +
490 +static int goldfish_mmc_get_ro(struct mmc_host *mmc)
491 +{
492 + uint32_t state;
493 + struct goldfish_mmc_host *host = mmc_priv(mmc);
494 +
495 + state = GOLDFISH_MMC_READ(host, MMC_STATE);
496 + return ((state & MMC_STATE_READ_ONLY) != 0);
497 +}
498 +
499 +static const struct mmc_host_ops goldfish_mmc_ops = {
500 + .request = goldfish_mmc_request,
501 + .set_ios = goldfish_mmc_set_ios,
502 + .get_ro = goldfish_mmc_get_ro,
503 +};
504 +
505 +static int __init goldfish_mmc_probe(struct platform_device *pdev)
506 +{
507 + struct mmc_host *mmc;
508 + struct goldfish_mmc_host *host = NULL;
509 + struct resource *res;
510 + int ret = 0;
511 + int irq;
512 + dma_addr_t buf_addr;
513 +
514 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
515 + irq = platform_get_irq(pdev, 0);
516 + if (res == NULL || irq < 0)
517 + return -ENXIO;
518 +
519 + mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
520 + if (mmc == NULL) {
521 + ret = -ENOMEM;
522 + goto err_alloc_host_failed;
523 + }
524 +
525 + host = mmc_priv(mmc);
526 + host->mmc = mmc;
527 + host->reg_base = IO_ADDRESS(res->start - IO_START);
528 + host->virt_base = dma_alloc_writecombine(&pdev->dev, BUFFER_SIZE,
529 + &buf_addr, GFP_KERNEL);
530 + if(host->virt_base == 0) {
531 + ret = -EBUSY;
532 + goto dma_alloc_failed;
533 + }
534 + host->phys_base = buf_addr;
535 +
536 + host->id = pdev->id;
537 + host->irq = irq;
538 +
539 + mmc->ops = &goldfish_mmc_ops;
540 + mmc->f_min = 400000;
541 + mmc->f_max = 24000000;
542 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
543 + mmc->caps = MMC_CAP_4_BIT_DATA;
544 +
545 + /* Use scatterlist DMA to reduce per-transfer costs.
546 + * NOTE max_seg_size assumption that small blocks aren't
547 + * normally used (except e.g. for reading SD registers).
548 + */
549 + mmc->max_phys_segs = 32;
550 + mmc->max_hw_segs = 32;
551 + mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */
552 + mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */
553 + mmc->max_req_size = BUFFER_SIZE;
554 + mmc->max_seg_size = mmc->max_req_size;
555 +
556 + ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
557 + if (ret)
558 + goto err_request_irq_failed;
559 +
560 + host->dev = &pdev->dev;
561 + platform_set_drvdata(pdev, host);
562 +
563 + ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
564 + if (ret)
565 + dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
566 +
567 + mmc_add_host(mmc);
568 +
569 + GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
570 + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
571 + MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA | MMC_STAT_STATE_CHANGE
572 + );
573 +
574 + // we start with the card present
575 + kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
576 + mmc_detect_change(host->mmc, 0);
577 +
578 + INIT_WORK(&host->switch_work, goldfish_mmc_switch_handler);
579 +
580 + return 0;
581 +
582 +err_request_irq_failed:
583 + dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
584 +dma_alloc_failed:
585 + mmc_free_host(host->mmc);
586 +err_alloc_host_failed:
587 + return ret;
588 +}
589 +
590 +static int goldfish_mmc_remove(struct platform_device *pdev)
591 +{
592 + struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
593 +
594 + platform_set_drvdata(pdev, NULL);
595 +
596 + BUG_ON(host == NULL);
597 +
598 + mmc_remove_host(host->mmc);
599 + free_irq(host->irq, host);
600 + dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
601 + mmc_free_host(host->mmc);
602 +
603 + return 0;
604 +}
605 +
606 +static struct platform_driver goldfish_mmc_driver = {
607 + .probe = goldfish_mmc_probe,
608 + .remove = goldfish_mmc_remove,
609 + .driver = {
610 + .name = DRIVER_NAME,
611 + },
612 +};
613 +
614 +static int __init goldfish_mmc_init(void)
615 +{
616 + return platform_driver_register(&goldfish_mmc_driver);
617 +}
618 +
619 +static void __exit goldfish_mmc_exit(void)
620 +{
621 + platform_driver_unregister(&goldfish_mmc_driver);
622 +}
623 +
624 +module_init(goldfish_mmc_init);
625 +module_exit(goldfish_mmc_exit);
626 +