/* Smedia Glamo 336x/337x driver
 *
 * (C) 2007 by Openmoko, Inc.
 * Author: Harald Welte <laforge@openmoko.org>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/glamofb.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>

#ifdef CONFIG_PM
#include <linux/pm.h>
#endif

#include "glamo-regs.h"
#include "glamo-core.h"

#define GLAMO_MEM_REFRESH_COUNT 0x100

/*
 * Glamo internal settings
 *
 * We run the memory interface from the faster PLLB on 2.6.28 kernels and
 * above.  A couple of GTA02 users reported trouble with the memory bus
 * when they upgraded from 2.6.24, so this parameter allows reverting to
 * the 2.6.24 scheme if their Glamo chip needs it.
 *
 * You can override the faster default on the kernel command line with,
 * for example:
 *
 *	glamo3362.slow_memory=1
 */

static int slow_memory = 0;
module_param(slow_memory, int, 0644);

struct reg_range {
	int start;
	int count;
	char *name;
	char dump;
};
struct reg_range reg_range[] = {
	{ 0x0000, 0x76,  "General",  1 },
	{ 0x0200, 0x16,  "Host Bus", 1 },
	{ 0x0300, 0x38,  "Memory",   1 },
/*	{ 0x0400, 0x100, "Sensor",   0 }, */
/*	{ 0x0500, 0x300, "ISP",      0 }, */
/*	{ 0x0800, 0x400, "JPEG",     0 }, */
/*	{ 0x0c00, 0xcc,  "MPEG",     0 }, */
	{ 0x1100, 0xb2,  "LCD 1",    1 },
	{ 0x1200, 0x64,  "LCD 2",    1 },
	{ 0x1400, 0x40,  "MMC",      1 },
/*	{ 0x1500, 0x080, "MPU 0",    0 },
	{ 0x1580, 0x080, "MPU 1",    0 },
	{ 0x1600, 0x080, "Cmd Queue", 0 },
	{ 0x1680, 0x080, "RISC CPU", 0 },
	{ 0x1700, 0x400, "2D Unit",  0 },
	{ 0x1b00, 0x900, "3D Unit",  0 }, */
};

static struct glamo_core *glamo_handle;

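/*
 * Register access helpers.  Glamo registers are 16 bits wide and live in
 * the memory-mapped window at glamo->base.  The double-underscore
 * variants perform the raw access and assume the caller already holds
 * glamo->lock (or that no concurrency is possible yet); reg_set_bit_mask()
 * below is the locked wrapper.  __reg_set_bit_mask() is a classic
 * read-modify-write: only the bits selected by 'mask' are replaced with
 * the corresponding bits of 'val'.
 */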
static inline void __reg_write(struct glamo_core *glamo,
			       u_int16_t reg, u_int16_t val)
{
	writew(val, glamo->base + reg);
}

static inline u_int16_t __reg_read(struct glamo_core *glamo,
				   u_int16_t reg)
{
	return readw(glamo->base + reg);
}

static void __reg_set_bit_mask(struct glamo_core *glamo,
			       u_int16_t reg, u_int16_t mask,
			       u_int16_t val)
{
	u_int16_t tmp;

	val &= mask;

	tmp = __reg_read(glamo, reg);
	tmp &= ~mask;
	tmp |= val;
	__reg_write(glamo, reg, tmp);
}

static void reg_set_bit_mask(struct glamo_core *glamo,
			     u_int16_t reg, u_int16_t mask,
			     u_int16_t val)
{
	spin_lock(&glamo->lock);
	__reg_set_bit_mask(glamo, reg, mask, val);
	spin_unlock(&glamo->lock);
}

static inline void __reg_set_bit(struct glamo_core *glamo,
				 u_int16_t reg, u_int16_t bit)
{
	__reg_set_bit_mask(glamo, reg, bit, 0xffff);
}

static inline void __reg_clear_bit(struct glamo_core *glamo,
				   u_int16_t reg, u_int16_t bit)
{
	__reg_set_bit_mask(glamo, reg, bit, 0);
}

/***********************************************************************
 * resources of sibling devices
 ***********************************************************************/

#if 0
static struct resource glamo_core_resources[] = {
	{
		.start	= GLAMO_REGOFS_GENERIC,
		.end	= GLAMO_REGOFS_GENERIC + 0x400,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 0,
		.end	= 0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device glamo_core_dev = {
	.name		= "glamo-core",
	.resource	= glamo_core_resources,
	.num_resources	= ARRAY_SIZE(glamo_core_resources),
};
#endif

static struct resource glamo_jpeg_resources[] = {
	{
		.start	= GLAMO_REGOFS_JPEG,
		.end	= GLAMO_REGOFS_MPEG - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_GLAMO_JPEG,
		.end	= IRQ_GLAMO_JPEG,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device glamo_jpeg_dev = {
	.name		= "glamo-jpeg",
	.resource	= glamo_jpeg_resources,
	.num_resources	= ARRAY_SIZE(glamo_jpeg_resources),
};

static struct resource glamo_mpeg_resources[] = {
	{
		.start	= GLAMO_REGOFS_MPEG,
		.end	= GLAMO_REGOFS_LCD - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_GLAMO_MPEG,
		.end	= IRQ_GLAMO_MPEG,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device glamo_mpeg_dev = {
	.name		= "glamo-mpeg",
	.resource	= glamo_mpeg_resources,
	.num_resources	= ARRAY_SIZE(glamo_mpeg_resources),
};

static struct resource glamo_2d_resources[] = {
	{
		.start	= GLAMO_REGOFS_2D,
		.end	= GLAMO_REGOFS_3D - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_GLAMO_2D,
		.end	= IRQ_GLAMO_2D,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device glamo_2d_dev = {
	.name		= "glamo-2d",
	.resource	= glamo_2d_resources,
	.num_resources	= ARRAY_SIZE(glamo_2d_resources),
};

static struct resource glamo_3d_resources[] = {
	{
		.start	= GLAMO_REGOFS_3D,
		.end	= GLAMO_REGOFS_END - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device glamo_3d_dev = {
	.name		= "glamo-3d",
	.resource	= glamo_3d_resources,
	.num_resources	= ARRAY_SIZE(glamo_3d_resources),
};

static struct platform_device glamo_spigpio_dev = {
	.name		= "glamo-spi-gpio",
};

static struct resource glamo_fb_resources[] = {
	/* FIXME: those need to be incremented by parent base */
	{
		.name	= "glamo-fb-regs",
		.start	= GLAMO_REGOFS_LCD,
		.end	= GLAMO_REGOFS_MMC - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "glamo-fb-mem",
		.start	= GLAMO_OFFSET_FB,
		.end	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device glamo_fb_dev = {
	.name		= "glamo-fb",
	.resource	= glamo_fb_resources,
	.num_resources	= ARRAY_SIZE(glamo_fb_resources),
};

static struct resource glamo_mmc_resources[] = {
	{
		/* FIXME: those need to be incremented by parent base */
		.start	= GLAMO_REGOFS_MMC,
		.end	= GLAMO_REGOFS_MPROC0 - 1,
		.flags	= IORESOURCE_MEM
	}, {
		.start	= IRQ_GLAMO_MMC,
		.end	= IRQ_GLAMO_MMC,
		.flags	= IORESOURCE_IRQ,
	}, { /* our data buffer for MMC transfers */
		.start	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE,
		.end	= GLAMO_OFFSET_FB + GLAMO_FB_SIZE +
				GLAMO_MMC_BUFFER_SIZE - 1,
		.flags	= IORESOURCE_MEM
	},
};

static struct glamo_mci_pdata glamo_mci_def_pdata = {
	.gpio_detect		= 0,
	.glamo_can_set_mci_power = NULL, /* filled in from MFD platform data */
/*	.ocr_avail		= MMC_VDD_20_21 |
				  MMC_VDD_21_22 |
				  MMC_VDD_22_23 |
				  MMC_VDD_23_24 |
				  MMC_VDD_24_25 |
				  MMC_VDD_25_26 |
				  MMC_VDD_26_27 |
				  MMC_VDD_27_28 |
				  MMC_VDD_28_29 |
				  MMC_VDD_29_30 |
				  MMC_VDD_30_31 |
				  MMC_VDD_32_33, */
	.glamo_irq_is_wired	= NULL, /* filled in from MFD platform data */
	.mci_suspending		= NULL, /* filled in from MFD platform data */
	.mci_all_dependencies_resumed = NULL, /* filled in from MFD platform data */
};

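/*
 * The sibling MEM resources above are declared as offsets into the Glamo
 * register window.  Before the children are registered, this helper
 * rebases each IORESOURCE_MEM entry onto the parent resource the MFD was
 * probed with, so the children see absolute bus addresses.  IRQ
 * resources are left untouched.
 */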
static void mangle_mem_resources(struct resource *res, int num_res,
				 struct resource *parent)
{
	int i;

	for (i = 0; i < num_res; i++) {
		if (res[i].flags != IORESOURCE_MEM)
			continue;
		res[i].start += parent->start;
		res[i].end += parent->start;
		res[i].parent = parent;
	}
}

/***********************************************************************
 * IRQ demultiplexer
 ***********************************************************************/
#define irq2glamo(x)	((x) - IRQ_GLAMO(0))

static void glamo_ack_irq(unsigned int irq)
{
	/* clear interrupt source */
	__reg_write(glamo_handle, GLAMO_REG_IRQ_CLEAR,
		    1 << irq2glamo(irq));
}

static void glamo_mask_irq(unsigned int irq)
{
	u_int16_t tmp;

	/* clear bit in enable register */
	tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
	tmp &= ~(1 << irq2glamo(irq));
	__reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
}

static void glamo_unmask_irq(unsigned int irq)
{
	u_int16_t tmp;

	/* set bit in enable register */
	tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
	tmp |= (1 << irq2glamo(irq));
	__reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
}

static struct irq_chip glamo_irq_chip = {
	.ack	= glamo_ack_irq,
	.mask	= glamo_mask_irq,
	.unmask	= glamo_unmask_irq,
};

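/*
 * All Glamo engine interrupts share one physical INT# line to the CPU.
 * This chained handler reads GLAMO_REG_IRQ_STATUS and dispatches each
 * set bit to its virtual IRQ_GLAMO(n) handler.  The IRQ_PENDING /
 * IRQ_INPROGRESS bookkeeping mirrors the replay logic of the generic
 * 2.6-era flow handlers, so that edges arriving while we are already
 * inside the handler are not lost.
 */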
static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		return;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	desc->chip->ack(irq);
	desc->status |= IRQ_INPROGRESS;

	do {
		u_int16_t irqstatus;
		int i;

		if (unlikely((desc->status &
			      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			     (IRQ_PENDING | IRQ_MASKED))) {
			/* dealing with pending IRQ, unmasking */
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;

		/* read IRQ status register */
		irqstatus = __reg_read(glamo_handle, GLAMO_REG_IRQ_STATUS);
		for (i = 0; i < 9; i++)
			if (irqstatus & (1 << i))
				desc_handle_irq(IRQ_GLAMO(i),
						irq_desc + IRQ_GLAMO(i));

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
}

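/*
 * Debug interface: a 'regs' sysfs attribute on the platform device.
 * Writing takes two decimal numbers, "<reg> <value>"; reading dumps the
 * register ranges marked for dumping in reg_range[] above.  For example
 * (sketch only; the exact path depends on the device instance):
 *
 *	echo "512 3587" > /sys/bus/platform/devices/glamo3362.0/regs
 *
 * would write 0x0e03 to register 0x200.
 */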
static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long reg = simple_strtoul(buf, NULL, 10);
	struct glamo_core *glamo = dev_get_drvdata(dev);

	while (*buf && (*buf != ' '))
		buf++;
	if (*buf != ' ')
		return -EINVAL;
	while (*buf && (*buf == ' '))
		buf++;
	if (!*buf)
		return -EINVAL;

	printk(KERN_INFO "reg 0x%02lX <-- 0x%04lX\n",
	       reg, simple_strtoul(buf, NULL, 10));

	__reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));

	return count;
}

static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct glamo_core *glamo = dev_get_drvdata(dev);
	int n, n1 = 0, r;
	char *end = buf;

	spin_lock(&glamo->lock);

	for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
		if (!reg_range[r].dump)
			continue;
		n1 = 0;
		end += sprintf(end, "\n%s\n", reg_range[r].name);
		for (n = reg_range[r].start;
		     n < reg_range[r].start + reg_range[r].count; n += 2) {
			if (((n1++) & 7) == 0)
				end += sprintf(end, "\n%04X: ", n);
			end += sprintf(end, "%04x ", __reg_read(glamo, n));
		}
		end += sprintf(end, "\n");
		if (!attr) {
			printk("%s", buf);
			end = buf;
		}
	}
	spin_unlock(&glamo->lock);

	return end - buf;
}

static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
static struct attribute *glamo_sysfs_entries[] = {
	&dev_attr_regs.attr,
	NULL
};
static struct attribute_group glamo_attr_group = {
	.name	= NULL,
	.attrs	= glamo_sysfs_entries,
};

/***********************************************************************
 * 'engine' support
 ***********************************************************************/

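/*
 * Enabling an engine involves up to three steps: unhide its MMIO window
 * on the host bus (GLAMO_REG_HOSTBUS(2)), ungate its functional clocks
 * in the per-engine clock register, and feed the relevant dividers in
 * GLAMO_REG_CLOCK_GEN5_1.  The enabled set is tracked in
 * engine_enabled_bitfield so suspend/resume can restore it.  A sibling
 * driver typically does something like this (sketch only; the glamo
 * pointer arrives through the sibling's platform data, cf.
 * glamo_mci_def_pdata.pglamo below):
 *
 *	glamo_engine_enable(glamo, GLAMO_ENGINE_MMC);
 *	... use the engine ...
 *	glamo_engine_disable(glamo, GLAMO_ENGINE_MMC);
 */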
int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_LCD,
				   GLAMO_HOSTBUS2_MMIO_EN_LCD);
		__reg_write(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_M5CLK |
			    GLAMO_CLOCK_LCD_EN_DHCLK |
			    GLAMO_CLOCK_LCD_EN_DMCLK |
			    GLAMO_CLOCK_LCD_EN_DCLK |
			    GLAMO_CLOCK_LCD_DG_M5CLK |
			    GLAMO_CLOCK_LCD_DG_DMCLK);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
				   GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
				   GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
		break;
	case GLAMO_ENGINE_MMC:
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_MMC,
				   GLAMO_HOSTBUS2_MMIO_EN_MMC);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
				   GLAMO_CLOCK_MMC_EN_M9CLK |
				   GLAMO_CLOCK_MMC_EN_TCLK |
				   GLAMO_CLOCK_MMC_DG_M9CLK |
				   GLAMO_CLOCK_MMC_DG_TCLK, 0xffff);
		/* enable the TCLK divider clk input */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
				   GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
		break;
	case GLAMO_ENGINE_2D:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M7CLK |
				   GLAMO_CLOCK_2D_EN_GCLK |
				   GLAMO_CLOCK_2D_DG_M7CLK |
				   GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_2D,
				   GLAMO_HOSTBUS2_MMIO_EN_2D);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
				   0xffff);
		break;
	case GLAMO_ENGINE_CMDQ:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_CQ,
				   GLAMO_HOSTBUS2_MMIO_EN_CQ);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
				   0xffff);
		break;
	/* FIXME: Implementation */
	default:
		return -EINVAL;
	}

	glamo->engine_enabled_bitfield |= 1 << engine;

	return 0;
}

int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
	int ret;

	spin_lock(&glamo->lock);

	ret = __glamo_engine_enable(glamo, engine);

	spin_unlock(&glamo->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_enable);

int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		/* remove pixel clock to LCM */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
				   GLAMO_CLOCK_LCD_EN_DCLK, 0);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
				   GLAMO_CLOCK_LCD_EN_DHCLK |
				   GLAMO_CLOCK_LCD_EN_DMCLK, 0);
		/* kill memory clock */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
				   GLAMO_CLOCK_LCD_EN_M5CLK, 0);
		/* stop dividing the clocks */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
				   GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
				   GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
		break;

	case GLAMO_ENGINE_MMC:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
				   GLAMO_CLOCK_MMC_EN_M9CLK |
				   GLAMO_CLOCK_MMC_EN_TCLK |
				   GLAMO_CLOCK_MMC_DG_M9CLK |
				   GLAMO_CLOCK_MMC_DG_TCLK, 0);
		/* disable the TCLK divider clk input */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
		break;
	case GLAMO_ENGINE_CMDQ:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M6CLK,
				   0);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_CQ,
				   GLAMO_HOSTBUS2_MMIO_EN_CQ);
/*		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
				   0); */
		break;
	case GLAMO_ENGINE_2D:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M7CLK |
				   GLAMO_CLOCK_2D_EN_GCLK |
				   GLAMO_CLOCK_2D_DG_M7CLK |
				   GLAMO_CLOCK_2D_DG_GCLK,
				   0);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_2D,
				   GLAMO_HOSTBUS2_MMIO_EN_2D);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
				   0);
		break;
	default:
		return -EINVAL;
	}

	glamo->engine_enabled_bitfield &= ~(1 << engine);

	return 0;
}

int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
	int ret;

	spin_lock(&glamo->lock);

	ret = __glamo_engine_disable(glamo, engine);

	spin_unlock(&glamo->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_disable);

static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
	[GLAMO_ENGINE_LCD]	= GLAMO_REG_CLOCK_LCD,
	[GLAMO_ENGINE_MMC]	= GLAMO_REG_CLOCK_MMC,
	[GLAMO_ENGINE_ISP]	= GLAMO_REG_CLOCK_ISP,
	[GLAMO_ENGINE_JPEG]	= GLAMO_REG_CLOCK_JPEG,
	[GLAMO_ENGINE_3D]	= GLAMO_REG_CLOCK_3D,
	[GLAMO_ENGINE_2D]	= GLAMO_REG_CLOCK_2D,
	[GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
	[GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
};

void glamo_engine_clkreg_set(struct glamo_core *glamo,
			     enum glamo_engine engine,
			     u_int16_t mask, u_int16_t val)
{
	reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);

u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
				  enum glamo_engine engine)
{
	u_int16_t val;

	spin_lock(&glamo->lock);
	val = __reg_read(glamo, engine_clock_regs[engine]);
	spin_unlock(&glamo->lock);

	return val;
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);

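/*
 * Per-engine reset bits.  glamo_engine_reset() pulses the engine's reset
 * bit in the corresponding clock register: set it, then clear it, all
 * under the register lock.  Engines missing from reset_regs[] cannot be
 * reset through this interface.
 */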
struct glamo_script reset_regs[] = {
	[GLAMO_ENGINE_LCD] = {
		GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
	},
#if 0
	[GLAMO_ENGINE_HOST] = {
		GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
	},
	[GLAMO_ENGINE_MEM] = {
		GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
	},
#endif
	[GLAMO_ENGINE_MMC] = {
		GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
	},
	[GLAMO_ENGINE_CMDQ] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
	},
	[GLAMO_ENGINE_2D] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
	},
	[GLAMO_ENGINE_JPEG] = {
		GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
	},
};

void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
{
	struct glamo_script *rst;

	if (engine >= ARRAY_SIZE(reset_regs)) {
		dev_warn(&glamo->pdev->dev, "unknown engine %u\n", engine);
		return;
	}

	rst = &reset_regs[engine];

	spin_lock(&glamo->lock);
	__reg_set_bit(glamo, rst->reg, rst->val);
	__reg_clear_bit(glamo, rst->reg, rst->val);
	spin_unlock(&glamo->lock);
}
EXPORT_SYMBOL_GPL(glamo_engine_reset);

void glamo_lcm_reset(int level)
{
	if (!glamo_handle)
		return;

	glamo_gpio_setpin(glamo_handle, GLAMO_GPIO4, level);
	glamo_gpio_cfgpin(glamo_handle, GLAMO_GPIO4_OUTPUT);
}
EXPORT_SYMBOL_GPL(glamo_lcm_reset);

enum glamo_pll {
	GLAMO_PLL1,
	GLAMO_PLL2,
};

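/*
 * PLL output frequency.  The PLL multiplies the oscillator by the value
 * of its 16-bit generator register: rate = (osci / div) * N.  With the
 * 32768 Hz crystal the divider is 1, so the init script's PLL1 value of
 * 0x05db (1499) gives 1499 * 32768 ~= 49.1 MHz (the "48MHz" of the
 * script comments) and PLL2's 0x0aba (2746) gives ~90 MHz.
 */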
static int glamo_pll_rate(struct glamo_core *glamo,
			  enum glamo_pll pll)
{
	u_int16_t reg;
	unsigned int div = 512;
	/* FIXME: move osci into platform_data */
	unsigned int osci = 32768;

	if (osci == 32768)
		div = 1;

	switch (pll) {
	case GLAMO_PLL1:
		reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
		break;
	case GLAMO_PLL2:
		reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
		break;
	default:
		return -EINVAL;
	}
	return (osci / div) * reg;
}

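/*
 * Reclock an engine given a target clock period in picoseconds: the
 * target rate in kHz is 10^9 / ps, and the divider programmed into the
 * clock generator is (pll_rate / khz) / 1000, stored minus one.  Only
 * the LCD engine (divider in the low byte of CLOCK_GEN7, fed from PLL1)
 * is supported so far.
 */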
int glamo_engine_reclock(struct glamo_core *glamo,
			 enum glamo_engine engine,
			 int ps)
{
	int pll, khz;
	u_int16_t reg, mask, val = 0;

	if (!ps)
		return 0;

	switch (engine) {
	case GLAMO_ENGINE_LCD:
		pll = GLAMO_PLL1;
		reg = GLAMO_REG_CLOCK_GEN7;
		mask = 0xff;
		break;
	default:
		dev_warn(&glamo->pdev->dev,
			 "reclock of engine 0x%x not supported\n", engine);
		return -EINVAL;
	}

	pll = glamo_pll_rate(glamo, pll);
	khz = 1000000000UL / ps;

	if (khz)
		val = (pll / khz) / 1000;

	dev_dbg(&glamo->pdev->dev,
		"PLL %d, kHz %d, div %d\n", pll, khz, val);

	if (val) {
		val--;
		reg_set_bit_mask(glamo, reg, mask, val);
		mdelay(5); /* wait some time to stabilize */

		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(glamo_engine_reclock);

/***********************************************************************
 * script support
 ***********************************************************************/

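/*
 * A glamo_script is a flat list of (register, value) pairs, with a few
 * magic register numbers interpreted by glamo_run_script() itself:
 *
 *	0xffff	end of script
 *	0xfffe	delay for 'val' ms (msleep, or mdelay(val * 4) when the
 *		caller cannot sleep)
 *	0xfffd	busy-wait until both PLL lock bits in GLAMO_REG_PLL_GEN5
 *		are set
 *	0x200	memory/host bus config; the scripted value is replaced
 *		according to the slow_memory module parameter
 */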
int glamo_run_script(struct glamo_core *glamo, struct glamo_script *script,
		     int len, int may_sleep)
{
	int i;

	for (i = 0; i < len; i++) {
		struct glamo_script *line = &script[i];

		switch (line->reg) {
		case 0xffff:
			return 0;
		case 0xfffe:
			if (may_sleep)
				msleep(line->val);
			else
				mdelay(line->val * 4);
			break;
		case 0xfffd:
			/* spin until PLLs lock */
			while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
				;
			break;

		/*
		 * a couple of people reported artefacts with the 2.6.28
		 * changes, so this allows reversion to the 2.6.24 settings
		 */
		case 0x200:
			switch (slow_memory) {
			/* choice 1 is the most conservative */
			case 1: /* 3 waits on Async BB R & W, use PLL 1 for mem bus */
				__reg_write(glamo, line->reg, 0xef0);
				break;
			case 2: /* 2 waits on Async BB R & W, use PLL 1 for mem bus */
				__reg_write(glamo, line->reg, 0xea0);
				break;
			case 3: /* 1 wait on Async BB R & W, use PLL 1 for mem bus */
				__reg_write(glamo, line->reg, 0xe50);
				break;
			case 4: /* 0 waits on Async BB R & W, use PLL 1 for mem bus */
				__reg_write(glamo, line->reg, 0xe00);
				break;

			/* using PLL2 for the memory bus increases CPU bandwidth significantly */
			case 5: /* 3 waits on Async BB R & W, use PLL 2 for mem bus */
				__reg_write(glamo, line->reg, 0xef3);
				break;
			case 6: /* 2 waits on Async BB R & W, use PLL 2 for mem bus */
				__reg_write(glamo, line->reg, 0xea3);
				break;
			case 7: /* 1 wait on Async BB R & W, use PLL 2 for mem bus */
				__reg_write(glamo, line->reg, 0xe53);
				break;
			/* default of 0 or >7 is fastest */
			default: /* 0 waits on Async BB R & W, use PLL 2 for mem bus */
				__reg_write(glamo, line->reg, 0xe03);
				break;
			}
			break;

		default:
			__reg_write(glamo, line->reg, line->val);
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL(glamo_run_script);

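/*
 * Canned bring-up sequence, run after the external hard reset.  In rough
 * order: put the engines into reset, program and lock the PLLs (48 and
 * 90 MHz), enable the engine interrupt sources (bit 9 must stay clear or
 * INT# stays silent), configure the host bus (the 0x200 entry is
 * rewritten per slow_memory), then bring up the memory controller, the
 * DRAM and the GPIO pin muxing.
 */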
static struct glamo_script glamo_init_script[] = {
	{ GLAMO_REG_CLOCK_HOST,		0x1000 },
	{ 0xfffe, 2 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x1000 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x2000 },
	{ GLAMO_REG_CLOCK_LCD,		0x1000 },
	{ GLAMO_REG_CLOCK_MMC,		0x1000 },
	{ GLAMO_REG_CLOCK_ISP,		0x1000 },
	{ GLAMO_REG_CLOCK_ISP,		0x3000 },
	{ GLAMO_REG_CLOCK_JPEG,		0x1000 },
	{ GLAMO_REG_CLOCK_3D,		0x1000 },
	{ GLAMO_REG_CLOCK_3D,		0x3000 },
	{ GLAMO_REG_CLOCK_2D,		0x1000 },
	{ GLAMO_REG_CLOCK_2D,		0x3000 },
	{ GLAMO_REG_CLOCK_RISC1,	0x1000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x3000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x3000 },
	{ GLAMO_REG_CLOCK_MPROC,	0x1000 /*0x100f*/ },
	{ 0xfffe, 2 },
	{ GLAMO_REG_CLOCK_HOST,		0x0000 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x0000 },
	{ GLAMO_REG_CLOCK_LCD,		0x0000 },
	{ GLAMO_REG_CLOCK_MMC,		0x0000 },
#if 0
	/* unused engines must be left in reset to stop MMC block read "blackouts" */
	{ GLAMO_REG_CLOCK_ISP,		0x0000 },
	{ GLAMO_REG_CLOCK_ISP,		0x0000 },
	{ GLAMO_REG_CLOCK_JPEG,		0x0000 },
	{ GLAMO_REG_CLOCK_3D,		0x0000 },
	{ GLAMO_REG_CLOCK_3D,		0x0000 },
	{ GLAMO_REG_CLOCK_2D,		0x0000 },
	{ GLAMO_REG_CLOCK_2D,		0x0000 },
	{ GLAMO_REG_CLOCK_RISC1,	0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x0000 },
#endif
	{ GLAMO_REG_PLL_GEN1,		0x05db }, /* 48MHz */
	{ GLAMO_REG_PLL_GEN3,		0x0aba }, /* 90MHz */
	{ 0xfffd, 0 },
	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,		0x01ff },
	{ GLAMO_REG_CLOCK_GEN6,		0x2000 },
	{ GLAMO_REG_CLOCK_GEN7,		0x0101 },
	{ GLAMO_REG_CLOCK_GEN8,		0x0100 },
	{ GLAMO_REG_CLOCK_HOST,		0x000d },
	/*
	 * b7..b4 = 0 = no wait states on read or write
	 * b0 = 1 select PLL2 for Host interface, b1 = enable it
	 */
	{ 0x200,	0x0e03 /* this is replaced by the script parser */ },
	{ 0x202,	0x07ff },
	{ 0x212,	0x0000 },
	{ 0x214,	0x4000 },
	{ 0x216,	0xf00e },

	/* S-Media recommended "set tiling mode to 512 mode for memory access
	 * more efficiency when 640x480" */
	{ GLAMO_REG_MEM_TYPE,		0x0c74 }, /* 8MB, 16 word pg wr+rd */
	{ GLAMO_REG_MEM_GEN,		0xafaf }, /* 63 grants min + max */

	{ GLAMO_REGOFS_HOSTBUS + 2,	0xffff }, /* enable on MMIO */

	{ GLAMO_REG_MEM_TIMING1,	0x0108 },
	{ GLAMO_REG_MEM_TIMING2,	0x0010 }, /* Taa = 3 MCLK */
	{ GLAMO_REG_MEM_TIMING3,	0x0000 },
	{ GLAMO_REG_MEM_TIMING4,	0x0000 }, /* CE1# delay fall/rise */
	{ GLAMO_REG_MEM_TIMING5,	0x0000 }, /* UB# LB# */
	{ GLAMO_REG_MEM_TIMING6,	0x0000 }, /* OE# */
	{ GLAMO_REG_MEM_TIMING7,	0x0000 }, /* WE# */
	{ GLAMO_REG_MEM_TIMING8,	0x1002 }, /* MCLK delay, was 0x1000 */
	{ GLAMO_REG_MEM_TIMING9,	0x6006 },
	{ GLAMO_REG_MEM_TIMING10,	0x00ff },
	{ GLAMO_REG_MEM_TIMING11,	0x0001 },
	{ GLAMO_REG_MEM_POWER1,		0x0020 },
	{ GLAMO_REG_MEM_POWER2,		0x0000 },
	{ GLAMO_REG_MEM_DRAM1,		0x0000 },
	{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xc100 },
	{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xe100 },
	{ GLAMO_REG_MEM_DRAM2,		0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x000b },
	{ GLAMO_REG_GPIO_GEN1,		0x000f },
	{ GLAMO_REG_GPIO_GEN2,		0x111e },
	{ GLAMO_REG_GPIO_GEN3,		0xccc3 },
	{ GLAMO_REG_GPIO_GEN4,		0x111e },
	{ GLAMO_REG_GPIO_GEN5,		0x000f },
};
#if 0
static struct glamo_script glamo_resume_script[] = {

	{ GLAMO_REG_PLL_GEN1,		0x05db }, /* 48MHz */
	{ GLAMO_REG_PLL_GEN3,		0x0aba }, /* 90MHz */
	{ GLAMO_REG_DFT_GEN6, 1 },
	{ 0xfffe, 100 },
	{ 0xfffd, 0 },
	{ 0x200,	0x0e03 },

	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,		0x01ff },
	{ GLAMO_REG_CLOCK_HOST,		0x0018 },
	{ GLAMO_REG_CLOCK_GEN5_1,	0x18b1 },

	{ GLAMO_REG_MEM_DRAM1,		0x0000 },
	{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xc100 },
	{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xe100 },
	{ GLAMO_REG_MEM_DRAM2,		0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x000b },
};
#endif

enum glamo_power {
	GLAMO_POWER_ON,
	GLAMO_POWER_SUSPEND,
};

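/*
 * Power state transitions.  GLAMO_POWER_ON hard-resets the chip via the
 * external reset line and replays the init script, because the state the
 * Glamo wakes up in cannot be trusted.  GLAMO_POWER_SUSPEND masks all
 * interrupt sources, stashes and disables the running engines, puts the
 * DRAM into self-refresh and then deep power-down, switches the clock
 * tree to the bare oscillator, and finally kills both PLLs.
 */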
static void glamo_power(struct glamo_core *glamo,
			enum glamo_power new_state)
{
	int n;
	unsigned long flags;

	spin_lock_irqsave(&glamo->lock, flags);

	dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);

	/*
	Power management
	static const REG_VALUE_MASK_TYPE reg_powerOn[] =
	{
		{ REG_GEN_DFT6,    REG_BIT_ALL, REG_DATA(1u << 0) },
		{ REG_GEN_PLL3,    0u,          REG_DATA(1u << 13) },
		{ REG_GEN_MEM_CLK, REG_BIT_ALL, REG_BIT_EN_MOCACLK },
		{ REG_MEM_DRAM2,   0u,          REG_BIT_EN_DEEP_POWER_DOWN },
		{ REG_MEM_DRAM1,   0u,          REG_BIT_SELF_REFRESH }
	};

	static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
	{
		{ REG_MEM_DRAM1,   REG_BIT_ALL, REG_BIT_SELF_REFRESH },
		{ REG_GEN_MEM_CLK, 0u,          REG_BIT_EN_MOCACLK },
		{ REG_GEN_PLL3,    REG_BIT_ALL, REG_DATA(1u << 13) },
		{ REG_GEN_DFT5,    REG_BIT_ALL, REG_DATA(1u << 0) }
	};

	static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
	{
		{ REG_MEM_DRAM2,   REG_BIT_ALL, REG_BIT_EN_DEEP_POWER_DOWN },
		{ REG_GEN_MEM_CLK, 0u,          REG_BIT_EN_MOCACLK },
		{ REG_GEN_PLL3,    REG_BIT_ALL, REG_DATA(1u << 13) },
		{ REG_GEN_DFT5,    REG_BIT_ALL, REG_DATA(1u << 0) }
	};
	*/

	switch (new_state) {
	case GLAMO_POWER_ON:

		/*
		 * Glamo state on resume is nondeterministic in some
		 * fundamental way.  It has also been observed that the
		 * Glamo reset pin can get asserted by, eg, touching it
		 * with a scope probe.  So the only answer is to roll with
		 * it and force an external reset on the Glamo during
		 * resume.
		 */

		(glamo->pdata->glamo_external_reset)(0);
		udelay(10);
		(glamo->pdata->glamo_external_reset)(1);
		mdelay(5);

		glamo_run_script(glamo, glamo_init_script,
				 ARRAY_SIZE(glamo_init_script), 0);

		break;

	case GLAMO_POWER_SUSPEND:

		/* nuke interrupts */
		__reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);

		/* stash a copy of which engines were running */
		glamo->engine_enabled_bitfield_suspend =
			glamo->engine_enabled_bitfield;

		/* take down each engine before we kill mem and pll */
		for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
			if (glamo->engine_enabled_bitfield & (1 << n))
				__glamo_engine_disable(glamo, n);

		/* enable self-refresh */
		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
			    GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
			    GLAMO_MEM_DRAM1_EN_GATE_CKE |
			    GLAMO_MEM_DRAM1_SELF_REFRESH |
			    GLAMO_MEM_REFRESH_COUNT);
		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
			    GLAMO_MEM_DRAM1_EN_MODEREG_SET |
			    GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
			    GLAMO_MEM_DRAM1_EN_GATE_CKE |
			    GLAMO_MEM_DRAM1_SELF_REFRESH |
			    GLAMO_MEM_REFRESH_COUNT);

		/* force RAM into deep powerdown */
		__reg_write(glamo, GLAMO_REG_MEM_DRAM2,
			    GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
			    (7 << 6) |	/* tRC */
			    (1 << 4) |	/* tRP */
			    (1 << 2) |	/* tRCD */
			    2);		/* CAS latency */

		/* disable clocks to memory */
		__reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);

		/* all dividers from OSCI */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);

		/* PLL2 into bypass */
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);

		__reg_write(glamo, 0x200, 0x0e00);

		/* kill PLLs 1 then 2 */
		__reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);

		break;
	}

	spin_unlock_irqrestore(&glamo->lock, flags);
}

#if 0
#define MEMDETECT_RETRY	6
static unsigned int detect_memsize(struct glamo_core *glamo)
{
	int i;

	/*static const u_int16_t pattern[] = {
		0x1111, 0x8a8a, 0x2222, 0x7a7a,
		0x3333, 0x6a6a, 0x4444, 0x5a5a,
		0x5555, 0x4a4a, 0x6666, 0x3a3a,
		0x7777, 0x2a2a, 0x8888, 0x1a1a
	}; */

	for (i = 0; i < MEMDETECT_RETRY; i++) {
		switch (glamo->type) {
		case 3600:
			__reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
			__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
			break;
		case 3650:
			switch (glamo->revision) {
			case GLAMO_CORE_REV_A0:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x097a);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0173);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
				break;
			default:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0972);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0872);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
				break;
			}
			break;
		case 3700:
			/* FIXME */
		default:
			break;
		}

#if 0
		/* FIXME: finish implementation */
		for (j = 0; j < 8; j++) {
			__
#endif
	}

	return 0;
}
#endif

/* Find out if we can support this version of the Glamo chip */
static int glamo_supported(struct glamo_core *glamo)
{
	u_int16_t dev_id, rev_id; /*, memsize; */

	dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
	rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);

	switch (dev_id) {
	case 0x3650:
		switch (rev_id) {
		case GLAMO_CORE_REV_A2:
			break;
		case GLAMO_CORE_REV_A0:
		case GLAMO_CORE_REV_A1:
		case GLAMO_CORE_REV_A3:
			dev_warn(&glamo->pdev->dev, "untested core revision "
				 "%04x, your mileage may vary\n", rev_id);
			break;
		default:
			dev_warn(&glamo->pdev->dev, "unknown glamo revision "
				 "%04x, your mileage may vary\n", rev_id);
			/* maybe we should abort? */
		}
		break;
	case 0x3600:
	case 0x3700:
	default:
		dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
			dev_id);
		return 0;
	}

	dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
		"(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
		glamo_pll_rate(glamo, GLAMO_PLL1),
		glamo_pll_rate(glamo, GLAMO_PLL2));

	return 1;
}

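/*
 * Probe sequence, in the order the hardware needs it: allocate and map
 * the register window, hard-reset the chip, install the IRQ chip and
 * chained demux handler (before the siblings probe, or their IRQs are
 * dead), verify the silicon revision, expose sysfs, run the init script,
 * and only then register the sibling platform devices with their
 * resources rebased onto our own.
 */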
static int __devinit glamo_probe(struct platform_device *pdev)
{
	int rc = 0, irq;
	struct glamo_core *glamo;
	struct platform_device *glamo_mmc_dev;

	if (glamo_handle) {
		dev_err(&pdev->dev,
			"This driver supports only one instance\n");
		return -EBUSY;
	}

	glamo = kmalloc(sizeof(*glamo), GFP_KERNEL);
	if (!glamo)
		return -ENOMEM;

	spin_lock_init(&glamo->lock);
	glamo_handle = glamo;
	glamo->pdev = pdev;
	glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	glamo->irq = platform_get_irq(pdev, 0);
	glamo->pdata = pdev->dev.platform_data;
	if (!glamo->mem || !glamo->pdata) {
		dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
		rc = -ENOENT;
		goto bail_free;
	}

	/* register a number of sibling devices whose IOMEM resources
	 * are siblings of pdev's IOMEM resource */
#if 0
	glamo_core_dev.dev.parent = &pdev->dev;
	mangle_mem_resources(glamo_core_dev.resource,
			     glamo_core_dev.num_resources, glamo->mem);
	glamo_core_dev.resource[1].start = glamo->irq;
	glamo_core_dev.resource[1].end = glamo->irq;
	platform_device_register(&glamo_core_dev);
#endif
	/* only remap the generic, hostbus and memory controller registers */
	glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
	if (!glamo->base) {
		dev_err(&pdev->dev, "failed to ioremap() memory region\n");
		rc = -ENOMEM;
		goto bail_free;
	}

	platform_set_drvdata(pdev, glamo);

	(glamo->pdata->glamo_external_reset)(0);
	udelay(10);
	(glamo->pdata->glamo_external_reset)(1);
	mdelay(10);

	/*
	 * finally set the mfd interrupts up
	 * can't do them earlier or sibling probes blow up
	 */

	for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
		set_irq_chip(irq, &glamo_irq_chip);
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	if (glamo->pdata->glamo_irq_is_wired &&
	    !glamo->pdata->glamo_irq_is_wired()) {
		set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
		set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
		dev_info(&pdev->dev, "Glamo interrupt registered\n");
		glamo->irq_works = 1;
	} else {
		dev_err(&pdev->dev, "Glamo interrupt not used\n");
		glamo->irq_works = 0;
	}

	/* confirm this is a version of the chip we can handle */
	if (!glamo_supported(glamo)) {
		dev_err(&pdev->dev, "This Glamo is not supported\n");
		rc = -ENODEV;
		goto bail_irq;
	}

	/* sysfs */
	rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
	if (rc < 0) {
		dev_err(&pdev->dev, "cannot create sysfs group\n");
		goto bail_irq;
	}

	/* init the chip with canned register set */

	dev_dbg(&glamo->pdev->dev, "running init script\n");
	glamo_run_script(glamo, glamo_init_script,
			 ARRAY_SIZE(glamo_init_script), 1);

	dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
		 glamo_pll_rate(glamo, GLAMO_PLL1),
		 glamo_pll_rate(glamo, GLAMO_PLL2));

	/* bring MCI specific stuff over from our MFD platform data */
	glamo_mci_def_pdata.glamo_can_set_mci_power =
		glamo->pdata->glamo_can_set_mci_power;
	glamo_mci_def_pdata.glamo_mci_use_slow =
		glamo->pdata->glamo_mci_use_slow;
	glamo_mci_def_pdata.glamo_irq_is_wired =
		glamo->pdata->glamo_irq_is_wired;

	/* start creating the siblings */

	glamo_2d_dev.dev.parent = &pdev->dev;
	mangle_mem_resources(glamo_2d_dev.resource,
			     glamo_2d_dev.num_resources, glamo->mem);
	platform_device_register(&glamo_2d_dev);

	glamo_3d_dev.dev.parent = &pdev->dev;
	mangle_mem_resources(glamo_3d_dev.resource,
			     glamo_3d_dev.num_resources, glamo->mem);
	platform_device_register(&glamo_3d_dev);

	glamo_jpeg_dev.dev.parent = &pdev->dev;
	mangle_mem_resources(glamo_jpeg_dev.resource,
			     glamo_jpeg_dev.num_resources, glamo->mem);
	platform_device_register(&glamo_jpeg_dev);

	glamo_mpeg_dev.dev.parent = &pdev->dev;
	mangle_mem_resources(glamo_mpeg_dev.resource,
			     glamo_mpeg_dev.num_resources, glamo->mem);
	platform_device_register(&glamo_mpeg_dev);

	glamo->pdata->glamo = glamo;
	glamo_fb_dev.dev.parent = &pdev->dev;
	glamo_fb_dev.dev.platform_data = glamo->pdata;
	mangle_mem_resources(glamo_fb_dev.resource,
			     glamo_fb_dev.num_resources, glamo->mem);
	platform_device_register(&glamo_fb_dev);

	glamo->pdata->spigpio_info->glamo = glamo;
	glamo_spigpio_dev.dev.parent = &pdev->dev;
	glamo_spigpio_dev.dev.platform_data = glamo->pdata->spigpio_info;
	platform_device_register(&glamo_spigpio_dev);

	glamo_mmc_dev = glamo->pdata->mmc_dev;
	glamo_mmc_dev->name = "glamo-mci";
	glamo_mmc_dev->dev.parent = &pdev->dev;
	glamo_mmc_dev->resource = glamo_mmc_resources;
	glamo_mmc_dev->num_resources = ARRAY_SIZE(glamo_mmc_resources);
	glamo_mmc_dev->dev.platform_data = &glamo_mci_def_pdata;

	/* we need it later to give to the engine enable and disable */
	glamo_mci_def_pdata.pglamo = glamo;
	mangle_mem_resources(glamo_mmc_dev->resource,
			     glamo_mmc_dev->num_resources, glamo->mem);
	platform_device_register(glamo_mmc_dev);

	/* only request the generic, hostbus and memory controller MMIO */
	glamo->mem = request_mem_region(glamo->mem->start,
					GLAMO_REGOFS_VIDCAP, "glamo-core");
	if (!glamo->mem) {
		dev_err(&pdev->dev, "failed to request memory region\n");
		rc = -EBUSY;
		goto bail_irq;
	}

	return 0;

bail_irq:
	disable_irq(glamo->irq);
	set_irq_chained_handler(glamo->irq, NULL);

	for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
		set_irq_flags(irq, 0);
		set_irq_chip(irq, NULL);
	}

	iounmap(glamo->base);
bail_free:
	platform_set_drvdata(pdev, NULL);
	glamo_handle = NULL;
	kfree(glamo);

	return rc;
}

static int glamo_remove(struct platform_device *pdev)
{
	struct glamo_core *glamo = platform_get_drvdata(pdev);
	int irq;

	disable_irq(glamo->irq);
	set_irq_chained_handler(glamo->irq, NULL);

	for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
		set_irq_flags(irq, 0);
		set_irq_chip(irq, NULL);
	}

	platform_set_drvdata(pdev, NULL);
	platform_device_unregister(&glamo_fb_dev);
	platform_device_unregister(glamo->pdata->mmc_dev);
	iounmap(glamo->base);
	release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
	glamo_handle = NULL;
	kfree(glamo);

	return 0;
}

#ifdef CONFIG_PM

static int glamo_suspend(struct platform_device *pdev, pm_message_t state)
{
	glamo_handle->suspending = 1;
	glamo_power(glamo_handle, GLAMO_POWER_SUSPEND);

	return 0;
}

static int glamo_resume(struct platform_device *pdev)
{
	glamo_power(glamo_handle, GLAMO_POWER_ON);
	glamo_handle->suspending = 0;

	return 0;
}

#else
#define glamo_suspend NULL
#define glamo_resume  NULL
#endif

static struct platform_driver glamo_driver = {
	.probe		= glamo_probe,
	.remove		= glamo_remove,
	.suspend	= glamo_suspend,
	.resume		= glamo_resume,
	.driver		= {
		.name	= "glamo3362",
		.owner	= THIS_MODULE,
	},
};

static int __init glamo_init(void)
{
	return platform_driver_register(&glamo_driver);
}

static void __exit glamo_cleanup(void)
{
	platform_driver_unregister(&glamo_driver);
}

module_init(glamo_init);
module_exit(glamo_cleanup);

MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
MODULE_LICENSE("GPL");