use broken-out patches for the coldfire to make it easier to follow differences again...
[openwrt/svn-archive/archive.git] / target/linux/coldfire/patches/046-mcfv4e_kern_to_phys.patch
diff --git a/target/linux/coldfire/patches/046-mcfv4e_kern_to_phys.patch b/target/linux/coldfire/patches/046-mcfv4e_kern_to_phys.patch
new file mode 100644
index 0000000..b111789
--- /dev/null
+++ b/target/linux/coldfire/patches/046-mcfv4e_kern_to_phys.patch
@@ -0,0 +1,1614 @@
+From 13fc0abf4b5da6f18dea15fd31697655615a79fc Mon Sep 17 00:00:00 2001
+From: Kurt Mahan <kmahan@freescale.com>
+Date: Wed, 12 Mar 2008 15:39:03 -0600
+Subject: [PATCH] Move kernel to physical sdram base address.
+
+Work based on the patches from Systec GmbH.  Also adds DMA support,
+U-Boot integration, FEC fixes, and assorted cleanups.
+
+Uber patch needs to be split up.
+
+LTIBName: mcfv4e-kern-to-phys
+Signed-off-by: Kurt Mahan <kmahan@freescale.com>
+---
+ arch/m68k/coldfire/config.c           |  102 ++-----
+ arch/m68k/coldfire/dma.c              |    7 +-
+ arch/m68k/coldfire/entry.S            |   10 +
+ arch/m68k/coldfire/head.S             |   53 +++-
+ arch/m68k/coldfire/mcf5445x-devices.c |    2 +
+ arch/m68k/coldfire/mcf5445x-pci.c     |    3 +
+ arch/m68k/coldfire/signal.c           |    3 +
+ arch/m68k/configs/m5485evb_defconfig  |   79 +++++-
+ arch/m68k/mm/cache.c                  |    4 +-
+ arch/m68k/mm/cf-mmu.c                 |   28 ++-
+ arch/m68k/mm/memory.c                 |    8 +-
+ drivers/net/fec/fec.c                 |   20 +-
+ include/asm-m68k/bootinfo.h           |   59 +++-
+ include/asm-m68k/cf_cacheflush.h      |  547 +++++++++++----------------------
+ include/asm-m68k/cf_pgalloc.h         |    5 +-
+ include/asm-m68k/cf_pgtable.h         |    2 +-
+ include/asm-m68k/cfcache.h            |    6 +
+ include/asm-m68k/page.h               |    8 +
+ include/asm-m68k/page_offset.h        |   25 +-
+ 19 files changed, 446 insertions(+), 525 deletions(-)
+
+--- a/arch/m68k/coldfire/config.c
++++ b/arch/m68k/coldfire/config.c
+@@ -23,7 +23,6 @@
+ #include <asm/coldfire.h>
+ #include <asm/cfcache.h>
+ #include <asm/cacheflush.h>
+-#include <asm/bootinfo.h>
+ #include <asm/io.h>
+ #include <asm/cfmmu.h>
+ #include <asm/setup.h>
+@@ -36,7 +35,14 @@
+ #include <asm/mcfsim.h>
+-#if 0
++#if defined(CONFIG_M54455)
++#define UBOOT_EXTRA_CLOCKS
++#elif defined(CONFIG_M547X_8X)
++#define UBOOT_PCI
++#endif
++#include <asm/bootinfo.h>
++
++#ifdef CONFIG_M54455
+ #include <asm/mcf5445x_intc.h>
+ #include <asm/mcf5445x_sdramc.h>
+ #include <asm/mcf5445x_fbcs.h>
+@@ -44,10 +50,6 @@
+ #include <asm/mcf5445x_xbs.h>
+ #endif
+-/* JKM -- testing */
+-#include <linux/pfn.h>
+-/* JKM */
+-
+ extern int get_irq_list(struct seq_file *p, void *v);
+ extern char _text, _end;
+ extern char _etext, _edata, __init_begin, __init_end;
+@@ -58,6 +60,10 @@ extern unsigned long availmem;
+ static int irq_enable[NR_IRQS];
+ unsigned long num_pages;
++/* ethernet mac addresses from uboot */
++unsigned char uboot_enet0[6];
++unsigned char uboot_enet1[6];
++
+ void coldfire_sort_memrec(void)
+ {
+       int i, j;
+@@ -87,6 +93,9 @@ void coldfire_sort_memrec(void)
+       }
+ }
++/*
++ * UBoot Handler
++ */
+ int __init uboot_commandline(char *bootargs)
+ {
+       int len = 0, cmd_line_len;
+@@ -101,12 +110,17 @@ int __init uboot_commandline(char *boota
+               return 0;
+       /* Add offset to get post-remapped kernel memory location */
+-      uboot_info.bd_info = (*(u32 *)(uboot_info_stk)) + offset;
++      uboot_info.bdi = (struct bd_info *)((*(u32 *)(uboot_info_stk)) + offset);
+       uboot_info.initrd_start = (*(u32 *)(uboot_info_stk+4)) + offset;
+       uboot_info.initrd_end = (*(u32 *)(uboot_info_stk+8)) + offset;
+       uboot_info.cmd_line_start = (*(u32 *)(uboot_info_stk+12)) + offset;
+       uboot_info.cmd_line_stop = (*(u32 *)(uboot_info_stk+16)) + offset;
++      /* copy over mac addresses */
++      memcpy(uboot_enet0, uboot_info.bdi->bi_enet0addr, 6);
++      memcpy(uboot_enet1, uboot_info.bdi->bi_enet1addr, 6);
++
++      /* copy command line */
+       cmd_line_len = uboot_info.cmd_line_stop - uboot_info.cmd_line_start;
+       if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1))
+               len = (int)strncpy(bootargs, (char *)uboot_info.cmd_line_start,\
+@@ -121,7 +135,7 @@ int __init uboot_commandline(char *boota
+ #if defined(CONFIG_M54455)
+ #define DEFAULT_COMMAND_LINE "root=/dev/mtdblock1 rw rootfstype=jffs2 ip=none mtdparts=physmap-flash.0:5M(kernel)ro,-(jffs2)"
+ #elif defined(CONFIG_M547X_8X)
+-#define DEFAULT_COMMAND_LINE "debug root=/dev/nfs nfsroot=172.27.155.1:/tftpboot/rigo/rootfs/ ip=172.27.155.85:172.27.155.1"
++#define DEFAULT_COMMAND_LINE "debug root=/dev/nfs rw nfsroot=172.27.155.1:/tftpboot/rigo/rootfs/ ip=172.27.155.85:172.27.155.1"
+ #endif
+ asmlinkage void __init cf_early_init(void)
+ {
+@@ -142,7 +156,6 @@ asmlinkage void __init cf_early_init(voi
+       MCF_INTC1_IMRL = 0xFFFFFFFF;
+       MCF_INTC1_IMRH = 0xFFFFFFFF;
+ #elif defined(CONFIG_M547X_8X)
+-/* JKM -- ?? */
+       MCF_IMRL = 0xFFFFFFFF;
+       MCF_IMRH = 0xFFFFFFFF;
+ #endif
+@@ -170,7 +183,6 @@ asmlinkage void __init cf_early_init(voi
+                       MCF_XBS_PRS_M6(MCF_XBS_PRI_1) |
+                       MCF_XBS_PRS_M7(MCF_XBS_PRI_7));
+ #endif
+-      
+       m68k_machtype = MACH_CFMMU;
+       m68k_fputype = FPU_CFV4E;
+@@ -181,7 +193,6 @@ asmlinkage void __init cf_early_init(voi
+       m68k_memory[m68k_num_memory].addr = CONFIG_SDRAM_BASE;
+       m68k_memory[m68k_num_memory++].size = CONFIG_SDRAM_SIZE;
+-#if defined(CONFIG_M54455)
+       if (!uboot_commandline(m68k_command_line)) {
+ #if defined(CONFIG_BOOTPARAM)
+               strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE-1);
+@@ -189,11 +200,6 @@ asmlinkage void __init cf_early_init(voi
+               strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
+ #endif
+       }
+-#else
+-/* JKM -- hack until mappings get resolved */
+-      strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
+-#endif
+-
+ #if defined(CONFIG_BLK_DEV_INITRD)
+       /* add initrd image */
+@@ -372,75 +378,11 @@ void coldfire_reboot(void)
+ #endif
+ }
+-/* int coldfire_hwclk(int i, struct rtc_time *t)
+-{
+-      printk ("Real time clock needs porting.\n");
+-      return 0;
+-}*/
+-
+ static void coldfire_get_model(char *model)
+ {
+       sprintf(model, "Version 4 ColdFire");
+ }
+-/* JKM -- Why do we need these? */
+-void coldfire_enable_irq(unsigned int vec)
+-{
+-      unsigned long flags;
+-
+-      vec -= 64;
+-
+-      if (((int)vec < 0) || (vec > 63)) {
+-              printk(KERN_WARNING "enable_irq %d failed\n", vec);
+-              return;
+-      }
+-
+-      local_irq_save(flags);
+-      irq_enable[vec]++;
+-#if defined(CONFIG_M54455)
+-      if (vec < 32)
+-              MCF_INTC0_IMRL &= ~(1 << vec);
+-      else
+-              MCF_INTC0_IMRH &= ~(1 << (vec - 32));
+-#elif defined(CONFIG_M547X_8X)
+-      if (vec < 32)
+-              MCF_IMRL &= ~(1 << vec);
+-      else
+-              MCF_IMRH &= ~(1 << (vec - 32));
+-#endif
+-      local_irq_restore(flags);
+-}
+-
+-/* JKM -- Why do we need these? */
+-void coldfire_disable_irq(unsigned int vec)
+-{
+-      unsigned long flags;
+-
+-      vec -= 64;
+-
+-      if (((int)vec < 0) || (vec > 63)) {
+-              printk(KERN_WARNING "disable_irq %d failed\n", vec);
+-              return;
+-      }
+-
+-      local_irq_save(flags);
+-      if (--irq_enable[vec] == 0) {
+-#if defined(CONFIG_M54455)
+-              if (vec < 32)
+-                      MCF_INTC0_IMRL |= (1 << vec);
+-              else
+-                      MCF_INTC0_IMRH |= (1 << (vec - 32));
+-#elif defined(CONFIG_M547X_8X)
+-              if (vec < 32)
+-                      MCF_IMRL |= (1 << vec);
+-              else
+-                      MCF_IMRH |= (1 << (vec - 32));
+-#endif
+-
+-      }
+-      local_irq_restore(flags);
+-}
+-
+ static void __init
+ coldfire_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
+ {
+--- a/arch/m68k/coldfire/dma.c
++++ b/arch/m68k/coldfire/dma.c
+@@ -516,13 +516,8 @@ dma_init()
+       MCF_DMA_DIPR = 0xFFFFFFFF;
+       MCF_ICR(ISC_DMA) = ILP_DMA;
+-#if 0
+-// JKM
+-      enable_irq( 64 + ISC_DMA );
+-#endif
+-      result =
+-          MCD_initDma((dmaRegs *) (MCF_MBAR + 0x8000),
++      result = MCD_initDma((dmaRegs *) (MCF_MBAR + 0x8000),
+                       (void *) SYS_SRAM_DMA_START, MCD_RELOC_TASKS);
+       if (result != MCD_OK) {
+               printk("Cannot perform DMA initialization\n");
+--- a/arch/m68k/coldfire/entry.S
++++ b/arch/m68k/coldfire/entry.S
+@@ -10,6 +10,8 @@
+  *  Kurt Mahan kmahan@freescale.com
+  *  Copyright Freescale Semiconductor, Inc. 2007
+  *
+ *  Modify irq status in buserr -- (c) Copyright 2008, SYSTEM electronic GmbH
++ *
+  * Based on:
+  *
+  *  arch/m68knommu/platform/5307/entry.S &
+@@ -77,7 +79,15 @@ sw_usp:
+ .globl inthandler
+ ENTRY(buserr)
++#ifdef CONFIG_COLDFIRE_FOO
++      movew   #0x2700,%sr             /* lock interrupts */
++#endif
+       SAVE_ALL_INT
++#ifdef        CONFIG_COLDFIRE_FOO
++      movew   PT_SR(%sp),%d3          /* get original %sr */
++      oril    #0x2000,%d3             /* set supervisor mode in it */
++      movew   %d3,%sr                 /* recover irq state */
++#endif
+       GET_CURRENT(%d0)
+       movel   %sp,%sp@-               /* stack frame pointer argument */
+       jsr     buserr_c
+--- a/arch/m68k/coldfire/head.S
++++ b/arch/m68k/coldfire/head.S
+@@ -5,6 +5,7 @@
+  *    Matt Waddel Matt.Waddel@freescale.com
+  *    Kurt Mahan kmahan@freescale.com
+  *  Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *  Phys kernel mapping Copyright Daniel Krueger, SYSTEC electronic GmbH 2008
+  *
+  *  This program is free software; you can redistribute it and/or modify
+  *  it under the terms of the GNU General Public License as published by
+@@ -52,7 +53,7 @@
+ #define __FINIT               .previous
+ #endif
+-/* JKM -- REVISE DOCS FOR RIGO */
++/* JKM -- REVISE DOCS FOR RIGO and PHYS MAPPING */
+ /*
+  * Setup ACR mappings to provide the following memory map:
+  *   Data
+@@ -62,10 +63,17 @@
+  *     None currently (mapped via TLBs)
+  */
++#if CONFIG_SDRAM_BASE != PAGE_BASE
+ #if defined(CONFIG_M54455)
++#if 0
+ #define ACR0_DEFAULT  #0xA00FA048   /* ACR0 default value */
++#endif
++#define ACR0_DEFAULT  #0x400FA028   /* ACR0 default value */
+ #define ACR1_DEFAULT  #0xF00FA040   /* ACR1 default value */
++#if 0
+ #define ACR2_DEFAULT  #0x00000000   /* ACR2 default value */
++#endif
++#define ACR2_DEFAULT  #0x400FA028   /* ACR2 default value */
+ #define ACR3_DEFAULT  #0x00000000   /* ACR3 default value */
+ /* ACR mapping for FPGA (maps 0) */
+ #define ACR0_FPGA     #0x000FA048   /* ACR0 enable FPGA */
+@@ -76,6 +84,16 @@
+ #define ACR3_DEFAULT  #0x00000000   /* ACR3 default value */
+ #endif
++#else
++#if defined(CONFIG_M54455)
++#elif defined(CONFIG_M547X_8X)
++#define ACR0_DEFAULT  #0xE000C040   /* ACR0 default value */
++#define ACR1_DEFAULT  #0x000FA008   /* ACR1 default value */
++#define ACR2_DEFAULT  #0x00000000   /* ACR2 default value */
++#define ACR3_DEFAULT  #0x000FA008   /* ACR3 default value */
++#endif
++#endif
++
+ /* Several macros to make the writing of subroutines easier:
+  * - func_start marks the beginning of the routine which setups the frame
+@@ -153,6 +171,7 @@ func_define        putn,1
+ #endif
+ .endm
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ /*
+       mmu_map  -  creates a new TLB entry
+@@ -222,6 +241,7 @@ func_define        putn,1
+       movew   \temp_data_reg, (MMUOR)
+ 1:    
+ .endm /* mmu_unmap */
++#endif /* CONFIG_SDRAM_BASE != PAGE_OFFSET */
+ /* .text */
+ .section ".text.head","ax"
+@@ -235,7 +255,11 @@ ENTRY(_stext)
+       .long   BOOTINFOV_MAGIC
+       .long   0
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ 1:    jmp     __start-(0xc0000000-CONFIG_SDRAM_BASE)
++#else
++1:    jmp     __start
++#endif
+ .equ  kernel_pg_dir,_stext
+ .equ  .,_stext+0x1000
+@@ -244,24 +268,13 @@ ENTRY(_start)
+       jra     __start
+ __INIT
+ ENTRY(__start)
+-
+-/* JKM -- make sure Rigo handles UBOOT */
+ /* Save the location of u-boot info - cmd line, bd_info, etc. */
+       movel   %a7,%a4         /* Don't use %a4 before cf_early_init */
+-/* JKM -- fix to use CONFIG_SDRAM_BASE) */
+-#if defined(CONFIG_M54455)
+-      addl    #0x80000004,%a4 /* 0x80000004= 1 stack push + high mem offset */
+-#elif defined(CONFIG_M547X_8X)
+-      addl    #0xc0000004,%a4 /* 0x80000004= 1 stack push + high mem offset */
+-#endif
++      addl    #0x00000004,%a4 /* offset past top */
++      addl    #(PAGE_OFFSET-CONFIG_SDRAM_BASE),%a4    /* high mem offset */
+-/* JKM -- fix this to work off of CONFIG_SDRAM_BASE */
+ /* Setup initial stack pointer */
+-#if defined(CONFIG_M54455)
+-      movel   #0x40001000,%sp 
+-#elif defined(CONFIG_M547X_8X)
+-      movel   #0x00001000,%sp 
+-#endif
++      movel   #CONFIG_SDRAM_BASE+0x1000,%sp
+ /* Setup usp */
+       subl    %a0,%a0
+@@ -297,16 +310,21 @@ ENTRY(__start)
+ /* setup ACRs */
+       movel   ACR0_DEFAULT, %d0               /* ACR0 (DATA) setup */
+       movec   %d0, %acr0
++      nop
+       movel   ACR1_DEFAULT, %d0               /* ACR1 (DATA) setup */
+       movec   %d0, %acr1
++      nop
+       movel   ACR2_DEFAULT, %d0               /* ACR2 (CODE) setup */
+       movec   %d0, %acr2
++      nop
+       movel   ACR3_DEFAULT, %d0               /* ACR3 (CODE) setup */
+       movec   %d0, %acr3
++      nop
+       /* If you change the memory size to another value make a matching 
+          change in paging_init(cf-mmu.c) to zones_size[]. */
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ #if defined(CONFIG_M54455)
+       /* Map 256MB as code */
+       mmu_map (PAGE_OFFSET+0*0x1000000),  (PHYS_OFFSET+0*0x1000000), \
+@@ -511,6 +529,7 @@ ENTRY(__start)
+               MMUTR_SG, MMUDR_SZ1M, MMUDR_DCB, MMUDR_SP, MMUDR_R, \
+               MMUDR_W, 0, 0, %d0
+ #endif
++#endif /* CONFIG_SDRAM_BASE != PAGE_OFFSET */
+       /* Turn on MMU */
+       movel   #(MMUCR_EN),%a0
+@@ -531,6 +550,7 @@ ENTRY(__running_high)
+       movel   %a1,%a0@
+       /* Unmap unity mappings */
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ #if defined(CONFIG_M54455)
+       mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
+       mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
+@@ -544,6 +564,7 @@ ENTRY(__running_high)
+       mmu_unmap (PHYS_OFFSET+2*0x1000000), 0, %d0
+       mmu_unmap (PHYS_OFFSET+3*0x1000000), 0, %d0
+ #endif
++#endif /* CONFIG_SDRAM_BASE != PAGE_OFFSET */
+ /* Setup initial stack pointer */
+       lea     init_task,%a2 
+@@ -574,6 +595,7 @@ func_return        set_context
+  * Map in 0x00000000 -> 0x0fffffff and then do the write.
+  */
+ set_fpga:
++#if 0
+       movew   %sr,%d1
+       movew   #0x2700,%sr
+       movel   ACR0_FPGA, %d0
+@@ -585,6 +607,7 @@ set_fpga:
+       movec   %d0, %acr0
+       nop
+       movew   %d1,%sr
++#endif
+       rts
+ #endif
+--- a/arch/m68k/coldfire/mcf5445x-devices.c
++++ b/arch/m68k/coldfire/mcf5445x-devices.c
+@@ -127,7 +127,9 @@ static inline void mcf5445x_init_pata(vo
+ static int __init mcf5445x_init_devices(void)
+ {
+       printk(KERN_INFO "MCF5445x INIT_DEVICES\n");
++#if 0
+       mcf5445x_init_pata();
++#endif
+       return 0;
+ }
+--- a/arch/m68k/coldfire/mcf5445x-pci.c
++++ b/arch/m68k/coldfire/mcf5445x-pci.c
+@@ -236,6 +236,8 @@ mcf5445x_pci_arb_interrupt(int irq, void
+ int __init
+ init_mcf5445x_pci(void)
+ {
++      return 0;
++#if 0
+       /*
+        * Initialize the PCI core
+        */
+@@ -331,6 +333,7 @@ init_mcf5445x_pci(void)
+       raw_pci_ops = &mcf5445x_pci_ops;
+       return 0;
++#endif
+ }
+ /*
+--- a/arch/m68k/coldfire/signal.c
++++ b/arch/m68k/coldfire/signal.c
+@@ -608,7 +608,10 @@ static inline int rt_setup_ucontext(stru
+ static inline void push_cache(unsigned long vaddr)
+ {
++#if 0 
++// JKM -- need to add into the old cpushl cache stuff
+       cf_cache_push(__pa(vaddr), 8);
++#endif
+ }
+ static inline void __user *
+--- a/arch/m68k/configs/m5485evb_defconfig
++++ b/arch/m68k/configs/m5485evb_defconfig
+@@ -1,7 +1,7 @@
+ #
+ # Automatically generated make config: don't edit
+ # Linux kernel version: 2.6.24
+-# Thu Feb 21 00:19:01 2008
++# Wed Mar  5 16:05:25 2008
+ #
+ CONFIG_M68K=y
+ CONFIG_MMU=y
+@@ -103,6 +103,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
+ # CONFIG_SUN3 is not set
+ CONFIG_COLDFIRE=y
+ CONFIG_CFV4E=y
++CONFIG_MCD_DMA=y
+ # CONFIG_AMIGA is not set
+ # CONFIG_ATARI is not set
+ # CONFIG_MAC is not set
+@@ -135,7 +136,7 @@ CONFIG_M5485CFE=y
+ # CONFIG_M5485DFE is not set
+ # CONFIG_M5485EFE is not set
+ # CONFIG_M5485FFE is not set
+-CONFIG_MCFCLK=50000000
++CONFIG_MCFCLK=200000000
+ # CONFIG_MCF_USER_HALT is not set
+ CONFIG_MMU_CFV4E=y
+ CONFIG_SDRAM_BASE=0x00000000
+@@ -279,7 +280,73 @@ CONFIG_FW_LOADER=y
+ # CONFIG_DEBUG_DEVRES is not set
+ # CONFIG_SYS_HYPERVISOR is not set
+ # CONFIG_CONNECTOR is not set
+-# CONFIG_MTD is not set
++CONFIG_MTD=y
++# CONFIG_MTD_DEBUG is not set
++# CONFIG_MTD_CONCAT is not set
++CONFIG_MTD_PARTITIONS=y
++# CONFIG_MTD_REDBOOT_PARTS is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++# CONFIG_MTD_CHAR is not set
++CONFIG_MTD_BLKDEVS=y
++CONFIG_MTD_BLOCK=y
++# CONFIG_FTL is not set
++# CONFIG_NFTL is not set
++# CONFIG_INFTL is not set
++# CONFIG_RFD_FTL is not set
++# CONFIG_SSFDC is not set
++# CONFIG_MTD_OOPS is not set
++
++#
++# RAM/ROM/Flash chip drivers
++#
++# CONFIG_MTD_CFI is not set
++# CONFIG_MTD_JEDECPROBE is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_RAM=y
++CONFIG_MTD_ROM=y
++# CONFIG_MTD_ABSENT is not set
++
++#
++# Mapping drivers for chip access
++#
++# CONFIG_MTD_COMPLEX_MAPPINGS is not set
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PLATRAM is not set
++
++#
++# Self-contained MTD device drivers
++#
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++# CONFIG_MTD_MTDRAM is not set
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++# CONFIG_MTD_DOC2000 is not set
++# CONFIG_MTD_DOC2001 is not set
++# CONFIG_MTD_DOC2001PLUS is not set
++# CONFIG_MTD_NAND is not set
++# CONFIG_MTD_ONENAND is not set
++
++#
++# UBI - Unsorted block images
++#
++# CONFIG_MTD_UBI is not set
+ # CONFIG_PARPORT is not set
+ CONFIG_BLK_DEV=y
+ # CONFIG_BLK_DEV_COW_COMMON is not set
+@@ -347,6 +414,9 @@ CONFIG_NETDEVICES=y
+ # CONFIG_PHYLIB is not set
+ CONFIG_NET_ETHERNET=y
+ CONFIG_MII=y
++CONFIG_FEC_548x=y
++CONFIG_FEC_548x_AUTO_NEGOTIATION=y
++# CONFIG_FEC_548x_ENABLE_FEC2 is not set
+ # CONFIG_IBM_NEW_EMAC_ZMII is not set
+ # CONFIG_IBM_NEW_EMAC_RGMII is not set
+ # CONFIG_IBM_NEW_EMAC_TAH is not set
+@@ -530,7 +600,7 @@ CONFIG_FS_MBCACHE=y
+ # CONFIG_GFS2_FS is not set
+ # CONFIG_OCFS2_FS is not set
+ CONFIG_MINIX_FS=y
+-# CONFIG_ROMFS_FS is not set
++CONFIG_ROMFS_FS=y
+ # CONFIG_INOTIFY is not set
+ # CONFIG_QUOTA is not set
+ CONFIG_DNOTIFY=y
+@@ -578,6 +648,7 @@ CONFIG_TMPFS=y
+ # CONFIG_BEFS_FS is not set
+ # CONFIG_BFS_FS is not set
+ # CONFIG_EFS_FS is not set
++# CONFIG_JFFS2_FS is not set
+ # CONFIG_CRAMFS is not set
+ # CONFIG_VXFS_FS is not set
+ # CONFIG_HPFS_FS is not set
+--- a/arch/m68k/mm/cache.c
++++ b/arch/m68k/mm/cache.c
+@@ -81,7 +81,9 @@ static unsigned long virt_to_phys_slow(u
+ void flush_icache_range(unsigned long address, unsigned long endaddr)
+ {
+ #ifdef CONFIG_COLDFIRE
+-      cf_icache_flush_range(address, endaddr);
++// JKM -- hack until new cpushl stuff is in
++//    cf_icache_flush_range(address, endaddr);
++      flush_icache();
+ #else /* !CONFIG_COLDFIRE */
+       if (CPU_IS_040_OR_060) {
+--- a/arch/m68k/mm/cf-mmu.c
++++ b/arch/m68k/mm/cf-mmu.c
+@@ -35,6 +35,12 @@
+ #include <asm/coldfire.h>
+ #include <asm/tlbflush.h>
++#if PAGE_OFFSET == CONFIG_SDRAM_BASE
++#define       KERNRAM(x) ((x >= PAGE_OFFSET) && (x < (PAGE_OFFSET + CONFIG_SDRAM_SIZE)))
++#else
++#define       KERNRAM(x) (x >= PAGE_OFFSET)
++#endif
++
+ mm_context_t next_mmu_context;
+ unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+@@ -53,10 +59,16 @@ extern char __init_begin, __init_end;
+ void free_initmem(void)
+ {
++#if 0
+       unsigned long addr;
+       unsigned long start = (unsigned long)&__init_begin;
+       unsigned long end = (unsigned long)&__init_end;
++/* 
++ * JKM -- revisit -- the latest round of vmlinux.lds changes has caused
++ * a little grief with how init areas are handled.  With the new toolchain
++ * release I'll fix this.
++ */
+       printk(KERN_INFO "free_initmem: __init_begin = 0x%lx  __init_end = 0x%lx\n", start, end);
+       addr = (unsigned long)&__init_begin;
+@@ -67,6 +79,7 @@ void free_initmem(void)
+               free_page(addr);
+               totalram_pages++;
+       }
++#endif
+ }
+ /* Coldfire paging_init derived from sun3 */
+@@ -149,8 +162,8 @@ int cf_tlb_miss(struct pt_regs *regs, in
+       mmuar = ( dtlb ) ? regs->mmuar
+                        : regs->pc + (extension_word * sizeof(long));
+-        mm = (!user_mode(regs) && (mmuar >= PAGE_OFFSET)) ? &init_mm
+-                                                        : current->mm;
++        mm = (!user_mode(regs) && KERNRAM(mmuar)) ? &init_mm : current->mm;
++
+         if (!mm) {
+           local_irq_restore(flags);
+           return (-1);
+@@ -167,9 +180,9 @@ int cf_tlb_miss(struct pt_regs *regs, in
+           local_irq_restore(flags);
+           return (-1);
+       }       
+-      
+-      pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+-                                   : pte_offset_map(pmd, mmuar);
++    
++      pte = (KERNRAM(mmuar)) ? pte_offset_kernel(pmd, mmuar)
++                             : pte_offset_map(pmd, mmuar);
+       if (pte_none(*pte) || !pte_present(*pte)) {
+           local_irq_restore(flags);
+           return (-1);                
+@@ -185,7 +198,7 @@ int cf_tlb_miss(struct pt_regs *regs, in
+       
+         set_pte(pte, pte_mkyoung(*pte));
+         asid = mm->context & 0xff;
+-        if (!pte_dirty(*pte) && mmuar<=PAGE_OFFSET)
++        if (!pte_dirty(*pte) && !KERNRAM(mmuar))
+           set_pte(pte, pte_wrprotect(*pte));
+         *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
+@@ -201,7 +214,8 @@ int cf_tlb_miss(struct pt_regs *regs, in
+       else
+           *MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;
+-      asm ("nop");
++      asm("nop");
++
+       /*printk("cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK), 
+                 (pte_val(*pte)  & PAGE_MASK));*/
+       local_irq_restore(flags);
+--- a/arch/m68k/mm/memory.c
++++ b/arch/m68k/mm/memory.c
+@@ -206,7 +206,9 @@ static inline void pushcl040(unsigned lo
+ void cache_clear (unsigned long paddr, int len)
+ {
+ #ifdef CONFIG_COLDFIRE
+-      cf_cache_clear(paddr, len);
++// JKM -- revise to use proper caching
++//    cf_cache_clear(paddr, len);
++      flush_bcache();
+ #else
+     if (CPU_IS_040_OR_060) {
+       int tmp;
+@@ -257,7 +259,9 @@ EXPORT_SYMBOL(cache_clear);
+ void cache_push (unsigned long paddr, int len)
+ {
+ #ifdef CONFIG_COLDFIRE
+-      cf_cache_push(paddr, len);
++// JKM -- revise to use proper caching
++//    cf_cache_push(paddr, len);
++      flush_bcache();
+ #else
+     if (CPU_IS_040_OR_060) {
+       int tmp = PAGE_SIZE;
+--- a/drivers/net/fec/fec.c
++++ b/drivers/net/fec/fec.c
+@@ -99,6 +99,9 @@ unsigned char fec_mac_addr_fec0[6] = { 0
+ unsigned char fec_mac_addr_fec1[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x51 };  // Default address of FEC1
+ #endif
++extern unsigned char uboot_enet0[];
++extern unsigned char uboot_enet1[];
++
+ #ifndef MODULE
+ int fec_str_to_mac( char *str_mac, unsigned char* addr);
+ int __init fec_mac_setup0 (char *s);
+@@ -175,6 +178,12 @@ printk(KERN_INFO "FEI: index=%d\n", inde
+ printk(KERN_INFO "FEI: txdesc=0x%p  rxdesc=0x%p\n", fp->fecpriv_txdesc, fp->fecpriv_rxdesc);
+               /* mac addr */
++              if (uboot_enet0[0] || uboot_enet0[1] || uboot_enet0[2] ||
++                  uboot_enet0[3] || uboot_enet0[4] || uboot_enet0[5]) {
++                      /* use uboot enet 0 addr */
++                      memcpy(fec_mac_addr_fec0, uboot_enet0, 6);
++              }
++
+               fp->fecpriv_mac_addr = fec_mac_addr_fec0;
+       }
+       else {
+@@ -211,6 +220,11 @@ printk(KERN_INFO "FEI: txdesc=0x%p  rxde
+               fp->fecpriv_rxdesc = (void*)FEC_RX_DESC_FEC1;
+               /* mac addr */
++              if (uboot_enet1[0] || uboot_enet1[1] || uboot_enet1[2] ||
++                  uboot_enet1[3] || uboot_enet1[4] || uboot_enet1[5]) {
++                      /* use uboot enet 1 addr */
++                      memcpy(fec_mac_addr_fec1, uboot_enet1, 6);
++              }
+               fp->fecpriv_mac_addr = fec_mac_addr_fec1;
+ #endif
+       }
+@@ -762,7 +776,6 @@ int fec_tx(struct sk_buff *skb, struct n
+       void *data, *data_aligned;
+       int offset;
+-printk(KERN_INFO "fec_tx\n");
+       data = kmalloc(skb->len + 15, GFP_DMA | GFP_ATOMIC);
+       if (!data)
+@@ -820,7 +833,6 @@ void fec_tx_timeout(struct net_device *d
+       struct fec_priv *fp = netdev_priv(dev);
+       unsigned long base_addr = (unsigned long) dev->base_addr;
+-printk(KERN_INFO "fec_tx_timeout\n");
+       spin_lock_irq(&fp->fecpriv_lock);
+       MCD_killDma(fp->fecpriv_fec_tx_channel);
+       for (i = 0; i < FEC_TX_BUF_NUMBER; i++)
+@@ -941,7 +953,6 @@ void fec_interrupt_fec_tx_handler(struct
+ {
+       struct fec_priv *fp = netdev_priv(dev);
+-printk(KERN_INFO "fectxint\n");
+       //Release the socket buffer
+       if(fp->fecpriv_txbuf[fp->fecpriv_current_tx])
+       {
+@@ -977,7 +988,6 @@ void fec_interrupt_fec_rx_handler(struct
+       struct fec_priv *fp = netdev_priv(dev);
+       struct sk_buff *skb;
+-printk(KERN_INFO "fecrxint\n");
+       fp->fecpriv_rxflag = 1;
+ /*
+       // Some buffers can be missed
+@@ -1070,7 +1080,6 @@ irqreturn_t fec_interrupt_handler(int ir
+       unsigned long base_addr = (unsigned long) dev->base_addr;
+       unsigned long events;
+-printk(KERN_INFO "fecerrint\n");
+       // Read and clear the events
+       events = FEC_EIR(base_addr) & FEC_EIMR(base_addr);
+@@ -1157,7 +1166,6 @@ void fec_interrupt_fec_reinit(unsigned l
+       struct fec_priv *fp = netdev_priv(dev);
+       unsigned long base_addr = (unsigned long) dev->base_addr;
+-printk(KERN_INFO "fecreinit\n");
+       // Initialize reception descriptors and start DMA for the reception
+       for (i = 0; i < FEC_RX_BUF_NUMBER; i++)
+       {
+--- a/include/asm-m68k/bootinfo.h
++++ b/include/asm-m68k/bootinfo.h
+@@ -19,11 +19,59 @@
+ **   Redesign of the boot information structure; renamed to bootinfo.h again
+ ** 27/11/96 Geert Uytterhoeven:
+ **   Backwards compatibility with bootinfo interface version 1.0
++** 12/03/08 Kurt Mahan
++**   Copy the bd_info structure from uboot1.3.2/include/asm-m68k/u-boot.h
++**   into this file.  The original author is Wolfgang Denk, 
++**   DENX Software Engineering, wd@denx.de.
+ */
+ #ifndef _M68K_BOOTINFO_H
+ #define _M68K_BOOTINFO_H
++#ifndef __ASSEMBLY__
++/*
++ * UBoot Support
++ *
++ * bd_info structure from uboot1.3.2/include/asm-m68k/u-boot.h
++ */
++struct bd_info {
++      unsigned long bi_memstart;      /* start of DRAM memory */
++      unsigned long bi_memsize;       /* size  of DRAM memory in bytes */
++      unsigned long bi_flashstart;    /* start of FLASH memory */
++      unsigned long bi_flashsize;     /* size  of FLASH memory */
++      unsigned long bi_flashoffset;   /* reserved area for startup monitor */
++      unsigned long bi_sramstart;     /* start of SRAM memory */
++      unsigned long bi_sramsize;      /* size  of SRAM memory */
++      unsigned long bi_mbar_base;     /* base of internal registers */
++      unsigned long bi_bootflags;     /* boot / reboot flag (for LynxOS) */
++      unsigned long bi_boot_params;   /* where this board expects params */
++      unsigned long bi_ip_addr;       /* IP Address */
++      unsigned char bi_enet0addr[6];  /* Ethernet 0 mac address */
++      unsigned short bi_ethspeed;     /* Ethernet speed in Mbps */
++      unsigned long bi_intfreq;       /* Internal Freq, in MHz */
++      unsigned long bi_busfreq;       /* Bus Freq, in MHz */
++#ifdef UBOOT_PCI
++      unsigned long bi_pcifreq;       /* pci Freq in MHz */
++#endif
++#ifdef UBOOT_EXTRA_CLOCKS
++      unsigned long bi_inpfreq;       /* input Freq in MHz */
++      unsigned long bi_vcofreq;       /* vco Freq in MHz */
++      unsigned long bi_flbfreq;       /* Flexbus Freq in MHz */
++#endif
++      unsigned long bi_baudrate;      /* Console Baudrate */
++      unsigned char bi_enet1addr[6];  /* eth1 mac address */
++      unsigned char bi_enet2addr[6];  /* eth2 mac address */
++      unsigned char bi_enet3addr[6];  /* eth3 mac address */
++};
++
++struct uboot_record {
++      struct bd_info *bdi;
++      unsigned long initrd_start;
++      unsigned long initrd_end;
++      unsigned long cmd_line_start;
++      unsigned long cmd_line_stop;
++};
++#endif /* __ASSEMBLY__ */
+     /*
+      *  Bootinfo definitions
+@@ -49,17 +97,6 @@ struct bi_record {
+ #endif /* __ASSEMBLY__ */
+-#ifndef __ASSEMBLY__
+-
+-struct uboot_record {
+-    unsigned long bd_info;
+-    unsigned long initrd_start;
+-    unsigned long initrd_end;
+-    unsigned long cmd_line_start;
+-    unsigned long cmd_line_stop;
+-};
+-
+-#endif /* __ASSEMBLY__ */
+     /*
+--- a/include/asm-m68k/cf_cacheflush.h
++++ b/include/asm-m68k/cf_cacheflush.h
+@@ -1,335 +1,92 @@
+-/*
+- * include/asm-m68k/cf_cacheflush.h - Coldfire Cache
+- *
+- * Based on include/asm-m68k/cacheflush.h
+- *
+- * Coldfire pieces by:
+- *   Kurt Mahan kmahan@freescale.com
+- *
+- * Copyright Freescale Semiconductor, Inc. 2007
+- *
+- * This program is free software; you can redistribute  it and/or modify it
+- * under  the terms of  the GNU General  Public License as published by the
+- * Free Software Foundation;  either version 2 of the  License, or (at your
+- * option) any later version.
+- */
+ #ifndef M68K_CF_CACHEFLUSH_H
+ #define M68K_CF_CACHEFLUSH_H
+ #include <asm/cfcache.h>
+-
+ /*
+- * Coldfire Cache Model
+- *
+- * The Coldfire processors use a Harvard architecture cache configured
+- * as four-way set associative.  The cache does not implement bus snooping
+- * so cache coherency with other masters must be maintained in software.
+- *
+- * The cache is managed via the CPUSHL instruction in conjunction with
+- * bits set in the CACR (cache control register).  Currently the code
+- * uses the CPUSHL enhancement which adds the ability to
+- * invalidate/clear/push a cacheline by physical address.  This feature
+- * is designated in the Hardware Configuration Register [D1-CPES].
+- *
+- * CACR Bits:
+- *    DPI[28]         cpushl invalidate disable for d-cache
+- *    IDPI[12]        cpushl invalidate disable for i-cache
+- *    SPA[14]         cpushl search by physical address
+- *    IVO[20]         cpushl invalidate only
+- *
+- * Random Terminology:
+- *  * invalidate = reset the cache line's valid bit
+- *  * push = generate a line-sized store of the data if its contents are marked
+- *         as modifed (the modified flag is cleared after the store)
+- *  * clear = push + invalidate
+- */
+-
+-/**
+- * flush_icache - Flush all of the instruction cache
+- */
+-static inline void flush_icache(void)
+-{
+-      asm volatile("nop\n"
+-                   "moveq%.l  #0,%%d0\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "1:\n"
+-                   "cpushl    %%ic,(%%a0)\n"
+-                   "add%.l    #0x0010,%%a0\n"
+-                   "addq%.l   #1,%%d1\n"
+-                   "cmpi%.l   %0,%%d1\n"
+-                   "bne       1b\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "addq%.l   #1,%%d0\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "cmpi%.l   #4,%%d0\n"
+-                   "bne       1b\n"
+-                   : : "i" (CACHE_SETS)
+-                   : "a0", "d0", "d1");
+-}
+-
+-/**
+- * flush_dcache - Flush all of the data cache
+- */
+-static inline void flush_dcache(void)
+-{
+-      asm volatile("nop\n"
+-                   "moveq%.l  #0,%%d0\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "1:\n"
+-                   "cpushl    %%dc,(%%a0)\n"
+-                   "add%.l    #0x0010,%%a0\n"
+-                   "addq%.l   #1,%%d1\n"
+-                   "cmpi%.l   %0,%%d1\n"
+-                   "bne       1b\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "addq%.l   #1,%%d0\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "cmpi%.l   #4,%%d0\n"
+-                   "bne       1b\n"
+-                   : : "i" (CACHE_SETS)
+-                   : "a0", "d0", "d1");
+-}
+-
+-/**
+- * flush_bcache - Flush all of both caches
++ * Cache handling functions
+  */
+-static inline void flush_bcache(void)
+-{
+-      asm volatile("nop\n"
+-                   "moveq%.l  #0,%%d0\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "add%.l    #0x0010,%%a0\n"
+-                   "addq%.l   #1,%%d1\n"
+-                   "cmpi%.l   %0,%%d1\n"
+-                   "bne       1b\n"
+-                   "moveq%.l  #0,%%d1\n"
+-                   "addq%.l   #1,%%d0\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "cmpi%.l   #4,%%d0\n"
+-                   "bne       1b\n"
+-                   : : "i" (CACHE_SETS)
+-                   : "a0", "d0", "d1");
+-}
+-/**
+- * cf_cache_clear - invalidate cache
+- * @paddr: starting physical address
+- * @len: number of bytes
+- *
+- * Invalidate cache lines starting at paddr for len bytes.
+- * Those lines are not pushed.
+- */
+-static inline void cf_cache_clear(unsigned long paddr, int len)
+-{
+-      /* number of lines */
+-      len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+-      if (len == 0)
+-              return;
+-
+-      /* align on set boundary */
+-      paddr &= 0xfffffff0;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%d0\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "subq%.l   #1,%%d0\n"
+-                   "bne%.b    1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : : "a" (paddr), "r" (len),
+-                       "r" (shadow_cacr),
+-                       "i" (CF_CACR_SPA+CF_CACR_IVO)
+-                   : "a0", "d0");
+-}
+-
+-/**
+- * cf_cache_push - Push dirty cache out with no invalidate
+- * @paddr: starting physical address
+- * @len: number of bytes
+- *
+- * Push the any dirty lines starting at paddr for len bytes.
+- * Those lines are not invalidated.
+- */
+-static inline void cf_cache_push(unsigned long paddr, int len)
+-{
+-      /* number of lines */
+-      len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+-      if (len == 0)
+-              return;
+-
+-      /* align on set boundary */
+-      paddr &= 0xfffffff0;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%d0\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "subq%.l   #1,%%d0\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : : "a" (paddr), "r" (len),
+-                       "r" (shadow_cacr),
+-                       "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
+-                   : "a0", "d0");
+-}
++#define flush_icache()                                                \
++({                                                            \
++  unsigned long set;                                          \
++  unsigned long start_set;                                    \
++  unsigned long end_set;                                      \
++                                                              \
++  start_set = 0;                                              \
++  end_set = (unsigned long)LAST_DCACHE_ADDR;                  \
++                                                              \
++  for (set = start_set; set <= end_set; set += (0x10 - 3)) {  \
++    asm volatile("cpushl %%ic,(%0)\n"                         \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%ic,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%ic,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));              \
++  }                                                           \
++})
++
++#define flush_dcache()                                                \
++({                                                            \
++  unsigned long set;                                          \
++  unsigned long start_set;                                    \
++  unsigned long end_set;                                      \
++                                                              \
++  start_set = 0;                                              \
++  end_set = (unsigned long)LAST_DCACHE_ADDR;                  \
++                                                              \
++  for (set = start_set; set <= end_set; set += (0x10 - 3)) {  \
++    asm volatile("cpushl %%dc,(%0)\n"                         \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%dc,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%dc,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set));              \
++  }                                                           \
++})
++
++#define flush_bcache()                                                \
++({                                                            \
++  unsigned long set;                                          \
++  unsigned long start_set;                                    \
++  unsigned long end_set;                                      \
++                                                              \
++  start_set = 0;                                              \
++  end_set = (unsigned long)LAST_DCACHE_ADDR;                  \
++                                                              \
++  for (set = start_set; set <= end_set; set += (0x10 - 3)) {  \
++    asm volatile("cpushl %%bc,(%0)\n"                         \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%bc,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%bc,(%0)\n"                               \
++                 "\taddq%.l #1,%0\n"                          \
++                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));              \
++  }                                                           \
++})
+-/**
+- * cf_cache_flush - Push dirty cache out and invalidate
+- * @paddr: starting physical address
+- * @len: number of bytes
+- *
+- * Push the any dirty lines starting at paddr for len bytes and
+- * invalidate those lines.
++/*
++ * Invalidate the cache for the specified memory range.
++ * The range starts at the given physical address and
++ * covers the given number of bytes.
+  */
+-static inline void cf_cache_flush(unsigned long paddr, int len)
+-{
+-      /* number of lines */
+-      len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+-      if (len == 0)
+-              return;
+-
+-      /* align on set boundary */
+-      paddr &= 0xfffffff0;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%d0\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "subq%.l   #1,%%d0\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : : "a" (paddr), "r" (len),
+-                       "r" (shadow_cacr),
+-                       "i" (CF_CACR_SPA)
+-                   : "a0", "d0");
+-}
+-
+-/**
+- * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
+- * @vstart - starting virtual address
+- * @vend: ending virtual address
+- *
+- * Push the any dirty data/instr lines starting at paddr for len bytes and
+- * invalidate those lines.
++extern void cache_clear(unsigned long paddr, int len);
++/*
++ * Push any dirty cache lines in the specified memory range.
++ * The range starts at the given physical address and
++ * covers the given number of bytes.
+  */
+-static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
+-{
+-      int len;
+-
+-      /* align on set boundary */
+-      vstart &= 0xfffffff0;
+-      vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
+-      len = vend - vstart;
+-      if (len == 0)
+-              return;
+-      vstart = __pa(vstart);
+-      vend = vstart + len;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%a1\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "cmpa%.l   %%a0,%%a1\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : /* no return */
+-                   : "a" (vstart), "a" (vend),
+-                     "r" (shadow_cacr),
+-                     "i" (CF_CACR_SPA)
+-                   : "a0", "a1", "d0");
+-}
++extern void cache_push(unsigned long paddr, int len);
+-/**
+- * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
+- * @vstart - starting virtual address
+- * @vend: ending virtual address
+- *
+- * Push the any dirty data lines starting at paddr for len bytes and
+- * invalidate those lines.
++/*
++ * push and invalidate pages in the specified user virtual
++ * memory range.
+  */
+-static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
+-{
+-      /* align on set boundary */
+-      vstart &= 0xfffffff0;
+-      vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%a1\n"
+-                   "1:\n"
+-                   "cpushl    %%dc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "cmpa%.l   %%a0,%%a1\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : /* no return */
+-                   : "a" (__pa(vstart)), "a" (__pa(vend)),
+-                     "r" (shadow_cacr),
+-                     "i" (CF_CACR_SPA)
+-                   : "a0", "a1", "d0");
+-}
++extern void cache_push_v(unsigned long vaddr, int len);
+-/**
+- * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
+- * @vstart - starting virtual address
+- * @vend: ending virtual address
+- *
+- * Push the any dirty instr lines starting at paddr for len bytes and
+- * invalidate those lines.  This should just be an invalidate since you
+- * shouldn't be able to have dirty instruction cache.
+- */
+-static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
+-{
+-      /* align on set boundary */
+-      vstart &= 0xfffffff0;
+-      vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+-
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%a0\n"
+-                   "move%.l   %1,%%a1\n"
+-                   "1:\n"
+-                   "cpushl    %%ic,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "cmpa%.l   %%a0,%%a1\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : /* no return */
+-                   : "a" (__pa(vstart)), "a" (__pa(vend)),
+-                     "r" (shadow_cacr),
+-                     "i" (CF_CACR_SPA)
+-                   : "a0", "a1", "d0");
+-}
++/* This is needed whenever the virtual mapping of the current
++   process changes.  */
+ /**
+  * flush_cache_mm - Flush an mm_struct
+@@ -343,6 +100,8 @@ static inline void flush_cache_mm(struct
+ #define flush_cache_dup_mm(mm)        flush_cache_mm(mm)
++#define flush_cache_all()             flush_bcache()
++
+ /**
+  * flush_cache_range - Flush a cache range
+  * @vma: vma struct
+@@ -356,7 +115,8 @@ static inline void flush_cache_range(str
+       unsigned long start, unsigned long end)
+ {
+       if (vma->vm_mm == current->mm)
+-              cf_cache_flush_range(start, end);
++              flush_bcache();
++//            cf_cache_flush_range(start, end);
+ }
+ /**
+@@ -372,76 +132,113 @@ static inline void flush_cache_page(stru
+       unsigned long vmaddr, unsigned long pfn)
+ {
+       if (vma->vm_mm == current->mm)
+-              cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
++              flush_bcache();
++//            cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+ }
+-/**
+- * __flush_page_to_ram - Push a page out of the cache
+- * @vaddr: Virtual address at start of page
+- *
+- * Push the page at kernel virtual address *vaddr* and clear
+- * the icache.
+- */
+-static inline void __flush_page_to_ram(void *vaddr)
+-{
+-      asm volatile("nop\n"
+-                   "move%.l   %2,%%d0\n"
+-                   "or%.l     %3,%%d0\n"
+-                   "movec     %%d0,%%cacr\n"
+-                   "move%.l   %0,%%d0\n"
+-                   "and%.l    #0xfffffff0,%%d0\n"
+-                   "move%.l   %%d0,%%a0\n"
+-                   "move%.l   %1,%%d0\n"
+-                   "1:\n"
+-                   "cpushl    %%bc,(%%a0)\n"
+-                   "lea       0x10(%%a0),%%a0\n"
+-                   "subq%.l   #1,%%d0\n"
+-                   "bne.b     1b\n"
+-                   "movec     %2,%%cacr\n"
+-                   : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
+-                       "r" (shadow_cacr), "i" (CF_CACR_SPA)
+-                   : "a0", "d0");
++/* Push the page at kernel virtual address and clear the icache */
++/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
++#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
++extern inline void __flush_page_to_ram(void *address)
++{
++  unsigned long set;
++  unsigned long start_set;
++  unsigned long end_set;
++  unsigned long addr = (unsigned long) address;
++
++  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
++
++  start_set = addr & _ICACHE_SET_MASK;
++  end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
++
++  if (start_set > end_set) {
++    /* from the beginning to the lowest address */
++    for (set = 0; set <= end_set; set += (0x10 - 3)) {
++      asm volatile("cpushl %%bc,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%bc,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%bc,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
++    }
++    /* next loop will finish the cache ie pass the hole */
++    end_set = LAST_ICACHE_ADDR;    
++  }
++  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
++    asm volatile("cpushl %%bc,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%bc,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%bc,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
++  }
++}
++
++/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
++#define flush_dcache_page(page)                       \
++      __flush_page_to_ram((void *) page_address(page))
++#define flush_icache_page(vma,pg)             \
++      __flush_page_to_ram((void *) page_address(pg))
++#define flush_icache_user_range(adr,len)      do { } while (0)
++/* NL */
++#define flush_icache_user_page(vma,page,addr,len)     do { } while (0)
++
++/* Push n pages at kernel virtual address and clear the icache */
++/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
++extern inline void flush_icache_range (unsigned long address,
++                                     unsigned long endaddr)
++{
++  unsigned long set;
++  unsigned long start_set;
++  unsigned long end_set;
++
++  start_set = address & _ICACHE_SET_MASK;
++  end_set = endaddr & _ICACHE_SET_MASK;
++
++  if (start_set > end_set) {
++    /* from the beginning to the lowest address */
++    for (set = 0; set <= end_set; set += (0x10 - 3)) {
++      asm volatile("cpushl %%ic,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%ic,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%ic,(%0)\n"
++                   "\taddq%.l #1,%0\n"
++                   "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
++    }
++    /* next loop will finish the cache ie pass the hole */
++    end_set = LAST_ICACHE_ADDR;    
++  }
++  for (set = start_set; set <= end_set; set += (0x10 - 3)) {
++    asm volatile("cpushl %%ic,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%ic,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%ic,(%0)\n"
++                 "\taddq%.l #1,%0\n"
++                 "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
++  }
+ }
+-/*
+- * Various defines for the kernel.
+- */
+-
+-extern void cache_clear(unsigned long paddr, int len);
+-extern void cache_push(unsigned long paddr, int len);
+-extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+-
+-#define flush_cache_all()                     flush_bcache()
+-#define flush_cache_vmap(start, end)          flush_bcache()
+-#define flush_cache_vunmap(start, end)                flush_bcache()
+-
+-#define flush_dcache_range(vstart, vend)      cf_dcache_flush_range(vstart, vend)
+-#define flush_dcache_page(page)                       __flush_page_to_ram(page_address(page))
+-#define flush_dcache_mmap_lock(mapping)               do { } while (0)
+-#define flush_dcache_mmap_unlock(mapping)     do { } while (0)
+-
+-#define flush_icache_page(vma, page)          __flush_page_to_ram(page_address(page))
+-
+-/**
+- * copy_to_user_page - Copy memory to user page
+- */
+ static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+ {
+       memcpy(dst, src, len);
+-      cf_cache_flush(page_to_phys(page), PAGE_SIZE);
++      flush_icache_user_page(vma, page, vaddr, len);
+ }
+-
+-/**
+- * copy_from_user_page - Copy memory from user page
+- */
+ static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+ {
+-      cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+       memcpy(dst, src, len);
+ }
++#define flush_cache_vmap(start, end)          flush_cache_all()
++#define flush_cache_vunmap(start, end)                flush_cache_all()
++#define flush_dcache_mmap_lock(mapping)               do { } while (0)
++#define flush_dcache_mmap_unlock(mapping)     do { } while (0)
++
+ #endif /* M68K_CF_CACHEFLUSH_H */
+--- a/include/asm-m68k/cf_pgalloc.h
++++ b/include/asm-m68k/cf_pgalloc.h
+@@ -1,13 +1,9 @@
+ #ifndef M68K_CF_PGALLOC_H
+ #define M68K_CF_PGALLOC_H
+-/* JKM -- added -- needed? */
+-#include <linux/highmem.h>
+-
+ #include <asm/coldfire.h>
+ #include <asm/page.h>
+ #include <asm/cf_tlbflush.h>
+-/* JKM -- added -- needed? */
+ #include <asm/cf_cacheflush.h>
+ extern inline void pte_free_kernel(pte_t *pte)
+@@ -41,6 +37,7 @@ extern inline pmd_t *pmd_alloc_kernel(pg
+ #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+       (unsigned long)(page_address(page)))
++
+ #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+--- a/include/asm-m68k/cf_pgtable.h
++++ b/include/asm-m68k/cf_pgtable.h
+@@ -341,7 +341,7 @@ static inline pte_t pgoff_to_pte(unsigne
+ #define __pte_to_swp_entry(pte)       ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) (__pte((x).val))
+-#define pmd_page(pmd)         virt_to_page(__pmd_page(pmd))
++#define pmd_page(pmd)         (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+ #define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) +   \
+                                      __pte_offset(address))
+--- a/include/asm-m68k/cfcache.h
++++ b/include/asm-m68k/cfcache.h
+@@ -87,6 +87,12 @@
+ #define CACHE_SETS 0x0200             /* 512 sets */
+ #define CACHE_WAYS 0x0004             /* 4 way */
++/* in for the old cpushl caching code */
++#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
++#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
++#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
++#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
++
+ #define CACHE_DISABLE_MODE    (CF_CACR_DCINVA+        \
+                                CF_CACR_BCINVA+        \
+                                CF_CACR_ICINVA)
+--- a/include/asm-m68k/page.h
++++ b/include/asm-m68k/page.h
+@@ -123,13 +123,21 @@ extern unsigned long m68k_memoffset;
+ #if defined(CONFIG_COLDFIRE)
+ static inline unsigned long ___pa(void *vaddr)
+ {
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+       return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
++#else
++      return (unsigned long)vaddr;
++#endif
+ }
+ #define __pa(vaddr)   ___pa((void *)(vaddr))
+ static inline void *__va(unsigned long paddr)
+ {
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+       return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
++#else
++      return (void *)paddr;
++#endif
+ }
+ #else
+--- a/include/asm-m68k/page_offset.h
++++ b/include/asm-m68k/page_offset.h
+@@ -1,16 +1,15 @@
++/*
++ * Page and physical memory maps.
++ */
++#if defined(CONFIG_SUN3)
++#define PAGE_OFFSET_RAW               0x0E000000
++
++#elif defined(CONFIG_M54455) || defined(CONFIG_M547X_8X)
++#define       PHYS_OFFSET             CONFIG_SDRAM_BASE
++#define PAGE_OFFSET_RAW               (PHYS_OFFSET)
++/* #define PAGE_OFFSET_RAW            0xC0000000 */
+-/* This handles the memory map.. */
+-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
++#else
++/* default */
+ #define PAGE_OFFSET_RAW               0x00000000
+-#elif defined(CONFIG_SUN3)
+-#define PAGE_OFFSET_RAW               0x0E000000
+-#else /* CONFIG_COLDFIRE */
+-#if defined(CONFIG_M54455)
+-#define PAGE_OFFSET_RAW               0xC0000000
+-#define       PHYS_OFFSET             0x40000000
+-#elif defined(CONFIG_M547X_8X)
+-#define PAGE_OFFSET_RAW               0xC0000000
+-#define       PHYS_OFFSET             0x00000000
+ #endif
+-#endif
+-