layerscape: add patches-5.4
diff --git a/target/linux/layerscape/patches-5.4/805-display-0043-gpu-Move-ipu-v3-to-imx-folder.patch b/target/linux/layerscape/patches-5.4/805-display-0043-gpu-Move-ipu-v3-to-imx-folder.patch
new file mode 100644
index 0000000..6deb30f
--- /dev/null
@@ -0,0 +1,19995 @@
+From a7782c6e5e37b2b406221827b177c2bfcc8825cd Mon Sep 17 00:00:00 2001
+From: Liu Ying <victor.liu@nxp.com>
+Date: Tue, 22 Jan 2019 17:08:01 +0800
+Subject: [PATCH] gpu: Move ipu-v3 to imx folder
+
+The new imx folder may contain ipu-v3 and dpu common drivers.
+
+Signed-off-by: Liu Ying <victor.liu@nxp.com>
+[ Aisheng: fix source path ]
+Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
+---
+ drivers/gpu/Makefile                       |    2 +-
+ drivers/gpu/imx/Kconfig                    |    1 +
+ drivers/gpu/imx/Makefile                   |    1 +
+ drivers/gpu/imx/ipu-v3/Kconfig             |   11 +
+ drivers/gpu/imx/ipu-v3/Makefile            |   10 +
+ drivers/gpu/imx/ipu-v3/ipu-common.c        | 1565 ++++++++++++++++++
+ drivers/gpu/imx/ipu-v3/ipu-cpmem.c         |  976 +++++++++++
+ drivers/gpu/imx/ipu-v3/ipu-csi.c           |  821 +++++++++
+ drivers/gpu/imx/ipu-v3/ipu-dc.c            |  420 +++++
+ drivers/gpu/imx/ipu-v3/ipu-di.c            |  745 +++++++++
+ drivers/gpu/imx/ipu-v3/ipu-dmfc.c          |  214 +++
+ drivers/gpu/imx/ipu-v3/ipu-dp.c            |  357 ++++
+ drivers/gpu/imx/ipu-v3/ipu-ic.c            |  761 +++++++++
+ drivers/gpu/imx/ipu-v3/ipu-image-convert.c | 2475 ++++++++++++++++++++++++++++
+ drivers/gpu/imx/ipu-v3/ipu-pre.c           |  346 ++++
+ drivers/gpu/imx/ipu-v3/ipu-prg.c           |  483 ++++++
+ drivers/gpu/imx/ipu-v3/ipu-prv.h           |  274 +++
+ drivers/gpu/imx/ipu-v3/ipu-smfc.c          |  202 +++
+ drivers/gpu/imx/ipu-v3/ipu-vdi.c           |  234 +++
+ drivers/gpu/ipu-v3/Kconfig                 |   11 -
+ drivers/gpu/ipu-v3/Makefile                |   10 -
+ drivers/gpu/ipu-v3/ipu-common.c            | 1565 ------------------
+ drivers/gpu/ipu-v3/ipu-cpmem.c             |  976 -----------
+ drivers/gpu/ipu-v3/ipu-csi.c               |  821 ---------
+ drivers/gpu/ipu-v3/ipu-dc.c                |  420 -----
+ drivers/gpu/ipu-v3/ipu-di.c                |  745 ---------
+ drivers/gpu/ipu-v3/ipu-dmfc.c              |  214 ---
+ drivers/gpu/ipu-v3/ipu-dp.c                |  357 ----
+ drivers/gpu/ipu-v3/ipu-ic.c                |  761 ---------
+ drivers/gpu/ipu-v3/ipu-image-convert.c     | 2475 ----------------------------
+ drivers/gpu/ipu-v3/ipu-pre.c               |  346 ----
+ drivers/gpu/ipu-v3/ipu-prg.c               |  483 ------
+ drivers/gpu/ipu-v3/ipu-prv.h               |  274 ---
+ drivers/gpu/ipu-v3/ipu-smfc.c              |  202 ---
+ drivers/gpu/ipu-v3/ipu-vdi.c               |  234 ---
+ drivers/video/Kconfig                      |    2 +-
+ 36 files changed, 9898 insertions(+), 9896 deletions(-)
+ create mode 100644 drivers/gpu/imx/Kconfig
+ create mode 100644 drivers/gpu/imx/Makefile
+ create mode 100644 drivers/gpu/imx/ipu-v3/Kconfig
+ create mode 100644 drivers/gpu/imx/ipu-v3/Makefile
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-common.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-cpmem.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-csi.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-dc.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-di.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-dmfc.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-dp.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-ic.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-image-convert.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-pre.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-prg.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-prv.h
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-smfc.c
+ create mode 100644 drivers/gpu/imx/ipu-v3/ipu-vdi.c
+ delete mode 100644 drivers/gpu/ipu-v3/Kconfig
+ delete mode 100644 drivers/gpu/ipu-v3/Makefile
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-common.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-cpmem.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-csi.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-dc.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-di.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-dmfc.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-dp.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-ic.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-image-convert.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-pre.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-prg.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-prv.h
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-smfc.c
+ delete mode 100644 drivers/gpu/ipu-v3/ipu-vdi.c
+
+--- a/drivers/gpu/Makefile
++++ b/drivers/gpu/Makefile
+@@ -3,5 +3,5 @@
+ # taken to initialize them in the correct order. Link order is the only way
+ # to ensure this currently.
+ obj-$(CONFIG_TEGRA_HOST1X)    += host1x/
++obj-y                 += imx/
+ obj-y                 += drm/ vga/
+-obj-$(CONFIG_IMX_IPUV3_CORE)  += ipu-v3/
+--- /dev/null
++++ b/drivers/gpu/imx/Kconfig
+@@ -0,0 +1 @@
++source "drivers/gpu/imx/ipu-v3/Kconfig"
+--- /dev/null
++++ b/drivers/gpu/imx/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_IMX_IPUV3_CORE)  += ipu-v3/
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/Kconfig
+@@ -0,0 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config IMX_IPUV3_CORE
++      tristate "IPUv3 core support"
++      depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM || COMPILE_TEST
++      depends on DRM || !DRM # if DRM=m, this can't be 'y'
++      select BITREVERSE
++      select GENERIC_ALLOCATOR if DRM
++      select GENERIC_IRQ_CHIP
++      help
++        Choose this if you have an i.MX5/6 system and want to use the Image
++        Processing Unit. This option only enables IPU base support.
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/Makefile
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
++
++imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
++              ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-ic-csc.o \
++              ipu-image-convert.o ipu-smfc.o ipu-vdi.o
++
++ifdef CONFIG_DRM
++      imx-ipu-v3-objs += ipu-pre.o ipu-prg.o
++endif
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-common.c
+@@ -0,0 +1,1565 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/module.h>
++#include <linux/export.h>
++#include <linux/types.h>
++#include <linux/reset.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/list.h>
++#include <linux/irq.h>
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/of_device.h>
++#include <linux/of_graph.h>
++
++#include <drm/drm_fourcc.h>
++
++#include <video/imx-ipu-v3.h>
++#include "ipu-prv.h"
++
++static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
++{
++      return readl(ipu->cm_reg + offset);
++}
++
++static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
++{
++      writel(value, ipu->cm_reg + offset);
++}
++
++int ipu_get_num(struct ipu_soc *ipu)
++{
++      return ipu->id;
++}
++EXPORT_SYMBOL_GPL(ipu_get_num);
++
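++/*
++ * Request a DP shadow-register update, either synchronized to the next
++ * frame (sync == true) or applied immediately.
++ */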
++void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
++{
++      u32 val;
++
++      val = ipu_cm_read(ipu, IPU_SRM_PRI2);
++      val &= ~DP_S_SRM_MODE_MASK;
++      val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
++                    DP_S_SRM_MODE_NOW;
++      ipu_cm_write(ipu, val, IPU_SRM_PRI2);
++}
++EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
++
++enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
++{
++      switch (drm_fourcc) {
++      case DRM_FORMAT_ARGB1555:
++      case DRM_FORMAT_ABGR1555:
++      case DRM_FORMAT_RGBA5551:
++      case DRM_FORMAT_BGRA5551:
++      case DRM_FORMAT_RGB565:
++      case DRM_FORMAT_BGR565:
++      case DRM_FORMAT_RGB888:
++      case DRM_FORMAT_BGR888:
++      case DRM_FORMAT_ARGB4444:
++      case DRM_FORMAT_XRGB8888:
++      case DRM_FORMAT_XBGR8888:
++      case DRM_FORMAT_RGBX8888:
++      case DRM_FORMAT_BGRX8888:
++      case DRM_FORMAT_ARGB8888:
++      case DRM_FORMAT_ABGR8888:
++      case DRM_FORMAT_RGBA8888:
++      case DRM_FORMAT_BGRA8888:
++      case DRM_FORMAT_RGB565_A8:
++      case DRM_FORMAT_BGR565_A8:
++      case DRM_FORMAT_RGB888_A8:
++      case DRM_FORMAT_BGR888_A8:
++      case DRM_FORMAT_RGBX8888_A8:
++      case DRM_FORMAT_BGRX8888_A8:
++              return IPUV3_COLORSPACE_RGB;
++      case DRM_FORMAT_YUYV:
++      case DRM_FORMAT_UYVY:
++      case DRM_FORMAT_YUV420:
++      case DRM_FORMAT_YVU420:
++      case DRM_FORMAT_YUV422:
++      case DRM_FORMAT_YVU422:
++      case DRM_FORMAT_YUV444:
++      case DRM_FORMAT_YVU444:
++      case DRM_FORMAT_NV12:
++      case DRM_FORMAT_NV21:
++      case DRM_FORMAT_NV16:
++      case DRM_FORMAT_NV61:
++              return IPUV3_COLORSPACE_YUV;
++      default:
++              return IPUV3_COLORSPACE_UNKNOWN;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
++
++enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
++{
++      switch (pixelformat) {
++      case V4L2_PIX_FMT_YUV420:
++      case V4L2_PIX_FMT_YVU420:
++      case V4L2_PIX_FMT_YUV422P:
++      case V4L2_PIX_FMT_UYVY:
++      case V4L2_PIX_FMT_YUYV:
++      case V4L2_PIX_FMT_NV12:
++      case V4L2_PIX_FMT_NV21:
++      case V4L2_PIX_FMT_NV16:
++      case V4L2_PIX_FMT_NV61:
++              return IPUV3_COLORSPACE_YUV;
++      case V4L2_PIX_FMT_RGB565:
++      case V4L2_PIX_FMT_BGR24:
++      case V4L2_PIX_FMT_RGB24:
++      case V4L2_PIX_FMT_ABGR32:
++      case V4L2_PIX_FMT_XBGR32:
++      case V4L2_PIX_FMT_BGRA32:
++      case V4L2_PIX_FMT_BGRX32:
++      case V4L2_PIX_FMT_RGBA32:
++      case V4L2_PIX_FMT_RGBX32:
++      case V4L2_PIX_FMT_ARGB32:
++      case V4L2_PIX_FMT_XRGB32:
++              return IPUV3_COLORSPACE_RGB;
++      default:
++              return IPUV3_COLORSPACE_UNKNOWN;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
++
++bool ipu_pixelformat_is_planar(u32 pixelformat)
++{
++      switch (pixelformat) {
++      case V4L2_PIX_FMT_YUV420:
++      case V4L2_PIX_FMT_YVU420:
++      case V4L2_PIX_FMT_YUV422P:
++      case V4L2_PIX_FMT_NV12:
++      case V4L2_PIX_FMT_NV21:
++      case V4L2_PIX_FMT_NV16:
++      case V4L2_PIX_FMT_NV61:
++              return true;
++      }
++
++      return false;
++}
++EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
++
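++/*
++ * The switch below keys on the high nibble of the media bus code:
++ * 0x1xxx codes map to RGB, 0x2xxx to YUV, anything else is unknown.
++ */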
++enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
++{
++      switch (mbus_code & 0xf000) {
++      case 0x1000:
++              return IPUV3_COLORSPACE_RGB;
++      case 0x2000:
++              return IPUV3_COLORSPACE_YUV;
++      default:
++              return IPUV3_COLORSPACE_UNKNOWN;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
++
++int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
++{
++      switch (pixelformat) {
++      case V4L2_PIX_FMT_YUV420:
++      case V4L2_PIX_FMT_YVU420:
++      case V4L2_PIX_FMT_YUV422P:
++      case V4L2_PIX_FMT_NV12:
++      case V4L2_PIX_FMT_NV21:
++      case V4L2_PIX_FMT_NV16:
++      case V4L2_PIX_FMT_NV61:
++              /*
++               * for the planar YUV formats, the stride passed to
++               * cpmem must be the stride in bytes of the Y plane.
++               * And all the planar YUV formats have an 8-bit
++               * Y component.
++               */
++              return (8 * pixel_stride) >> 3;
++      case V4L2_PIX_FMT_RGB565:
++      case V4L2_PIX_FMT_YUYV:
++      case V4L2_PIX_FMT_UYVY:
++              return (16 * pixel_stride) >> 3;
++      case V4L2_PIX_FMT_BGR24:
++      case V4L2_PIX_FMT_RGB24:
++              return (24 * pixel_stride) >> 3;
++      case V4L2_PIX_FMT_BGR32:
++      case V4L2_PIX_FMT_RGB32:
++      case V4L2_PIX_FMT_XBGR32:
++      case V4L2_PIX_FMT_XRGB32:
++              return (32 * pixel_stride) >> 3;
++      default:
++              break;
++      }
++
++      return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
++
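++/*
++ * ipu_rotate_mode packs three bits: bit 2 selects the 90 degree rotate
++ * step, bit 1 horizontal flip, bit 0 vertical flip. The hflip/vflip
++ * arguments are XORed on top of the base rotation.
++ */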
++int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
++                          bool hflip, bool vflip)
++{
++      u32 r90, vf, hf;
++
++      switch (degrees) {
++      case 0:
++              vf = hf = r90 = 0;
++              break;
++      case 90:
++              vf = hf = 0;
++              r90 = 1;
++              break;
++      case 180:
++              vf = hf = 1;
++              r90 = 0;
++              break;
++      case 270:
++              vf = hf = r90 = 1;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      hf ^= (u32)hflip;
++      vf ^= (u32)vflip;
++
++      *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
++
++int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
++                          bool hflip, bool vflip)
++{
++      u32 r90, vf, hf;
++
++      r90 = ((u32)mode >> 2) & 0x1;
++      hf = ((u32)mode >> 1) & 0x1;
++      vf = ((u32)mode >> 0) & 0x1;
++      hf ^= (u32)hflip;
++      vf ^= (u32)vflip;
++
++      switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
++      case IPU_ROTATE_NONE:
++              *degrees = 0;
++              break;
++      case IPU_ROTATE_90_RIGHT:
++              *degrees = 90;
++              break;
++      case IPU_ROTATE_180:
++              *degrees = 180;
++              break;
++      case IPU_ROTATE_90_LEFT:
++              *degrees = 270;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
++
++struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
++{
++      struct ipuv3_channel *channel;
++
++      dev_dbg(ipu->dev, "%s %d\n", __func__, num);
++
++      if (num > 63)
++              return ERR_PTR(-ENODEV);
++
++      mutex_lock(&ipu->channel_lock);
++
++      list_for_each_entry(channel, &ipu->channels, list) {
++              if (channel->num == num) {
++                      channel = ERR_PTR(-EBUSY);
++                      goto out;
++              }
++      }
++
++      channel = kzalloc(sizeof(*channel), GFP_KERNEL);
++      if (!channel) {
++              channel = ERR_PTR(-ENOMEM);
++              goto out;
++      }
++
++      channel->num = num;
++      channel->ipu = ipu;
++      list_add(&channel->list, &ipu->channels);
++
++out:
++      mutex_unlock(&ipu->channel_lock);
++
++      return channel;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_get);
++
++void ipu_idmac_put(struct ipuv3_channel *channel)
++{
++      struct ipu_soc *ipu = channel->ipu;
++
++      dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
++
++      mutex_lock(&ipu->channel_lock);
++
++      list_del(&channel->list);
++      kfree(channel);
++
++      mutex_unlock(&ipu->channel_lock);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_put);
++
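++/* Bit for channel (ch) within a 32-bit channel register; channels 0-63 span two such registers. */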
++#define idma_mask(ch)                 (1 << ((ch) & 0x1f))
++
++/*
++ * This is an undocumented feature, a write one to a channel bit in
++ * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
++ * internal current buffer pointer so that transfers start from buffer
++ * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
++ * only says these are read-only registers). This operation is required
++ * for channel linking to work correctly, for instance video capture
++ * pipelines that carry out image rotations will fail after the first
++ * streaming unless this function is called for each channel before
++ * re-enabling the channels.
++ */
++static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned int chno = channel->num;
++
++      ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
++}
++
++void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
++              bool doublebuffer)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned long flags;
++      u32 reg;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
++      if (doublebuffer)
++              reg |= idma_mask(channel->num);
++      else
++              reg &= ~idma_mask(channel->num);
++      ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
++
++      __ipu_idmac_reset_current_buffer(channel);
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
++
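++/*
++ * Channels that support burst locking, with the register and the shift
++ * of the 2-bit field holding their lock setting (see
++ * ipu_idmac_lock_enable() below).
++ */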
++static const struct {
++      int chnum;
++      u32 reg;
++      int shift;
++} idmac_lock_en_info[] = {
++      { .chnum =  5, .reg = IDMAC_CH_LOCK_EN_1, .shift =  0, },
++      { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift =  2, },
++      { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift =  4, },
++      { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift =  6, },
++      { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift =  8, },
++      { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
++      { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
++      { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
++      { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
++      { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
++      { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
++      { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift =  0, },
++      { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift =  2, },
++      { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift =  4, },
++      { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift =  6, },
++      { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift =  8, },
++      { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
++};
++
++int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned long flags;
++      u32 bursts, regval;
++      int i;
++
++      switch (num_bursts) {
++      case 0:
++      case 1:
++              bursts = 0x00; /* locking disabled */
++              break;
++      case 2:
++              bursts = 0x01;
++              break;
++      case 4:
++              bursts = 0x02;
++              break;
++      case 8:
++              bursts = 0x03;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      /*
++       * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
++       * i.MX53 channel arbitration locking doesn't seem to work properly.
++       * Allow enabling the lock feature on IPUv3H / i.MX6 only.
++       */
++      if (bursts && ipu->ipu_type != IPUV3H)
++              return -EINVAL;
++
++      for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
++              if (channel->num == idmac_lock_en_info[i].chnum)
++                      break;
++      }
++      if (i >= ARRAY_SIZE(idmac_lock_en_info))
++              return -EINVAL;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
++      regval &= ~(0x03 << idmac_lock_en_info[i].shift);
++      regval |= (bursts << idmac_lock_en_info[i].shift);
++      ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
++
++int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
++{
++      unsigned long lock_flags;
++      u32 val;
++
++      spin_lock_irqsave(&ipu->lock, lock_flags);
++
++      val = ipu_cm_read(ipu, IPU_DISP_GEN);
++
++      if (mask & IPU_CONF_DI0_EN)
++              val |= IPU_DI0_COUNTER_RELEASE;
++      if (mask & IPU_CONF_DI1_EN)
++              val |= IPU_DI1_COUNTER_RELEASE;
++
++      ipu_cm_write(ipu, val, IPU_DISP_GEN);
++
++      val = ipu_cm_read(ipu, IPU_CONF);
++      val |= mask;
++      ipu_cm_write(ipu, val, IPU_CONF);
++
++      spin_unlock_irqrestore(&ipu->lock, lock_flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_module_enable);
++
++int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
++{
++      unsigned long lock_flags;
++      u32 val;
++
++      spin_lock_irqsave(&ipu->lock, lock_flags);
++
++      val = ipu_cm_read(ipu, IPU_CONF);
++      val &= ~mask;
++      ipu_cm_write(ipu, val, IPU_CONF);
++
++      val = ipu_cm_read(ipu, IPU_DISP_GEN);
++
++      if (mask & IPU_CONF_DI0_EN)
++              val &= ~IPU_DI0_COUNTER_RELEASE;
++      if (mask & IPU_CONF_DI1_EN)
++              val &= ~IPU_DI1_COUNTER_RELEASE;
++
++      ipu_cm_write(ipu, val, IPU_DISP_GEN);
++
++      spin_unlock_irqrestore(&ipu->lock, lock_flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_module_disable);
++
++int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned int chno = channel->num;
++
++      return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
++
++bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned long flags;
++      u32 reg = 0;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++      switch (buf_num) {
++      case 0:
++              reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
++              break;
++      case 1:
++              reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
++              break;
++      case 2:
++              reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
++              break;
++      }
++      spin_unlock_irqrestore(&ipu->lock, flags);
++
++      return ((reg & idma_mask(channel->num)) != 0);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
++
++void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned int chno = channel->num;
++      unsigned long flags;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      /* Mark buffer as ready. */
++      if (buf_num == 0)
++              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
++      else
++              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
++
++void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned int chno = channel->num;
++      unsigned long flags;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
++      switch (buf_num) {
++      case 0:
++              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
++              break;
++      case 1:
++              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
++              break;
++      case 2:
++              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
++              break;
++      default:
++              break;
++      }
++      ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
++
++int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      u32 val;
++      unsigned long flags;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
++      val |= idma_mask(channel->num);
++      ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
++
++bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
++{
++      return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
++
++int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned long timeout;
++
++      timeout = jiffies + msecs_to_jiffies(ms);
++      while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
++                      idma_mask(channel->num)) {
++              if (time_after(jiffies, timeout))
++                      return -ETIMEDOUT;
++              cpu_relax();
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
++
++int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      u32 val;
++      unsigned long flags;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      /* Disable DMA channel(s) */
++      val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
++      val &= ~idma_mask(channel->num);
++      ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
++
++      __ipu_idmac_reset_current_buffer(channel);
++
++      /* Set channel buffers NOT to be ready */
++      ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
++
++      if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
++                      idma_mask(channel->num)) {
++              ipu_cm_write(ipu, idma_mask(channel->num),
++                           IPU_CHA_BUF0_RDY(channel->num));
++      }
++
++      if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
++                      idma_mask(channel->num)) {
++              ipu_cm_write(ipu, idma_mask(channel->num),
++                           IPU_CHA_BUF1_RDY(channel->num));
++      }
++
++      ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
++
++      /* Reset the double buffer */
++      val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
++      val &= ~idma_mask(channel->num);
++      ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
++
++/*
++ * The imx6 rev. D TRM says that enabling the WM feature will increase
++ * a channel's priority. Refer to Table 36-8 Calculated priority value.
++ * The sub-module that is the sink or source for the channel must enable
++ * watermark signal for this to take effect (SMFC_WM for instance).
++ */
++void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
++{
++      struct ipu_soc *ipu = channel->ipu;
++      unsigned long flags;
++      u32 val;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
++      if (enable)
++              val |= 1 << (channel->num % 32);
++      else
++              val &= ~(1 << (channel->num % 32));
++      ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
++
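++/*
++ * Trigger a reset of the IPU internal memories and wait (up to one
++ * second) for the hardware to clear the request bit.
++ */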
++static int ipu_memory_reset(struct ipu_soc *ipu)
++{
++      unsigned long timeout;
++
++      ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
++
++      timeout = jiffies + msecs_to_jiffies(1000);
++      while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
++              if (time_after(jiffies, timeout))
++                      return -ETIME;
++              cpu_relax();
++      }
++
++      return 0;
++}
++
++/*
++ * Set the source mux for the given CSI. Selects either parallel or
++ * MIPI CSI2 sources.
++ */
++void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
++{
++      unsigned long flags;
++      u32 val, mask;
++
++      mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
++              IPU_CONF_CSI0_DATA_SOURCE;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      val = ipu_cm_read(ipu, IPU_CONF);
++      if (mipi_csi2)
++              val |= mask;
++      else
++              val &= ~mask;
++      ipu_cm_write(ipu, val, IPU_CONF);
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
++
++/*
++ * Set the source mux for the IC. Selects either CSI[01] or the VDI.
++ */
++void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
++{
++      unsigned long flags;
++      u32 val;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      val = ipu_cm_read(ipu, IPU_CONF);
++      if (vdi)
++              val |= IPU_CONF_IC_INPUT;
++      else
++              val &= ~IPU_CONF_IC_INPUT;
++
++      if (csi_id == 1)
++              val |= IPU_CONF_CSI_SEL;
++      else
++              val &= ~IPU_CONF_CSI_SEL;
++
++      ipu_cm_write(ipu, val, IPU_CONF);
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
++
++
++/* Frame Synchronization Unit Channel Linking */
++
++struct fsu_link_reg_info {
++      int chno;
++      u32 reg;
++      u32 mask;
++      u32 val;
++};
++
++struct fsu_link_info {
++      struct fsu_link_reg_info src;
++      struct fsu_link_reg_info sink;
++};
++
++static const struct fsu_link_info fsu_link_info[] = {
++      {
++              .src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
++                        FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
++              .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
++                        FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
++      }, {
++              .src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
++                        FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
++              .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
++                        FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
++      }, {
++              .src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
++                        FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
++              .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
++                        FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
++      }, {
++              .src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
++              .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
++                        FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
++      },
++};
++
++static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
++              if (src == fsu_link_info[i].src.chno &&
++                  sink == fsu_link_info[i].sink.chno)
++                      return &fsu_link_info[i];
++      }
++
++      return NULL;
++}
++
++/*
++ * Links a source channel to a sink channel in the FSU.
++ */
++int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
++{
++      const struct fsu_link_info *link;
++      u32 src_reg, sink_reg;
++      unsigned long flags;
++
++      link = find_fsu_link_info(src_ch, sink_ch);
++      if (!link)
++              return -EINVAL;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      if (link->src.mask) {
++              src_reg = ipu_cm_read(ipu, link->src.reg);
++              src_reg &= ~link->src.mask;
++              src_reg |= link->src.val;
++              ipu_cm_write(ipu, src_reg, link->src.reg);
++      }
++
++      if (link->sink.mask) {
++              sink_reg = ipu_cm_read(ipu, link->sink.reg);
++              sink_reg &= ~link->sink.mask;
++              sink_reg |= link->sink.val;
++              ipu_cm_write(ipu, sink_reg, link->sink.reg);
++      }
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_fsu_link);
++
++/*
++ * Unlinks source and sink channels in the FSU.
++ */
++int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
++{
++      const struct fsu_link_info *link;
++      u32 src_reg, sink_reg;
++      unsigned long flags;
++
++      link = find_fsu_link_info(src_ch, sink_ch);
++      if (!link)
++              return -EINVAL;
++
++      spin_lock_irqsave(&ipu->lock, flags);
++
++      if (link->src.mask) {
++              src_reg = ipu_cm_read(ipu, link->src.reg);
++              src_reg &= ~link->src.mask;
++              ipu_cm_write(ipu, src_reg, link->src.reg);
++      }
++
++      if (link->sink.mask) {
++              sink_reg = ipu_cm_read(ipu, link->sink.reg);
++              sink_reg &= ~link->sink.mask;
++              ipu_cm_write(ipu, sink_reg, link->sink.reg);
++      }
++
++      spin_unlock_irqrestore(&ipu->lock, flags);
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
++
++/* Link IDMAC channels in the FSU */
++int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
++{
++      return ipu_fsu_link(src->ipu, src->num, sink->num);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_link);
++
++/* Unlink IDMAC channels in the FSU */
++int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
++{
++      return ipu_fsu_unlink(src->ipu, src->num, sink->num);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
++
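++/* Per-SoC register block offsets, all relative to the IPU base address. */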
++struct ipu_devtype {
++      const char *name;
++      unsigned long cm_ofs;
++      unsigned long cpmem_ofs;
++      unsigned long srm_ofs;
++      unsigned long tpm_ofs;
++      unsigned long csi0_ofs;
++      unsigned long csi1_ofs;
++      unsigned long ic_ofs;
++      unsigned long disp0_ofs;
++      unsigned long disp1_ofs;
++      unsigned long dc_tmpl_ofs;
++      unsigned long vdi_ofs;
++      enum ipuv3_type type;
++};
++
++static struct ipu_devtype ipu_type_imx51 = {
++      .name = "IPUv3EX",
++      .cm_ofs = 0x1e000000,
++      .cpmem_ofs = 0x1f000000,
++      .srm_ofs = 0x1f040000,
++      .tpm_ofs = 0x1f060000,
++      .csi0_ofs = 0x1e030000,
++      .csi1_ofs = 0x1e038000,
++      .ic_ofs = 0x1e020000,
++      .disp0_ofs = 0x1e040000,
++      .disp1_ofs = 0x1e048000,
++      .dc_tmpl_ofs = 0x1f080000,
++      .vdi_ofs = 0x1e068000,
++      .type = IPUV3EX,
++};
++
++static struct ipu_devtype ipu_type_imx53 = {
++      .name = "IPUv3M",
++      .cm_ofs = 0x06000000,
++      .cpmem_ofs = 0x07000000,
++      .srm_ofs = 0x07040000,
++      .tpm_ofs = 0x07060000,
++      .csi0_ofs = 0x06030000,
++      .csi1_ofs = 0x06038000,
++      .ic_ofs = 0x06020000,
++      .disp0_ofs = 0x06040000,
++      .disp1_ofs = 0x06048000,
++      .dc_tmpl_ofs = 0x07080000,
++      .vdi_ofs = 0x06068000,
++      .type = IPUV3M,
++};
++
++static struct ipu_devtype ipu_type_imx6q = {
++      .name = "IPUv3H",
++      .cm_ofs = 0x00200000,
++      .cpmem_ofs = 0x00300000,
++      .srm_ofs = 0x00340000,
++      .tpm_ofs = 0x00360000,
++      .csi0_ofs = 0x00230000,
++      .csi1_ofs = 0x00238000,
++      .ic_ofs = 0x00220000,
++      .disp0_ofs = 0x00240000,
++      .disp1_ofs = 0x00248000,
++      .dc_tmpl_ofs = 0x00380000,
++      .vdi_ofs = 0x00268000,
++      .type = IPUV3H,
++};
++
++static const struct of_device_id imx_ipu_dt_ids[] = {
++      { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
++      { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
++      { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
++      { .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
++      { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
++
++static int ipu_submodules_init(struct ipu_soc *ipu,
++              struct platform_device *pdev, unsigned long ipu_base,
++              struct clk *ipu_clk)
++{
++      char *unit;
++      int ret;
++      struct device *dev = &pdev->dev;
++      const struct ipu_devtype *devtype = ipu->devtype;
++
++      ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
++      if (ret) {
++              unit = "cpmem";
++              goto err_cpmem;
++      }
++
++      ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
++                         IPU_CONF_CSI0_EN, ipu_clk);
++      if (ret) {
++              unit = "csi0";
++              goto err_csi_0;
++      }
++
++      ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
++                         IPU_CONF_CSI1_EN, ipu_clk);
++      if (ret) {
++              unit = "csi1";
++              goto err_csi_1;
++      }
++
++      ret = ipu_ic_init(ipu, dev,
++                        ipu_base + devtype->ic_ofs,
++                        ipu_base + devtype->tpm_ofs);
++      if (ret) {
++              unit = "ic";
++              goto err_ic;
++      }
++
++      ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
++                         IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
++                         IPU_CONF_IC_INPUT);
++      if (ret) {
++              unit = "vdi";
++              goto err_vdi;
++      }
++
++      ret = ipu_image_convert_init(ipu, dev);
++      if (ret) {
++              unit = "image_convert";
++              goto err_image_convert;
++      }
++
++      ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
++                        IPU_CONF_DI0_EN, ipu_clk);
++      if (ret) {
++              unit = "di0";
++              goto err_di_0;
++      }
++
++      ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
++                      IPU_CONF_DI1_EN, ipu_clk);
++      if (ret) {
++              unit = "di1";
++              goto err_di_1;
++      }
++
++      ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
++                      IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
++      if (ret) {
++              unit = "dc_template";
++              goto err_dc;
++      }
++
++      ret = ipu_dmfc_init(ipu, dev, ipu_base +
++                      devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
++      if (ret) {
++              unit = "dmfc";
++              goto err_dmfc;
++      }
++
++      ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
++      if (ret) {
++              unit = "dp";
++              goto err_dp;
++      }
++
++      ret = ipu_smfc_init(ipu, dev, ipu_base +
++                      devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
++      if (ret) {
++              unit = "smfc";
++              goto err_smfc;
++      }
++
++      return 0;
++
++err_smfc:
++      ipu_dp_exit(ipu);
++err_dp:
++      ipu_dmfc_exit(ipu);
++err_dmfc:
++      ipu_dc_exit(ipu);
++err_dc:
++      ipu_di_exit(ipu, 1);
++err_di_1:
++      ipu_di_exit(ipu, 0);
++err_di_0:
++      ipu_image_convert_exit(ipu);
++err_image_convert:
++      ipu_vdi_exit(ipu);
++err_vdi:
++      ipu_ic_exit(ipu);
++err_ic:
++      ipu_csi_exit(ipu, 1);
++err_csi_1:
++      ipu_csi_exit(ipu, 0);
++err_csi_0:
++      ipu_cpmem_exit(ipu);
++err_cpmem:
++      dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
++      return ret;
++}
++
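++/*
++ * For each listed status register, dispatch every pending and enabled
++ * interrupt bit to its mapped virtual IRQ.
++ */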
++static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
++{
++      unsigned long status;
++      int i, bit, irq;
++
++      for (i = 0; i < num_regs; i++) {
++
++              status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
++              status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
++
++              for_each_set_bit(bit, &status, 32) {
++                      irq = irq_linear_revmap(ipu->domain,
++                                              regs[i] * 32 + bit);
++                      if (irq)
++                              generic_handle_irq(irq);
++              }
++      }
++}
++
++static void ipu_irq_handler(struct irq_desc *desc)
++{
++      struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
++      struct irq_chip *chip = irq_desc_get_chip(desc);
++      static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
++
++      chained_irq_enter(chip, desc);
++
++      ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
++
++      chained_irq_exit(chip, desc);
++}
++
++static void ipu_err_irq_handler(struct irq_desc *desc)
++{
++      struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
++      struct irq_chip *chip = irq_desc_get_chip(desc);
++      static const int int_reg[] = { 4, 5, 8, 9};
++
++      chained_irq_enter(chip, desc);
++
++      ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
++
++      chained_irq_exit(chip, desc);
++}
++
++int ipu_map_irq(struct ipu_soc *ipu, int irq)
++{
++      int virq;
++
++      virq = irq_linear_revmap(ipu->domain, irq);
++      if (!virq)
++              virq = irq_create_mapping(ipu->domain, irq);
++
++      return virq;
++}
++EXPORT_SYMBOL_GPL(ipu_map_irq);
++
++int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
++              enum ipu_channel_irq irq_type)
++{
++      return ipu_map_irq(ipu, irq_type + channel->num);
++}
++EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
++
++static void ipu_submodules_exit(struct ipu_soc *ipu)
++{
++      ipu_smfc_exit(ipu);
++      ipu_dp_exit(ipu);
++      ipu_dmfc_exit(ipu);
++      ipu_dc_exit(ipu);
++      ipu_di_exit(ipu, 1);
++      ipu_di_exit(ipu, 0);
++      ipu_image_convert_exit(ipu);
++      ipu_vdi_exit(ipu);
++      ipu_ic_exit(ipu);
++      ipu_csi_exit(ipu, 1);
++      ipu_csi_exit(ipu, 0);
++      ipu_cpmem_exit(ipu);
++}
++
++static int platform_remove_devices_fn(struct device *dev, void *unused)
++{
++      struct platform_device *pdev = to_platform_device(dev);
++
++      platform_device_unregister(pdev);
++
++      return 0;
++}
++
++static void platform_device_unregister_children(struct platform_device *pdev)
++{
++      device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
++}
++
++struct ipu_platform_reg {
++      struct ipu_client_platformdata pdata;
++      const char *name;
++};
++
++/* These must be in the order of the corresponding device tree port nodes */
++static struct ipu_platform_reg client_reg[] = {
++      {
++              .pdata = {
++                      .csi = 0,
++                      .dma[0] = IPUV3_CHANNEL_CSI0,
++                      .dma[1] = -EINVAL,
++              },
++              .name = "imx-ipuv3-csi",
++      }, {
++              .pdata = {
++                      .csi = 1,
++                      .dma[0] = IPUV3_CHANNEL_CSI1,
++                      .dma[1] = -EINVAL,
++              },
++              .name = "imx-ipuv3-csi",
++      }, {
++              .pdata = {
++                      .di = 0,
++                      .dc = 5,
++                      .dp = IPU_DP_FLOW_SYNC_BG,
++                      .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
++                      .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
++              },
++              .name = "imx-ipuv3-crtc",
++      }, {
++              .pdata = {
++                      .di = 1,
++                      .dc = 1,
++                      .dp = -EINVAL,
++                      .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
++                      .dma[1] = -EINVAL,
++              },
++              .name = "imx-ipuv3-crtc",
++      },
++};
++
++static DEFINE_MUTEX(ipu_client_id_mutex);
++static int ipu_client_id;
++
++static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
++{
++      struct device *dev = ipu->dev;
++      unsigned i;
++      int id, ret;
++
++      mutex_lock(&ipu_client_id_mutex);
++      id = ipu_client_id;
++      ipu_client_id += ARRAY_SIZE(client_reg);
++      mutex_unlock(&ipu_client_id_mutex);
++
++      for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
++              struct ipu_platform_reg *reg = &client_reg[i];
++              struct platform_device *pdev;
++              struct device_node *of_node;
++
++              /* Associate subdevice with the corresponding port node */
++              of_node = of_graph_get_port_by_id(dev->of_node, i);
++              if (!of_node) {
++                      dev_info(dev,
++                               "no port@%d node in %pOF, not using %s%d\n",
++                               i, dev->of_node,
++                               (i / 2) ? "DI" : "CSI", i % 2);
++                      continue;
++              }
++
++              pdev = platform_device_alloc(reg->name, id++);
++              if (!pdev) {
++                      ret = -ENOMEM;
++                      goto err_register;
++              }
++
++              pdev->dev.parent = dev;
++
++              reg->pdata.of_node = of_node;
++              ret = platform_device_add_data(pdev, &reg->pdata,
++                                             sizeof(reg->pdata));
++              if (!ret)
++                      ret = platform_device_add(pdev);
++              if (ret) {
++                      platform_device_put(pdev);
++                      goto err_register;
++              }
++      }
++
++      return 0;
++
++err_register:
++      platform_device_unregister_children(to_platform_device(dev));
++
++      return ret;
++}
++
++
++static int ipu_irq_init(struct ipu_soc *ipu)
++{
++      struct irq_chip_generic *gc;
++      struct irq_chip_type *ct;
++      unsigned long unused[IPU_NUM_IRQS / 32] = {
++              0x400100d0, 0xffe000fd,
++              0x400100d0, 0xffe000fd,
++              0x400100d0, 0xffe000fd,
++              0x4077ffff, 0xffe7e1fd,
++              0x23fffffe, 0x8880fff0,
++              0xf98fe7d0, 0xfff81fff,
++              0x400100d0, 0xffe000fd,
++              0x00000000,
++      };
++      int ret, i;
++
++      ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
++                                          &irq_generic_chip_ops, ipu);
++      if (!ipu->domain) {
++              dev_err(ipu->dev, "failed to add irq domain\n");
++              return -ENODEV;
++      }
++
++      ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
++                                           handle_level_irq, 0, 0, 0);
++      if (ret < 0) {
++              dev_err(ipu->dev, "failed to alloc generic irq chips\n");
++              irq_domain_remove(ipu->domain);
++              return ret;
++      }
++
++      /* Mask and clear all interrupts */
++      for (i = 0; i < IPU_NUM_IRQS; i += 32) {
++              ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
++              ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
++      }
++
++      for (i = 0; i < IPU_NUM_IRQS; i += 32) {
++              gc = irq_get_domain_generic_chip(ipu->domain, i);
++              gc->reg_base = ipu->cm_reg;
++              gc->unused = unused[i / 32];
++              ct = gc->chip_types;
++              ct->chip.irq_ack = irq_gc_ack_set_bit;
++              ct->chip.irq_mask = irq_gc_mask_clr_bit;
++              ct->chip.irq_unmask = irq_gc_mask_set_bit;
++              ct->regs.ack = IPU_INT_STAT(i / 32);
++              ct->regs.mask = IPU_INT_CTRL(i / 32);
++      }
++
++      irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
++      irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
++                                       ipu);
++
++      return 0;
++}
++
++static void ipu_irq_exit(struct ipu_soc *ipu)
++{
++      int i, irq;
++
++      irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
++      irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
++
++      /* TODO: remove irq_domain_generic_chips */
++
++      for (i = 0; i < IPU_NUM_IRQS; i++) {
++              irq = irq_linear_revmap(ipu->domain, i);
++              if (irq)
++                      irq_dispose_mapping(irq);
++      }
++
++      irq_domain_remove(ipu->domain);
++}
++
++void ipu_dump(struct ipu_soc *ipu)
++{
++      int i;
++
++      dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_CONF));
++      dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_CONF));
++      dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
++      dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
++      dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
++      dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
++      dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
++      dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
++              ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
++      dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
++      dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
++      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
++      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
++      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
++      dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
++              ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
++      for (i = 0; i < 15; i++)
++              dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
++                      ipu_cm_read(ipu, IPU_INT_CTRL(i)));
++}
++EXPORT_SYMBOL_GPL(ipu_dump);
++
++static int ipu_probe(struct platform_device *pdev)
++{
++      struct device_node *np = pdev->dev.of_node;
++      struct ipu_soc *ipu;
++      struct resource *res;
++      unsigned long ipu_base;
++      int ret, irq_sync, irq_err;
++      const struct ipu_devtype *devtype;
++
++      devtype = of_device_get_match_data(&pdev->dev);
++      if (!devtype)
++              return -EINVAL;
++
++      irq_sync = platform_get_irq(pdev, 0);
++      irq_err = platform_get_irq(pdev, 1);
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++      dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
++                      irq_sync, irq_err);
++
++      if (!res || irq_sync < 0 || irq_err < 0)
++              return -ENODEV;
++
++      ipu_base = res->start;
++
++      ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
++      if (!ipu)
++              return -ENODEV;
++
++      ipu->id = of_alias_get_id(np, "ipu");
++      if (ipu->id < 0)
++              ipu->id = 0;
++
++      if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
++          IS_ENABLED(CONFIG_DRM)) {
++              ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
++                                                        "fsl,prg", ipu->id);
++              if (!ipu->prg_priv)
++                      return -EPROBE_DEFER;
++      }
++
++      ipu->devtype = devtype;
++      ipu->ipu_type = devtype->type;
++
++      spin_lock_init(&ipu->lock);
++      mutex_init(&ipu->channel_lock);
++      INIT_LIST_HEAD(&ipu->channels);
++
++      dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
++                      ipu_base + devtype->cm_ofs);
++      dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
++                      ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
++      dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
++                      ipu_base + devtype->cpmem_ofs);
++      dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
++                      ipu_base + devtype->csi0_ofs);
++      dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
++                      ipu_base + devtype->csi1_ofs);
++      dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
++                      ipu_base + devtype->ic_ofs);
++      dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
++                      ipu_base + devtype->disp0_ofs);
++      dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
++                      ipu_base + devtype->disp1_ofs);
++      dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
++                      ipu_base + devtype->srm_ofs);
++      dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
++                      ipu_base + devtype->tpm_ofs);
++      dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
++                      ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
++      dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
++                      ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
++      dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
++                      ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
++      dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
++                      ipu_base + devtype->vdi_ofs);
++
++      ipu->cm_reg = devm_ioremap(&pdev->dev,
++                      ipu_base + devtype->cm_ofs, PAGE_SIZE);
++      ipu->idmac_reg = devm_ioremap(&pdev->dev,
++                      ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
++                      PAGE_SIZE);
++
++      if (!ipu->cm_reg || !ipu->idmac_reg)
++              return -ENOMEM;
++
++      ipu->clk = devm_clk_get(&pdev->dev, "bus");
++      if (IS_ERR(ipu->clk)) {
++              ret = PTR_ERR(ipu->clk);
++              dev_err(&pdev->dev, "clk_get failed with %d", ret);
++              return ret;
++      }
++
++      platform_set_drvdata(pdev, ipu);
++
++      ret = clk_prepare_enable(ipu->clk);
++      if (ret) {
++              dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
++              return ret;
++      }
++
++      ipu->dev = &pdev->dev;
++      ipu->irq_sync = irq_sync;
++      ipu->irq_err = irq_err;
++
++      ret = device_reset(&pdev->dev);
++      if (ret) {
++              dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++              goto out_failed_reset;
++      }
++      ret = ipu_memory_reset(ipu);
++      if (ret)
++              goto out_failed_reset;
++
++      ret = ipu_irq_init(ipu);
++      if (ret)
++              goto out_failed_irq;
++
++      /* Set MCU_T to divide MCU access window into 2 */
++      ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
++                      IPU_DISP_GEN);
++
++      ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
++      if (ret)
++              goto failed_submodules_init;
++
++      ret = ipu_add_client_devices(ipu, ipu_base);
++      if (ret) {
++              dev_err(&pdev->dev, "adding client devices failed with %d\n",
++                              ret);
++              goto failed_add_clients;
++      }
++
++      dev_info(&pdev->dev, "%s probed\n", devtype->name);
++
++      return 0;
++
++failed_add_clients:
++      ipu_submodules_exit(ipu);
++failed_submodules_init:
++      ipu_irq_exit(ipu);
++out_failed_irq:
++out_failed_reset:
++      clk_disable_unprepare(ipu->clk);
++      return ret;
++}
++
++static int ipu_remove(struct platform_device *pdev)
++{
++      struct ipu_soc *ipu = platform_get_drvdata(pdev);
++
++      platform_device_unregister_children(pdev);
++      ipu_submodules_exit(ipu);
++      ipu_irq_exit(ipu);
++
++      clk_disable_unprepare(ipu->clk);
++
++      return 0;
++}
++
++static struct platform_driver imx_ipu_driver = {
++      .driver = {
++              .name = "imx-ipuv3",
++              .of_match_table = imx_ipu_dt_ids,
++      },
++      .probe = ipu_probe,
++      .remove = ipu_remove,
++};
++
++static struct platform_driver * const drivers[] = {
++#if IS_ENABLED(CONFIG_DRM)
++      &ipu_pre_drv,
++      &ipu_prg_drv,
++#endif
++      &imx_ipu_driver,
++};
++
++static int __init imx_ipu_init(void)
++{
++      return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
++}
++module_init(imx_ipu_init);
++
++static void __exit imx_ipu_exit(void)
++{
++      platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
++}
++module_exit(imx_ipu_exit);
++
++MODULE_ALIAS("platform:imx-ipuv3");
++MODULE_DESCRIPTION("i.MX IPU v3 driver");
++MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-cpmem.c
+@@ -0,0 +1,976 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012 Mentor Graphics Inc.
++ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++#include <linux/types.h>
++#include <linux/bitrev.h>
++#include <linux/io.h>
++#include <linux/sizes.h>
++#include <drm/drm_fourcc.h>
++#include "ipu-prv.h"
++
++struct ipu_cpmem_word {
++      u32 data[5];
++      u32 res[3];
++};
++
++struct ipu_ch_param {
++      struct ipu_cpmem_word word[2];
++};
++
++struct ipu_cpmem {
++      struct ipu_ch_param __iomem *base;
++      u32 module;
++      spinlock_t lock;
++      int use_count;
++      struct ipu_soc *ipu;
++};
++
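++/*
++ * A CPMEM word is 160 bits wide. A field descriptor packs the absolute
++ * bit offset (word * 160 + ofs) into the upper bits and the field width
++ * into the low byte; ipu_ch_param_write_field() decodes it below.
++ */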
++#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
++
++#define IPU_FIELD_UBO         IPU_CPMEM_WORD(0, 46, 22)
++#define IPU_FIELD_VBO         IPU_CPMEM_WORD(0, 68, 22)
++#define IPU_FIELD_IOX         IPU_CPMEM_WORD(0, 90, 4)
++#define IPU_FIELD_RDRW                IPU_CPMEM_WORD(0, 94, 1)
++#define IPU_FIELD_SO          IPU_CPMEM_WORD(0, 113, 1)
++#define IPU_FIELD_SLY         IPU_CPMEM_WORD(1, 102, 14)
++#define IPU_FIELD_SLUV                IPU_CPMEM_WORD(1, 128, 14)
++
++#define IPU_FIELD_XV          IPU_CPMEM_WORD(0, 0, 10)
++#define IPU_FIELD_YV          IPU_CPMEM_WORD(0, 10, 9)
++#define IPU_FIELD_XB          IPU_CPMEM_WORD(0, 19, 13)
++#define IPU_FIELD_YB          IPU_CPMEM_WORD(0, 32, 12)
++#define IPU_FIELD_NSB_B               IPU_CPMEM_WORD(0, 44, 1)
++#define IPU_FIELD_CF          IPU_CPMEM_WORD(0, 45, 1)
++#define IPU_FIELD_SX          IPU_CPMEM_WORD(0, 46, 12)
++#define IPU_FIELD_SY          IPU_CPMEM_WORD(0, 58, 11)
++#define IPU_FIELD_NS          IPU_CPMEM_WORD(0, 69, 10)
++#define IPU_FIELD_SDX         IPU_CPMEM_WORD(0, 79, 7)
++#define IPU_FIELD_SM          IPU_CPMEM_WORD(0, 86, 10)
++#define IPU_FIELD_SCC         IPU_CPMEM_WORD(0, 96, 1)
++#define IPU_FIELD_SCE         IPU_CPMEM_WORD(0, 97, 1)
++#define IPU_FIELD_SDY         IPU_CPMEM_WORD(0, 98, 7)
++#define IPU_FIELD_SDRX                IPU_CPMEM_WORD(0, 105, 1)
++#define IPU_FIELD_SDRY                IPU_CPMEM_WORD(0, 106, 1)
++#define IPU_FIELD_BPP         IPU_CPMEM_WORD(0, 107, 3)
++#define IPU_FIELD_DEC_SEL     IPU_CPMEM_WORD(0, 110, 2)
++#define IPU_FIELD_DIM         IPU_CPMEM_WORD(0, 112, 1)
++#define IPU_FIELD_BNDM                IPU_CPMEM_WORD(0, 114, 3)
++#define IPU_FIELD_BM          IPU_CPMEM_WORD(0, 117, 2)
++#define IPU_FIELD_ROT         IPU_CPMEM_WORD(0, 119, 1)
++#define IPU_FIELD_ROT_HF_VF   IPU_CPMEM_WORD(0, 119, 3)
++#define IPU_FIELD_HF          IPU_CPMEM_WORD(0, 120, 1)
++#define IPU_FIELD_VF          IPU_CPMEM_WORD(0, 121, 1)
++#define IPU_FIELD_THE         IPU_CPMEM_WORD(0, 122, 1)
++#define IPU_FIELD_CAP         IPU_CPMEM_WORD(0, 123, 1)
++#define IPU_FIELD_CAE         IPU_CPMEM_WORD(0, 124, 1)
++#define IPU_FIELD_FW          IPU_CPMEM_WORD(0, 125, 13)
++#define IPU_FIELD_FH          IPU_CPMEM_WORD(0, 138, 12)
++#define IPU_FIELD_EBA0                IPU_CPMEM_WORD(1, 0, 29)
++#define IPU_FIELD_EBA1                IPU_CPMEM_WORD(1, 29, 29)
++#define IPU_FIELD_ILO         IPU_CPMEM_WORD(1, 58, 20)
++#define IPU_FIELD_NPB         IPU_CPMEM_WORD(1, 78, 7)
++#define IPU_FIELD_PFS         IPU_CPMEM_WORD(1, 85, 4)
++#define IPU_FIELD_ALU         IPU_CPMEM_WORD(1, 89, 1)
++#define IPU_FIELD_ALBM                IPU_CPMEM_WORD(1, 90, 3)
++#define IPU_FIELD_ID          IPU_CPMEM_WORD(1, 93, 2)
++#define IPU_FIELD_TH          IPU_CPMEM_WORD(1, 95, 7)
++#define IPU_FIELD_SL          IPU_CPMEM_WORD(1, 102, 14)
++#define IPU_FIELD_WID0                IPU_CPMEM_WORD(1, 116, 3)
++#define IPU_FIELD_WID1                IPU_CPMEM_WORD(1, 119, 3)
++#define IPU_FIELD_WID2                IPU_CPMEM_WORD(1, 122, 3)
++#define IPU_FIELD_WID3                IPU_CPMEM_WORD(1, 125, 3)
++#define IPU_FIELD_OFS0                IPU_CPMEM_WORD(1, 128, 5)
++#define IPU_FIELD_OFS1                IPU_CPMEM_WORD(1, 133, 5)
++#define IPU_FIELD_OFS2                IPU_CPMEM_WORD(1, 138, 5)
++#define IPU_FIELD_OFS3                IPU_CPMEM_WORD(1, 143, 5)
++#define IPU_FIELD_SXYS                IPU_CPMEM_WORD(1, 148, 1)
++#define IPU_FIELD_CRE         IPU_CPMEM_WORD(1, 149, 1)
++#define IPU_FIELD_DEC_SEL2    IPU_CPMEM_WORD(1, 150, 1)
++
++static inline struct ipu_ch_param __iomem *
++ipu_get_cpmem(struct ipuv3_channel *ch)
++{
++      struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv;
++
++      return cpmem->base + ch->num;
++}
++
++static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v)
++{
++      struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
++      u32 bit = (wbs >> 8) % 160;
++      u32 size = wbs & 0xff;
++      u32 word = (wbs >> 8) / 160;
++      u32 i = bit / 32;
++      u32 ofs = bit % 32;
++      u32 mask = (1 << size) - 1;
++      u32 val;
++
++      pr_debug("%s %d %d %d\n", __func__, word, bit, size);
++
++      val = readl(&base->word[word].data[i]);
++      val &= ~(mask << ofs);
++      val |= v << ofs;
++      writel(val, &base->word[word].data[i]);
++
++      if ((bit + size - 1) / 32 > i) {
++              val = readl(&base->word[word].data[i + 1]);
++              val &= ~(mask >> (ofs ? (32 - ofs) : 0));
++              val |= v >> (ofs ? (32 - ofs) : 0);
++              writel(val, &base->word[word].data[i + 1]);
++      }
++}
++
++static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
++{
++      struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
++      u32 bit = (wbs >> 8) % 160;
++      u32 size = wbs & 0xff;
++      u32 word = (wbs >> 8) / 160;
++      u32 i = bit / 32;
++      u32 ofs = bit % 32;
++      u32 mask = (1 << size) - 1;
++      u32 val = 0;
++
++      pr_debug("%s %d %d %d\n", __func__, word, bit, size);
++
++      val = (readl(&base->word[word].data[i]) >> ofs) & mask;
++
++      if ((bit + size - 1) / 32 > i) {
++              u32 tmp;
++
++              tmp = readl(&base->word[word].data[i + 1]);
++              tmp &= mask >> (ofs ? (32 - ofs) : 0);
++              val |= tmp << (ofs ? (32 - ofs) : 0);
++      }
++
++      return val;
++}
++
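++/*
++ * A minimal sketch (not taken from the driver) of how the IPU_CPMEM_WORD()
++ * encoding above can be unpacked.  The helper name is illustrative only;
++ * it just shows that the packed value carries the 160-bit word index, the
++ * bit offset inside that word and the field width, which is exactly what
++ * ipu_ch_param_write_field()/ipu_ch_param_read_field() recompute before
++ * touching CPMEM.
++ */
++static inline void example_cpmem_field_decode(u32 wbs, u32 *word, u32 *bit,
++                                            u32 *size)
++{
++      *word = (wbs >> 8) / 160;       /* which 160-bit CPMEM word */
++      *bit  = (wbs >> 8) % 160;       /* bit offset inside that word */
++      *size = wbs & 0xff;             /* field width in bits */
++}
++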
++/*
++ * The V4L2 spec defines packed RGB formats in memory byte order, which from
++ * point of view of the IPU corresponds to little-endian words with the first
++ * component in the least significant bits.
++ * The DRM pixel formats and IPU internal representation are ordered the other
++ * way around, with the first named component ordered at the most significant
++ * bits. Further, V4L2 formats are not well defined:
++ *     https://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
++ * We choose the interpretation which matches GStreamer behavior.
++ */
++static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
++{
++      switch (pixelformat) {
++      case V4L2_PIX_FMT_RGB565:
++              /*
++               * Here we choose the 'corrected' interpretation of RGBP, a
++               * little-endian 16-bit word with the red component at the most
++               * significant bits:
++               * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
++               */
++              return DRM_FORMAT_RGB565;
++      case V4L2_PIX_FMT_BGR24:
++              /* B G R <=> [24:0] R:G:B */
++              return DRM_FORMAT_RGB888;
++      case V4L2_PIX_FMT_RGB24:
++              /* R G B <=> [24:0] B:G:R */
++              return DRM_FORMAT_BGR888;
++      case V4L2_PIX_FMT_BGR32:
++              /* B G R A <=> [32:0] A:B:G:R */
++              return DRM_FORMAT_XRGB8888;
++      case V4L2_PIX_FMT_RGB32:
++              /* R G B A <=> [32:0] A:B:G:R */
++              return DRM_FORMAT_XBGR8888;
++      case V4L2_PIX_FMT_ABGR32:
++              /* B G R A <=> [32:0] A:R:G:B */
++              return DRM_FORMAT_ARGB8888;
++      case V4L2_PIX_FMT_XBGR32:
++              /* B G R X <=> [32:0] X:R:G:B */
++              return DRM_FORMAT_XRGB8888;
++      case V4L2_PIX_FMT_BGRA32:
++              /* A B G R <=> [32:0] R:G:B:A */
++              return DRM_FORMAT_RGBA8888;
++      case V4L2_PIX_FMT_BGRX32:
++              /* X B G R <=> [32:0] R:G:B:X */
++              return DRM_FORMAT_RGBX8888;
++      case V4L2_PIX_FMT_RGBA32:
++              /* R G B A <=> [32:0] A:B:G:R */
++              return DRM_FORMAT_ABGR8888;
++      case V4L2_PIX_FMT_RGBX32:
++              /* R G B X <=> [32:0] X:B:G:R */
++              return DRM_FORMAT_XBGR8888;
++      case V4L2_PIX_FMT_ARGB32:
++              /* A R G B <=> [32:0] B:G:R:A */
++              return DRM_FORMAT_BGRA8888;
++      case V4L2_PIX_FMT_XRGB32:
++              /* X R G B <=> [32:0] B:G:R:X */
++              return DRM_FORMAT_BGRX8888;
++      case V4L2_PIX_FMT_UYVY:
++              return DRM_FORMAT_UYVY;
++      case V4L2_PIX_FMT_YUYV:
++              return DRM_FORMAT_YUYV;
++      case V4L2_PIX_FMT_YUV420:
++              return DRM_FORMAT_YUV420;
++      case V4L2_PIX_FMT_YUV422P:
++              return DRM_FORMAT_YUV422;
++      case V4L2_PIX_FMT_YVU420:
++              return DRM_FORMAT_YVU420;
++      case V4L2_PIX_FMT_NV12:
++              return DRM_FORMAT_NV12;
++      case V4L2_PIX_FMT_NV16:
++              return DRM_FORMAT_NV16;
++      }
++
++      return -EINVAL;
++}
++
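++/*
++ * Illustrative use of the mapping above (a hedged sketch, not part of the
++ * original driver): translate a capture format once and bail out before
++ * programming CPMEM, since unknown formats return -EINVAL.  The resulting
++ * fourcc can then be handed to ipu_cpmem_set_fmt() further down.
++ */
++static int example_map_pixelformat(u32 v4l2_pixelformat, u32 *drm_fourcc)
++{
++      int ret = v4l2_pix_fmt_to_drm_fourcc(v4l2_pixelformat);
++
++      if (ret < 0)
++              return ret;     /* unsupported pixel format */
++
++      *drm_fourcc = ret;
++      return 0;
++}
++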
++void ipu_cpmem_zero(struct ipuv3_channel *ch)
++{
++      struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
++      void __iomem *base = p;
++      int i;
++
++      for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
++              writel(0, base + i * sizeof(u32));
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_zero);
++
++void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
++{
++      ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
++
++void ipu_cpmem_skip_odd_chroma_rows(struct ipuv3_channel *ch)
++{
++      ipu_ch_param_write_field(ch, IPU_FIELD_RDRW, 1);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_skip_odd_chroma_rows);
++
++void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
++{
++      ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_stride);
++
++void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch)
++{
++      struct ipu_soc *ipu = ch->ipu;
++      u32 val;
++
++      if (ipu->ipu_type == IPUV3EX)
++              ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1);
++
++      val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num));
++      val |= 1 << (ch->num % 32);
++      ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num));
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
++
++void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
++{
++      WARN_ON_ONCE(buf & 0x7);
++
++      if (bufnum)
++              ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
++      else
++              ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
++
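++/*
++ * Sketch of a double-buffered setup using the helper above (the buffer
++ * addresses are placeholders).  The only real requirement shown is the
++ * 8-byte alignment checked by the WARN_ON_ONCE() above, since EBA0/EBA1
++ * store the physical address divided by 8.
++ */
++static void example_set_double_buffer(struct ipuv3_channel *ch,
++                                    dma_addr_t buf0, dma_addr_t buf1)
++{
++      ipu_cpmem_set_buffer(ch, 0, buf0);      /* EBA0: first buffer */
++      ipu_cpmem_set_buffer(ch, 1, buf1);      /* EBA1: second buffer */
++}
++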
++void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
++{
++      WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
++      ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
++
++void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
++                             u32 pixelformat)
++{
++      u32 ilo, sly, sluv;
++
++      if (stride < 0) {
++              stride = -stride;
++              ilo = 0x100000 - (stride / 8);
++      } else {
++              ilo = stride / 8;
++      }
++
++      sly = (stride * 2) - 1;
++
++      switch (pixelformat) {
++      case V4L2_PIX_FMT_YUV420:
++      case V4L2_PIX_FMT_YVU420:
++              sluv = stride / 2 - 1;
++              break;
++      case V4L2_PIX_FMT_NV12:
++      case V4L2_PIX_FMT_YUV422P:
++              sluv = stride - 1;
++              break;
++      case V4L2_PIX_FMT_NV16:
++              sluv = stride * 2 - 1;
++              break;
++      default:
++              sluv = 0;
++              break;
++      }
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
++      ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
++      if (sluv)
++              ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
++
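++/*
++ * Worked example for the interlaced-scan parameters above (illustrative
++ * numbers, not taken from the patch): with a 1920-byte line stride,
++ * ilo = 1920 / 8 = 240 and sly = 2 * 1920 - 1 = 3839, so the IDMAC fetches
++ * every other line per field.  A negative stride is stored as a 20-bit
++ * two's complement value in ILO, e.g. -1920 becomes 0x100000 - 240 = 0xfff10.
++ */
++static inline u32 example_ilo_encode(int stride)
++{
++      return (stride < 0) ? 0x100000 - ((-stride) / 8) : stride / 8;
++}
++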
++void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
++{
++      id &= 0x3;
++      ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
++
++int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
++{
++      return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
++
++void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
++{
++      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
++
++void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
++{
++      ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
++
++void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
++                          enum ipu_rotate_mode rot)
++{
++      u32 temp_rot = bitrev8(rot) >> 5;
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
++
++int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
++                           const struct ipu_rgb *rgb)
++{
++      int bpp = 0, npb = 0, ro, go, bo, to;
++
++      ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
++      go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
++      bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
++      to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro);
++      ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go);
++      ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo);
++
++      if (rgb->transp.length) {
++              ipu_ch_param_write_field(ch, IPU_FIELD_WID3,
++                              rgb->transp.length - 1);
++              ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
++      } else {
++              ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
++              ipu_ch_param_write_field(ch, IPU_FIELD_OFS3,
++                              rgb->bits_per_pixel);
++      }
++
++      switch (rgb->bits_per_pixel) {
++      case 32:
++              bpp = 0;
++              npb = 15;
++              break;
++      case 24:
++              bpp = 1;
++              npb = 19;
++              break;
++      case 16:
++              bpp = 3;
++              npb = 31;
++              break;
++      case 8:
++              bpp = 5;
++              npb = 63;
++              break;
++      default:
++              return -EINVAL;
++      }
++      ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
++      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
++      ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
++
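++/*
++ * Sketch of the WIDn/OFSn derivation used above, with RGB565 as a worked
++ * example (values are illustrative): the CPMEM offsets count from the most
++ * significant bit, so red at offset 11/length 5 in a 16 bpp word is
++ * programmed as 16 - 5 - 11 = 0, green as 16 - 6 - 5 = 5 and blue as
++ * 16 - 5 - 0 = 11.
++ */
++static inline int example_msb_offset(int bits_per_pixel, int length,
++                                   int offset)
++{
++      return bits_per_pixel - length - offset;
++}
++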
++int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
++{
++      int bpp = 0, npb = 0;
++
++      switch (width) {
++      case 32:
++              bpp = 0;
++              npb = 15;
++              break;
++      case 24:
++              bpp = 1;
++              npb = 19;
++              break;
++      case 16:
++              bpp = 3;
++              npb = 31;
++              break;
++      case 8:
++              bpp = 5;
++              npb = 63;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
++      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
++      ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
++
++void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
++{
++      switch (pixel_format) {
++      case V4L2_PIX_FMT_UYVY:
++              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
++              break;
++      case V4L2_PIX_FMT_YUYV:
++              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
++              break;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
++
++void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
++                                 unsigned int uv_stride,
++                                 unsigned int u_offset, unsigned int v_offset)
++{
++      WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
++      ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
++
++static const struct ipu_rgb def_xrgb_32 = {
++      .red    = { .offset = 16, .length = 8, },
++      .green  = { .offset =  8, .length = 8, },
++      .blue   = { .offset =  0, .length = 8, },
++      .transp = { .offset = 24, .length = 8, },
++      .bits_per_pixel = 32,
++};
++
++static const struct ipu_rgb def_xbgr_32 = {
++      .red    = { .offset =  0, .length = 8, },
++      .green  = { .offset =  8, .length = 8, },
++      .blue   = { .offset = 16, .length = 8, },
++      .transp = { .offset = 24, .length = 8, },
++      .bits_per_pixel = 32,
++};
++
++static const struct ipu_rgb def_rgbx_32 = {
++      .red    = { .offset = 24, .length = 8, },
++      .green  = { .offset = 16, .length = 8, },
++      .blue   = { .offset =  8, .length = 8, },
++      .transp = { .offset =  0, .length = 8, },
++      .bits_per_pixel = 32,
++};
++
++static const struct ipu_rgb def_bgrx_32 = {
++      .red    = { .offset =  8, .length = 8, },
++      .green  = { .offset = 16, .length = 8, },
++      .blue   = { .offset = 24, .length = 8, },
++      .transp = { .offset =  0, .length = 8, },
++      .bits_per_pixel = 32,
++};
++
++static const struct ipu_rgb def_rgb_24 = {
++      .red    = { .offset = 16, .length = 8, },
++      .green  = { .offset =  8, .length = 8, },
++      .blue   = { .offset =  0, .length = 8, },
++      .transp = { .offset =  0, .length = 0, },
++      .bits_per_pixel = 24,
++};
++
++static const struct ipu_rgb def_bgr_24 = {
++      .red    = { .offset =  0, .length = 8, },
++      .green  = { .offset =  8, .length = 8, },
++      .blue   = { .offset = 16, .length = 8, },
++      .transp = { .offset =  0, .length = 0, },
++      .bits_per_pixel = 24,
++};
++
++static const struct ipu_rgb def_rgb_16 = {
++      .red    = { .offset = 11, .length = 5, },
++      .green  = { .offset =  5, .length = 6, },
++      .blue   = { .offset =  0, .length = 5, },
++      .transp = { .offset =  0, .length = 0, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_bgr_16 = {
++      .red    = { .offset =  0, .length = 5, },
++      .green  = { .offset =  5, .length = 6, },
++      .blue   = { .offset = 11, .length = 5, },
++      .transp = { .offset =  0, .length = 0, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_argb_16 = {
++      .red    = { .offset = 10, .length = 5, },
++      .green  = { .offset =  5, .length = 5, },
++      .blue   = { .offset =  0, .length = 5, },
++      .transp = { .offset = 15, .length = 1, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_argb_16_4444 = {
++      .red    = { .offset =  8, .length = 4, },
++      .green  = { .offset =  4, .length = 4, },
++      .blue   = { .offset =  0, .length = 4, },
++      .transp = { .offset = 12, .length = 4, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_abgr_16 = {
++      .red    = { .offset =  0, .length = 5, },
++      .green  = { .offset =  5, .length = 5, },
++      .blue   = { .offset = 10, .length = 5, },
++      .transp = { .offset = 15, .length = 1, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_rgba_16 = {
++      .red    = { .offset = 11, .length = 5, },
++      .green  = { .offset =  6, .length = 5, },
++      .blue   = { .offset =  1, .length = 5, },
++      .transp = { .offset =  0, .length = 1, },
++      .bits_per_pixel = 16,
++};
++
++static const struct ipu_rgb def_bgra_16 = {
++      .red    = { .offset =  1, .length = 5, },
++      .green  = { .offset =  6, .length = 5, },
++      .blue   = { .offset = 11, .length = 5, },
++      .transp = { .offset =  0, .length = 1, },
++      .bits_per_pixel = 16,
++};
++
++#define Y_OFFSET(pix, x, y)   ((x) + pix->width * (y))
++#define U_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
++                               (pix->width * ((y) / 2) / 2) + (x) / 2)
++#define V_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
++                               (pix->width * pix->height / 4) +       \
++                               (pix->width * ((y) / 2) / 2) + (x) / 2)
++#define U2_OFFSET(pix, x, y)  ((pix->width * pix->height) +           \
++                               (pix->width * (y) / 2) + (x) / 2)
++#define V2_OFFSET(pix, x, y)  ((pix->width * pix->height) +           \
++                               (pix->width * pix->height / 2) +       \
++                               (pix->width * (y) / 2) + (x) / 2)
++#define UV_OFFSET(pix, x, y)  ((pix->width * pix->height) +   \
++                               (pix->width * ((y) / 2)) + (x))
++#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) +   \
++                               (pix->width * y) + (x))
++
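++/*
++ * Worked example for the planar offset macros above (illustrative values):
++ * for a 640x480 V4L2_PIX_FMT_YUV420 buffer cropped at (x, y), the U plane
++ * starts 640 * 480 = 307200 bytes into the frame and U_OFFSET() adds the
++ * subsampled position (640 * (y / 2)) / 2 + x / 2, matching the 2x2 chroma
++ * subsampling.
++ */
++static inline unsigned int example_yuv420_u_offset(unsigned int width,
++                                                 unsigned int height,
++                                                 unsigned int x,
++                                                 unsigned int y)
++{
++      return width * height + (width * (y / 2)) / 2 + x / 2;
++}
++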
++#define NUM_ALPHA_CHANNELS    7
++
++/* See Table 37-12. Alpha channels mapping. */
++static int ipu_channel_albm(int ch_num)
++{
++      switch (ch_num) {
++      case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:     return 0;
++      case IPUV3_CHANNEL_G_MEM_IC_PP:         return 1;
++      case IPUV3_CHANNEL_MEM_FG_SYNC:         return 2;
++      case IPUV3_CHANNEL_MEM_FG_ASYNC:        return 3;
++      case IPUV3_CHANNEL_MEM_BG_SYNC:         return 4;
++      case IPUV3_CHANNEL_MEM_BG_ASYNC:        return 5;
++      case IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB: return 6;
++      default:
++              return -EINVAL;
++      }
++}
++
++static void ipu_cpmem_set_separate_alpha(struct ipuv3_channel *ch)
++{
++      struct ipu_soc *ipu = ch->ipu;
++      int albm;
++      u32 val;
++
++      albm = ipu_channel_albm(ch->num);
++      if (albm < 0)
++              return;
++
++      ipu_ch_param_write_field(ch, IPU_FIELD_ALU, 1);
++      ipu_ch_param_write_field(ch, IPU_FIELD_ALBM, albm);
++      ipu_ch_param_write_field(ch, IPU_FIELD_CRE, 1);
++
++      val = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++      val |= BIT(ch->num);
++      ipu_idmac_write(ipu, val, IDMAC_SEP_ALPHA);
++}
++
++int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
++{
++      switch (drm_fourcc) {
++      case DRM_FORMAT_YUV420:
++      case DRM_FORMAT_YVU420:
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_YUV422:
++      case DRM_FORMAT_YVU422:
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_YUV444:
++      case DRM_FORMAT_YVU444:
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_NV12:
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_NV16:
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_UYVY:
++              /* bits/pixel */
++              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_YUYV:
++              /* bits/pixel */
++              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
++              /* pix format */
++              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);
++              /* burst size */
++              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
++              break;
++      case DRM_FORMAT_ABGR8888:
++      case DRM_FORMAT_XBGR8888:
++              ipu_cpmem_set_format_rgb(ch, &def_xbgr_32);
++              break;
++      case DRM_FORMAT_ARGB8888:
++      case DRM_FORMAT_XRGB8888:
++              ipu_cpmem_set_format_rgb(ch, &def_xrgb_32);
++              break;
++      case DRM_FORMAT_RGBA8888:
++      case DRM_FORMAT_RGBX8888:
++      case DRM_FORMAT_RGBX8888_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_rgbx_32);
++              break;
++      case DRM_FORMAT_BGRA8888:
++      case DRM_FORMAT_BGRX8888:
++      case DRM_FORMAT_BGRX8888_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_bgrx_32);
++              break;
++      case DRM_FORMAT_BGR888:
++      case DRM_FORMAT_BGR888_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
++              break;
++      case DRM_FORMAT_RGB888:
++      case DRM_FORMAT_RGB888_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
++              break;
++      case DRM_FORMAT_RGB565:
++      case DRM_FORMAT_RGB565_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
++              break;
++      case DRM_FORMAT_BGR565:
++      case DRM_FORMAT_BGR565_A8:
++              ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
++              break;
++      case DRM_FORMAT_ARGB1555:
++              ipu_cpmem_set_format_rgb(ch, &def_argb_16);
++              break;
++      case DRM_FORMAT_ABGR1555:
++              ipu_cpmem_set_format_rgb(ch, &def_abgr_16);
++              break;
++      case DRM_FORMAT_RGBA5551:
++              ipu_cpmem_set_format_rgb(ch, &def_rgba_16);
++              break;
++      case DRM_FORMAT_BGRA5551:
++              ipu_cpmem_set_format_rgb(ch, &def_bgra_16);
++              break;
++      case DRM_FORMAT_ARGB4444:
++              ipu_cpmem_set_format_rgb(ch, &def_argb_16_4444);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      switch (drm_fourcc) {
++      case DRM_FORMAT_RGB565_A8:
++      case DRM_FORMAT_BGR565_A8:
++      case DRM_FORMAT_RGB888_A8:
++      case DRM_FORMAT_BGR888_A8:
++      case DRM_FORMAT_RGBX8888_A8:
++      case DRM_FORMAT_BGRX8888_A8:
++              ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
++              ipu_cpmem_set_separate_alpha(ch);
++              break;
++      default:
++              break;
++      }
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
++
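++/*
++ * A hedged sketch of a typical CPMEM programming sequence built from the
++ * helpers in this file (the resolution, stride and format values below are
++ * placeholders): geometry and stride first, then the pixel format, then the
++ * DMA buffer address.
++ */
++static int example_program_channel(struct ipuv3_channel *ch, dma_addr_t buf)
++{
++      int ret;
++
++      ipu_cpmem_zero(ch);
++      ipu_cpmem_set_resolution(ch, 1920, 1080);
++      ipu_cpmem_set_stride(ch, 1920 * 4);
++
++      ret = ipu_cpmem_set_fmt(ch, DRM_FORMAT_XRGB8888);
++      if (ret)
++              return ret;
++
++      ipu_cpmem_set_buffer(ch, 0, buf);
++
++      return 0;
++}
++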
++int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
++{
++      struct v4l2_pix_format *pix = &image->pix;
++      int offset, u_offset, v_offset;
++      int ret = 0;
++
++      pr_debug("%s: resolution: %dx%d stride: %d\n",
++               __func__, pix->width, pix->height,
++               pix->bytesperline);
++
++      ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height);
++      ipu_cpmem_set_stride(ch, pix->bytesperline);
++
++      ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
++
++      switch (pix->pixelformat) {
++      case V4L2_PIX_FMT_YUV420:
++              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
++              u_offset = image->u_offset ?
++                      image->u_offset : U_OFFSET(pix, image->rect.left,
++                                                 image->rect.top) - offset;
++              v_offset = image->v_offset ?
++                      image->v_offset : V_OFFSET(pix, image->rect.left,
++                                                 image->rect.top) - offset;
++
++              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
++                                            u_offset, v_offset);
++              break;
++      case V4L2_PIX_FMT_YVU420:
++              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
++              u_offset = image->u_offset ?
++                      image->u_offset : V_OFFSET(pix, image->rect.left,
++                                                 image->rect.top) - offset;
++              v_offset = image->v_offset ?
++                      image->v_offset : U_OFFSET(pix, image->rect.left,
++                                                 image->rect.top) - offset;
++
++              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
++                                            u_offset, v_offset);
++              break;
++      case V4L2_PIX_FMT_YUV422P:
++              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
++              u_offset = image->u_offset ?
++                      image->u_offset : U2_OFFSET(pix, image->rect.left,
++                                                  image->rect.top) - offset;
++              v_offset = image->v_offset ?
++                      image->v_offset : V2_OFFSET(pix, image->rect.left,
++                                                  image->rect.top) - offset;
++
++              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
++                                            u_offset, v_offset);
++              break;
++      case V4L2_PIX_FMT_NV12:
++              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
++              u_offset = image->u_offset ?
++                      image->u_offset : UV_OFFSET(pix, image->rect.left,
++                                                  image->rect.top) - offset;
++              v_offset = image->v_offset ? image->v_offset : 0;
++
++              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
++                                            u_offset, v_offset);
++              break;
++      case V4L2_PIX_FMT_NV16:
++              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
++              u_offset = image->u_offset ?
++                      image->u_offset : UV2_OFFSET(pix, image->rect.left,
++                                                   image->rect.top) - offset;
++              v_offset = image->v_offset ? image->v_offset : 0;
++
++              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
++                                            u_offset, v_offset);
++              break;
++      case V4L2_PIX_FMT_UYVY:
++      case V4L2_PIX_FMT_YUYV:
++      case V4L2_PIX_FMT_RGB565:
++              offset = image->rect.left * 2 +
++                      image->rect.top * pix->bytesperline;
++              break;
++      case V4L2_PIX_FMT_RGB32:
++      case V4L2_PIX_FMT_BGR32:
++      case V4L2_PIX_FMT_ABGR32:
++      case V4L2_PIX_FMT_XBGR32:
++      case V4L2_PIX_FMT_BGRA32:
++      case V4L2_PIX_FMT_BGRX32:
++      case V4L2_PIX_FMT_RGBA32:
++      case V4L2_PIX_FMT_RGBX32:
++      case V4L2_PIX_FMT_ARGB32:
++      case V4L2_PIX_FMT_XRGB32:
++              offset = image->rect.left * 4 +
++                      image->rect.top * pix->bytesperline;
++              break;
++      case V4L2_PIX_FMT_RGB24:
++      case V4L2_PIX_FMT_BGR24:
++              offset = image->rect.left * 3 +
++                      image->rect.top * pix->bytesperline;
++              break;
++      case V4L2_PIX_FMT_SBGGR8:
++      case V4L2_PIX_FMT_SGBRG8:
++      case V4L2_PIX_FMT_SGRBG8:
++      case V4L2_PIX_FMT_SRGGB8:
++      case V4L2_PIX_FMT_GREY:
++              offset = image->rect.left + image->rect.top * pix->bytesperline;
++              break;
++      case V4L2_PIX_FMT_SBGGR16:
++      case V4L2_PIX_FMT_SGBRG16:
++      case V4L2_PIX_FMT_SGRBG16:
++      case V4L2_PIX_FMT_SRGGB16:
++      case V4L2_PIX_FMT_Y16:
++              offset = image->rect.left * 2 +
++                       image->rect.top * pix->bytesperline;
++              break;
++      default:
++              /* This should not happen */
++              WARN_ON(1);
++              offset = 0;
++              ret = -EINVAL;
++      }
++
++      ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
++      ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
++
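++/*
++ * Illustrative ipu_image setup for the helper above (all values are
++ * placeholders): with u_offset/v_offset left at zero the chroma offsets
++ * fall back to the U_OFFSET()/V_OFFSET() macros earlier in this file,
++ * computed from the crop rectangle.
++ */
++static int example_set_full_frame(struct ipuv3_channel *ch,
++                                struct ipu_image *img, dma_addr_t phys)
++{
++      img->rect.left = 0;
++      img->rect.top = 0;
++      img->rect.width = img->pix.width;
++      img->rect.height = img->pix.height;
++      img->phys0 = phys;
++      img->phys1 = phys;
++      img->u_offset = 0;
++      img->v_offset = 0;
++
++      return ipu_cpmem_set_image(ch, img);
++}
++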
++void ipu_cpmem_dump(struct ipuv3_channel *ch)
++{
++      struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
++      struct ipu_soc *ipu = ch->ipu;
++      int chno = ch->num;
++
++      dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
++              readl(&p->word[0].data[0]),
++              readl(&p->word[0].data[1]),
++              readl(&p->word[0].data[2]),
++              readl(&p->word[0].data[3]),
++              readl(&p->word[0].data[4]));
++      dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
++              readl(&p->word[1].data[0]),
++              readl(&p->word[1].data[1]),
++              readl(&p->word[1].data[2]),
++              readl(&p->word[1].data[3]),
++              readl(&p->word[1].data[4]));
++      dev_dbg(ipu->dev, "PFS 0x%x, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
++      dev_dbg(ipu->dev, "BPP 0x%x, ",
++              ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
++      dev_dbg(ipu->dev, "NPB 0x%x\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
++
++      dev_dbg(ipu->dev, "FW %d, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_FW));
++      dev_dbg(ipu->dev, "FH %d, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_FH));
++      dev_dbg(ipu->dev, "EBA0 0x%x\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
++      dev_dbg(ipu->dev, "EBA1 0x%x\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
++      dev_dbg(ipu->dev, "Stride %d\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_SL));
++      dev_dbg(ipu->dev, "scan_order %d\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_SO));
++      dev_dbg(ipu->dev, "uv_stride %d\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
++      dev_dbg(ipu->dev, "u_offset 0x%x\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
++      dev_dbg(ipu->dev, "v_offset 0x%x\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
++
++      dev_dbg(ipu->dev, "Width0 %d+1, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
++      dev_dbg(ipu->dev, "Width1 %d+1, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
++      dev_dbg(ipu->dev, "Width2 %d+1, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
++      dev_dbg(ipu->dev, "Width3 %d+1, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
++      dev_dbg(ipu->dev, "Offset0 %d, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
++      dev_dbg(ipu->dev, "Offset1 %d, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
++      dev_dbg(ipu->dev, "Offset2 %d, ",
++               ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
++      dev_dbg(ipu->dev, "Offset3 %d\n",
++               ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
++}
++EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
++
++int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
++{
++      struct ipu_cpmem *cpmem;
++
++      cpmem = devm_kzalloc(dev, sizeof(*cpmem), GFP_KERNEL);
++      if (!cpmem)
++              return -ENOMEM;
++
++      ipu->cpmem_priv = cpmem;
++
++      spin_lock_init(&cpmem->lock);
++      cpmem->base = devm_ioremap(dev, base, SZ_128K);
++      if (!cpmem->base)
++              return -ENOMEM;
++
++      dev_dbg(dev, "CPMEM base: 0x%08lx remapped to %p\n",
++              base, cpmem->base);
++      cpmem->ipu = ipu;
++
++      return 0;
++}
++
++void ipu_cpmem_exit(struct ipu_soc *ipu)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-csi.c
+@@ -0,0 +1,821 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012-2014 Mentor Graphics Inc.
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/export.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/platform_device.h>
++#include <linux/videodev2.h>
++#include <uapi/linux/v4l2-mediabus.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/clkdev.h>
++
++#include "ipu-prv.h"
++
++struct ipu_csi {
++      void __iomem *base;
++      int id;
++      u32 module;
++      struct clk *clk_ipu;    /* IPU bus clock */
++      spinlock_t lock;
++      bool inuse;
++      struct ipu_soc *ipu;
++};
++
++/* CSI Register Offsets */
++#define CSI_SENS_CONF         0x0000
++#define CSI_SENS_FRM_SIZE     0x0004
++#define CSI_ACT_FRM_SIZE      0x0008
++#define CSI_OUT_FRM_CTRL      0x000c
++#define CSI_TST_CTRL          0x0010
++#define CSI_CCIR_CODE_1               0x0014
++#define CSI_CCIR_CODE_2               0x0018
++#define CSI_CCIR_CODE_3               0x001c
++#define CSI_MIPI_DI           0x0020
++#define CSI_SKIP              0x0024
++#define CSI_CPD_CTRL          0x0028
++#define CSI_CPD_RC(n)         (0x002c + ((n)*4))
++#define CSI_CPD_RS(n)         (0x004c + ((n)*4))
++#define CSI_CPD_GRC(n)                (0x005c + ((n)*4))
++#define CSI_CPD_GRS(n)                (0x007c + ((n)*4))
++#define CSI_CPD_GBC(n)                (0x008c + ((n)*4))
++#define CSI_CPD_GBS(n)                (0x00ac + ((n)*4))
++#define CSI_CPD_BC(n)         (0x00bc + ((n)*4))
++#define CSI_CPD_BS(n)         (0x00dc + ((n)*4))
++#define CSI_CPD_OFFSET1               0x00ec
++#define CSI_CPD_OFFSET2               0x00f0
++
++/* CSI Register Fields */
++#define CSI_SENS_CONF_DATA_FMT_SHIFT          8
++#define CSI_SENS_CONF_DATA_FMT_MASK           0x00000700
++#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444     0L
++#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV    1L
++#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY    2L
++#define CSI_SENS_CONF_DATA_FMT_BAYER          3L
++#define CSI_SENS_CONF_DATA_FMT_RGB565         4L
++#define CSI_SENS_CONF_DATA_FMT_RGB555         5L
++#define CSI_SENS_CONF_DATA_FMT_RGB444         6L
++#define CSI_SENS_CONF_DATA_FMT_JPEG           7L
++
++#define CSI_SENS_CONF_VSYNC_POL_SHIFT         0
++#define CSI_SENS_CONF_HSYNC_POL_SHIFT         1
++#define CSI_SENS_CONF_DATA_POL_SHIFT          2
++#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT               3
++#define CSI_SENS_CONF_SENS_PRTCL_MASK         0x00000070
++#define CSI_SENS_CONF_SENS_PRTCL_SHIFT                4
++#define CSI_SENS_CONF_PACK_TIGHT_SHIFT                7
++#define CSI_SENS_CONF_DATA_WIDTH_SHIFT                11
++#define CSI_SENS_CONF_EXT_VSYNC_SHIFT         15
++#define CSI_SENS_CONF_DIVRATIO_SHIFT          16
++
++#define CSI_SENS_CONF_DIVRATIO_MASK           0x00ff0000
++#define CSI_SENS_CONF_DATA_DEST_SHIFT         24
++#define CSI_SENS_CONF_DATA_DEST_MASK          0x07000000
++#define CSI_SENS_CONF_JPEG8_EN_SHIFT          27
++#define CSI_SENS_CONF_JPEG_EN_SHIFT           28
++#define CSI_SENS_CONF_FORCE_EOF_SHIFT         29
++#define CSI_SENS_CONF_DATA_EN_POL_SHIFT               31
++
++#define CSI_DATA_DEST_IC                      2
++#define CSI_DATA_DEST_IDMAC                   4
++
++#define CSI_CCIR_ERR_DET_EN                   0x01000000
++#define CSI_HORI_DOWNSIZE_EN                  0x80000000
++#define CSI_VERT_DOWNSIZE_EN                  0x40000000
++#define CSI_TEST_GEN_MODE_EN                  0x01000000
++
++#define CSI_HSC_MASK                          0x1fff0000
++#define CSI_HSC_SHIFT                         16
++#define CSI_VSC_MASK                          0x00000fff
++#define CSI_VSC_SHIFT                         0
++
++#define CSI_TEST_GEN_R_MASK                   0x000000ff
++#define CSI_TEST_GEN_R_SHIFT                  0
++#define CSI_TEST_GEN_G_MASK                   0x0000ff00
++#define CSI_TEST_GEN_G_SHIFT                  8
++#define CSI_TEST_GEN_B_MASK                   0x00ff0000
++#define CSI_TEST_GEN_B_SHIFT                  16
++
++#define CSI_MAX_RATIO_SKIP_SMFC_MASK          0x00000007
++#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT         0
++#define CSI_SKIP_SMFC_MASK                    0x000000f8
++#define CSI_SKIP_SMFC_SHIFT                   3
++#define CSI_ID_2_SKIP_MASK                    0x00000300
++#define CSI_ID_2_SKIP_SHIFT                   8
++
++#define CSI_COLOR_FIRST_ROW_MASK              0x00000002
++#define CSI_COLOR_FIRST_COMP_MASK             0x00000001
++
++/* MIPI CSI-2 data types */
++#define MIPI_DT_YUV420                0x18 /* YYY.../UYVY.... */
++#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY...   */
++#define MIPI_DT_YUV422                0x1e /* UYVY...         */
++#define MIPI_DT_RGB444                0x20
++#define MIPI_DT_RGB555                0x21
++#define MIPI_DT_RGB565                0x22
++#define MIPI_DT_RGB666                0x23
++#define MIPI_DT_RGB888                0x24
++#define MIPI_DT_RAW6          0x28
++#define MIPI_DT_RAW7          0x29
++#define MIPI_DT_RAW8          0x2a
++#define MIPI_DT_RAW10         0x2b
++#define MIPI_DT_RAW12         0x2c
++#define MIPI_DT_RAW14         0x2d
++
++/*
++ * Bitfield of CSI bus signal polarities and modes.
++ */
++struct ipu_csi_bus_config {
++      unsigned data_width:4;
++      unsigned clk_mode:3;
++      unsigned ext_vsync:1;
++      unsigned vsync_pol:1;
++      unsigned hsync_pol:1;
++      unsigned pixclk_pol:1;
++      unsigned data_pol:1;
++      unsigned sens_clksrc:1;
++      unsigned pack_tight:1;
++      unsigned force_eof:1;
++      unsigned data_en_pol:1;
++
++      unsigned data_fmt;
++      unsigned mipi_dt;
++};
++
++/*
++ * Enumeration of CSI data bus widths.
++ */
++enum ipu_csi_data_width {
++      IPU_CSI_DATA_WIDTH_4   = 0,
++      IPU_CSI_DATA_WIDTH_8   = 1,
++      IPU_CSI_DATA_WIDTH_10  = 3,
++      IPU_CSI_DATA_WIDTH_12  = 5,
++      IPU_CSI_DATA_WIDTH_16  = 9,
++};
++
++/*
++ * Enumeration of CSI clock modes.
++ */
++enum ipu_csi_clk_mode {
++      IPU_CSI_CLK_MODE_GATED_CLK,
++      IPU_CSI_CLK_MODE_NONGATED_CLK,
++      IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
++      IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
++      IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
++      IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
++      IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
++      IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
++};
++
++static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
++{
++      return readl(csi->base + offset);
++}
++
++static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
++                               unsigned offset)
++{
++      writel(value, csi->base + offset);
++}
++
++/*
++ * Set mclk division ratio for generating test mode mclk. Only used
++ * for test generator.
++ */
++static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
++                                      u32 ipu_clk)
++{
++      u32 temp;
++      int div_ratio;
++
++      div_ratio = (ipu_clk / pixel_clk) - 1;
++
++      if (div_ratio > 0xFF || div_ratio < 0) {
++              dev_err(csi->ipu->dev,
++                      "value of pixel_clk exceeds normal range\n");
++              return -EINVAL;
++      }
++
++      temp = ipu_csi_read(csi, CSI_SENS_CONF);
++      temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
++      ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
++                        CSI_SENS_CONF);
++
++      return 0;
++}
++
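++/*
++ * Worked example for the divider above (illustrative clock rates): with a
++ * 264 MHz IPU clock and a 24 MHz test-pattern pixel clock the programmed
++ * ratio is 264000000 / 24000000 - 1 = 10, which fits the 8-bit DIVRATIO
++ * field; ratios outside 0..255 are rejected with -EINVAL.
++ */
++static inline int example_testgen_divratio(u32 ipu_clk, u32 pixel_clk)
++{
++      int div_ratio = (int)(ipu_clk / pixel_clk) - 1;
++
++      return (div_ratio < 0 || div_ratio > 0xFF) ? -EINVAL : div_ratio;
++}
++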
++/*
++ * Find the CSI data format and data width for the given V4L2 media
++ * bus pixel format code.
++ */
++static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
++                              enum v4l2_mbus_type mbus_type)
++{
++      switch (mbus_code) {
++      case MEDIA_BUS_FMT_BGR565_2X8_BE:
++      case MEDIA_BUS_FMT_BGR565_2X8_LE:
++      case MEDIA_BUS_FMT_RGB565_2X8_BE:
++      case MEDIA_BUS_FMT_RGB565_2X8_LE:
++              if (mbus_type == V4L2_MBUS_CSI2_DPHY)
++                      cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
++              else
++                      cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_RGB565;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
++      case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
++              cfg->mipi_dt = MIPI_DT_RGB444;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
++      case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
++              cfg->mipi_dt = MIPI_DT_RGB555;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_RGB888_1X24:
++      case MEDIA_BUS_FMT_BGR888_1X24:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
++              cfg->mipi_dt = MIPI_DT_RGB888;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_UYVY8_2X8:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
++              cfg->mipi_dt = MIPI_DT_YUV422;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_YUYV8_2X8:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
++              cfg->mipi_dt = MIPI_DT_YUV422;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_UYVY8_1X16:
++      case MEDIA_BUS_FMT_YUYV8_1X16:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_YUV422;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_16;
++              break;
++      case MEDIA_BUS_FMT_SBGGR8_1X8:
++      case MEDIA_BUS_FMT_SGBRG8_1X8:
++      case MEDIA_BUS_FMT_SGRBG8_1X8:
++      case MEDIA_BUS_FMT_SRGGB8_1X8:
++      case MEDIA_BUS_FMT_Y8_1X8:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_RAW8;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
++      case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
++      case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
++      case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
++      case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
++      case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
++      case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
++      case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_RAW10;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      case MEDIA_BUS_FMT_SBGGR10_1X10:
++      case MEDIA_BUS_FMT_SGBRG10_1X10:
++      case MEDIA_BUS_FMT_SGRBG10_1X10:
++      case MEDIA_BUS_FMT_SRGGB10_1X10:
++      case MEDIA_BUS_FMT_Y10_1X10:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_RAW10;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_10;
++              break;
++      case MEDIA_BUS_FMT_SBGGR12_1X12:
++      case MEDIA_BUS_FMT_SGBRG12_1X12:
++      case MEDIA_BUS_FMT_SGRBG12_1X12:
++      case MEDIA_BUS_FMT_SRGGB12_1X12:
++      case MEDIA_BUS_FMT_Y12_1X12:
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++              cfg->mipi_dt = MIPI_DT_RAW12;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_12;
++              break;
++      case MEDIA_BUS_FMT_JPEG_1X8:
++              /* TODO */
++              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
++              cfg->mipi_dt = MIPI_DT_RAW8;
++              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* translate alternate field mode based on given standard */
++static inline enum v4l2_field
++ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std)
++{
++      return (field != V4L2_FIELD_ALTERNATE) ? field :
++              ((std & V4L2_STD_525_60) ?
++               V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB);
++}
++
++/*
++ * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
++ */
++static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
++                          const struct v4l2_mbus_config *mbus_cfg,
++                          const struct v4l2_mbus_framefmt *mbus_fmt)
++{
++      int ret;
++
++      memset(csicfg, 0, sizeof(*csicfg));
++
++      ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
++      if (ret < 0)
++              return ret;
++
++      switch (mbus_cfg->type) {
++      case V4L2_MBUS_PARALLEL:
++              csicfg->ext_vsync = 1;
++              csicfg->vsync_pol = (mbus_cfg->flags &
++                                   V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
++              csicfg->hsync_pol = (mbus_cfg->flags &
++                                   V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
++              csicfg->pixclk_pol = (mbus_cfg->flags &
++                                    V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
++              csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
++              break;
++      case V4L2_MBUS_BT656:
++              csicfg->ext_vsync = 0;
++              if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
++                  mbus_fmt->field == V4L2_FIELD_ALTERNATE)
++                      csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
++              else
++                      csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
++              break;
++      case V4L2_MBUS_CSI2_DPHY:
++              /*
++               * MIPI CSI-2 requires the non-gated clock mode; all other
++               * parameters are not applicable to the MIPI CSI-2 bus.
++               */
++              csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
++              break;
++      default:
++              /* will never get here, keep compiler quiet */
++              break;
++      }
++
++      return 0;
++}
++
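++/*
++ * Sketch of how the helper above is typically fed (a hedged example, the
++ * wrapper name is illustrative): a BT.656 source whose format carries
++ * alternating or both fields ends up in the interlaced CCIR656 clock mode,
++ * anything else in the progressive one.
++ */
++static int example_bt656_bus_cfg(struct ipu_csi_bus_config *cfg,
++                               const struct v4l2_mbus_framefmt *fmt)
++{
++      const struct v4l2_mbus_config mbus_cfg = {
++              .type = V4L2_MBUS_BT656,
++      };
++
++      return fill_csi_bus_cfg(cfg, &mbus_cfg, fmt);
++}
++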
++static int
++ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi,
++                              const struct v4l2_mbus_framefmt *infmt,
++                              const struct v4l2_mbus_framefmt *outfmt,
++                              v4l2_std_id std)
++{
++      enum v4l2_field infield, outfield;
++      bool swap_fields;
++
++      /* get translated field type of input and output */
++      infield = ipu_csi_translate_field(infmt->field, std);
++      outfield = ipu_csi_translate_field(outfmt->field, std);
++
++      /*
++       * Write the H-V-F codes the CSI will match against the
++       * incoming data for start/end of active and blanking
++       * field intervals. If input and output field types are
++       * sequential but not the same (one is SEQ_BT and the other
++       * is SEQ_TB), swap the F-bit so that the CSI will capture
++       * field 1 lines before field 0 lines.
++       */
++      swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) &&
++                     V4L2_FIELD_IS_SEQUENTIAL(outfield) &&
++                     infield != outfield);
++
++      if (!swap_fields) {
++              /*
++               * Field0BlankEnd  = 110, Field0BlankStart  = 010
++               * Field0ActiveEnd = 100, Field0ActiveStart = 000
++               * Field1BlankEnd  = 111, Field1BlankStart  = 011
++               * Field1ActiveEnd = 101, Field1ActiveStart = 001
++               */
++              ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
++                            CSI_CCIR_CODE_1);
++              ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
++      } else {
++              dev_dbg(csi->ipu->dev, "capture field swap\n");
++
++              /* same as above but with F-bit inverted */
++              ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
++                            CSI_CCIR_CODE_1);
++              ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
++      }
++
++      ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
++
++      return 0;
++}
++
++
++int ipu_csi_init_interface(struct ipu_csi *csi,
++                         const struct v4l2_mbus_config *mbus_cfg,
++                         const struct v4l2_mbus_framefmt *infmt,
++                         const struct v4l2_mbus_framefmt *outfmt)
++{
++      struct ipu_csi_bus_config cfg;
++      unsigned long flags;
++      u32 width, height, data = 0;
++      v4l2_std_id std;
++      int ret;
++
++      ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt);
++      if (ret < 0)
++              return ret;
++
++      /* set default sensor frame width and height */
++      width = infmt->width;
++      height = infmt->height;
++      if (infmt->field == V4L2_FIELD_ALTERNATE)
++              height *= 2;
++
++      /* Set the CSI_SENS_CONF register remaining fields */
++      data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
++              cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
++              cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
++              cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
++              cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
++              cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
++              cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
++              cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
++              cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
++              cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
++              cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      ipu_csi_write(csi, data, CSI_SENS_CONF);
++
++      /* Set CCIR registers */
++
++      switch (cfg.clk_mode) {
++      case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++              ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
++              ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
++              break;
++      case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++              if (width == 720 && height == 480) {
++                      std = V4L2_STD_NTSC;
++                      height = 525;
++              } else if (width == 720 && height == 576) {
++                      std = V4L2_STD_PAL;
++                      height = 625;
++              } else {
++                      dev_err(csi->ipu->dev,
++                              "Unsupported interlaced video mode\n");
++                      ret = -EINVAL;
++                      goto out_unlock;
++              }
++
++              ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std);
++              if (ret)
++                      goto out_unlock;
++              break;
++      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++              ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
++                                 CSI_CCIR_CODE_1);
++              ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
++              break;
++      case IPU_CSI_CLK_MODE_GATED_CLK:
++      case IPU_CSI_CLK_MODE_NONGATED_CLK:
++              ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
++              break;
++      }
++
++      /* Setup sensor frame size */
++      ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
++                    CSI_SENS_FRM_SIZE);
++
++      dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
++              ipu_csi_read(csi, CSI_SENS_CONF));
++      dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
++              ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
++
++out_unlock:
++      spin_unlock_irqrestore(&csi->lock, flags);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
++
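++/*
++ * Hedged usage sketch for the interface setup above (the wrapper name is a
++ * placeholder): for a 720x480 BT.656 source delivering alternating fields,
++ * ipu_csi_init_interface() doubles the height, detects NTSC and programs a
++ * 525-line sensor frame together with the interlaced CCIR codes.
++ */
++static int example_init_bt656(struct ipu_csi *csi,
++                            const struct v4l2_mbus_framefmt *fmt)
++{
++      const struct v4l2_mbus_config mbus_cfg = { .type = V4L2_MBUS_BT656 };
++
++      /* use the same format on the CSI input and output side */
++      return ipu_csi_init_interface(csi, &mbus_cfg, fmt, fmt);
++}
++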
++bool ipu_csi_is_interlaced(struct ipu_csi *csi)
++{
++      unsigned long flags;
++      u32 sensor_protocol;
++
++      spin_lock_irqsave(&csi->lock, flags);
++      sensor_protocol =
++              (ipu_csi_read(csi, CSI_SENS_CONF) &
++               CSI_SENS_CONF_SENS_PRTCL_MASK) >>
++              CSI_SENS_CONF_SENS_PRTCL_SHIFT;
++      spin_unlock_irqrestore(&csi->lock, flags);
++
++      switch (sensor_protocol) {
++      case IPU_CSI_CLK_MODE_GATED_CLK:
++      case IPU_CSI_CLK_MODE_NONGATED_CLK:
++      case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
++      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
++      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
++              return false;
++      case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
++      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
++      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
++              return true;
++      default:
++              dev_err(csi->ipu->dev,
++                      "CSI %d sensor protocol unsupported\n", csi->id);
++              return false;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
++
++void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
++{
++      unsigned long flags;
++      u32 reg;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
++      w->width = (reg & 0xFFFF) + 1;
++      w->height = (reg >> 16 & 0xFFFF) + 1;
++
++      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
++      w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
++      w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_csi_get_window);
++
++void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
++{
++      unsigned long flags;
++      u32 reg;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
++                        CSI_ACT_FRM_SIZE);
++
++      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
++      reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
++      reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
++      ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_window);
++
++void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
++{
++      unsigned long flags;
++      u32 reg;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
++      reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
++      reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
++             (vert ? CSI_VERT_DOWNSIZE_EN : 0);
++      ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
++
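++/*
++ * Enable or disable the CSI test pattern generator, which outputs frames
++ * filled with the given R/G/B values.  When enabling, the sensb_mclk
++ * divider is reprogrammed so the generator runs at roughly pix_clk,
++ * derived from the IPU clock.
++ */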
++void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
++                              u32 r_value, u32 g_value, u32 b_value,
++                              u32 pix_clk)
++{
++      unsigned long flags;
++      u32 ipu_clk = clk_get_rate(csi->clk_ipu);
++      u32 temp;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      temp = ipu_csi_read(csi, CSI_TST_CTRL);
++
++      if (!active) {
++              temp &= ~CSI_TEST_GEN_MODE_EN;
++              ipu_csi_write(csi, temp, CSI_TST_CTRL);
++      } else {
++              /* Set sensb_mclk div_ratio */
++              ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
++
++              temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
++                        CSI_TEST_GEN_B_MASK);
++              temp |= CSI_TEST_GEN_MODE_EN;
++              temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
++                      (g_value << CSI_TEST_GEN_G_SHIFT) |
++                      (b_value << CSI_TEST_GEN_B_SHIFT);
++              ipu_csi_write(csi, temp, CSI_TST_CTRL);
++      }
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
++
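++/*
++ * Program the MIPI CSI-2 data type for one virtual channel (0-3).  Each
++ * virtual channel owns one byte of the CSI_MIPI_DI register.
++ */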
++int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
++                            struct v4l2_mbus_framefmt *mbus_fmt)
++{
++      struct ipu_csi_bus_config cfg;
++      unsigned long flags;
++      u32 temp;
++      int ret;
++
++      if (vc > 3)
++              return -EINVAL;
++
++      ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2_DPHY);
++      if (ret < 0)
++              return ret;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      temp = ipu_csi_read(csi, CSI_MIPI_DI);
++      temp &= ~(0xff << (vc * 8));
++      temp |= (cfg.mipi_dt << (vc * 8));
++      ipu_csi_write(csi, temp, CSI_MIPI_DI);
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
++
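++/*
++ * Validate the ranges (max_ratio <= 5, id <= 3) and program the SMFC
++ * frame-skip pattern into the CSI_SKIP register.
++ */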
++int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
++                        u32 max_ratio, u32 id)
++{
++      unsigned long flags;
++      u32 temp;
++
++      if (max_ratio > 5 || id > 3)
++              return -EINVAL;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      temp = ipu_csi_read(csi, CSI_SKIP);
++      temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
++                CSI_SKIP_SMFC_MASK);
++      temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
++              (id << CSI_ID_2_SKIP_SHIFT) |
++              (skip << CSI_SKIP_SMFC_SHIFT);
++      ipu_csi_write(csi, temp, CSI_SKIP);
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
++
++int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
++{
++      unsigned long flags;
++      u32 csi_sens_conf, dest;
++
++      if (csi_dest == IPU_CSI_DEST_IDMAC)
++              dest = CSI_DATA_DEST_IDMAC;
++      else
++              dest = CSI_DATA_DEST_IC; /* IC or VDIC */
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
++      csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
++      csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
++      ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
++
++      spin_unlock_irqrestore(&csi->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
++
++int ipu_csi_enable(struct ipu_csi *csi)
++{
++      ipu_module_enable(csi->ipu, csi->module);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_enable);
++
++int ipu_csi_disable(struct ipu_csi *csi)
++{
++      ipu_module_disable(csi->ipu, csi->module);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_disable);
++
++struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
++{
++      unsigned long flags;
++      struct ipu_csi *csi, *ret;
++
++      if (id > 1)
++              return ERR_PTR(-EINVAL);
++
++      csi = ipu->csi_priv[id];
++      ret = csi;
++
++      spin_lock_irqsave(&csi->lock, flags);
++
++      if (csi->inuse) {
++              ret = ERR_PTR(-EBUSY);
++              goto unlock;
++      }
++
++      csi->inuse = true;
++unlock:
++      spin_unlock_irqrestore(&csi->lock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_csi_get);
++
++void ipu_csi_put(struct ipu_csi *csi)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&csi->lock, flags);
++      csi->inuse = false;
++      spin_unlock_irqrestore(&csi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_csi_put);
++
++int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
++               unsigned long base, u32 module, struct clk *clk_ipu)
++{
++      struct ipu_csi *csi;
++
++      if (id > 1)
++              return -ENODEV;
++
++      csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
++      if (!csi)
++              return -ENOMEM;
++
++      ipu->csi_priv[id] = csi;
++
++      spin_lock_init(&csi->lock);
++      csi->module = module;
++      csi->id = id;
++      csi->clk_ipu = clk_ipu;
++      csi->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!csi->base)
++              return -ENOMEM;
++
++      dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
++              id, base, csi->base);
++      csi->ipu = ipu;
++
++      return 0;
++}
++
++void ipu_csi_exit(struct ipu_soc *ipu, int id)
++{
++}
++
++void ipu_csi_dump(struct ipu_csi *csi)
++{
++      dev_dbg(csi->ipu->dev, "CSI_SENS_CONF:     %08x\n",
++              ipu_csi_read(csi, CSI_SENS_CONF));
++      dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
++              ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
++      dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE:  %08x\n",
++              ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
++      dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL:  %08x\n",
++              ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
++      dev_dbg(csi->ipu->dev, "CSI_TST_CTRL:      %08x\n",
++              ipu_csi_read(csi, CSI_TST_CTRL));
++      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1:   %08x\n",
++              ipu_csi_read(csi, CSI_CCIR_CODE_1));
++      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2:   %08x\n",
++              ipu_csi_read(csi, CSI_CCIR_CODE_2));
++      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3:   %08x\n",
++              ipu_csi_read(csi, CSI_CCIR_CODE_3));
++      dev_dbg(csi->ipu->dev, "CSI_MIPI_DI:       %08x\n",
++              ipu_csi_read(csi, CSI_MIPI_DI));
++      dev_dbg(csi->ipu->dev, "CSI_SKIP:          %08x\n",
++              ipu_csi_read(csi, CSI_SKIP));
++}
++EXPORT_SYMBOL_GPL(ipu_csi_dump);
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-dc.c
+@@ -0,0 +1,420 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/export.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++
++#include <video/imx-ipu-v3.h>
++#include "ipu-prv.h"
++
++#define DC_MAP_CONF_PTR(n)    (0x108 + ((n) & ~0x1) * 2)
++#define DC_MAP_CONF_VAL(n)    (0x144 + ((n) & ~0x1) * 2)
++
++#define DC_EVT_NF             0
++#define DC_EVT_NL             1
++#define DC_EVT_EOF            2
++#define DC_EVT_NFIELD         3
++#define DC_EVT_EOL            4
++#define DC_EVT_EOFIELD                5
++#define DC_EVT_NEW_ADDR               6
++#define DC_EVT_NEW_CHAN               7
++#define DC_EVT_NEW_DATA               8
++
++#define DC_EVT_NEW_ADDR_W_0   0
++#define DC_EVT_NEW_ADDR_W_1   1
++#define DC_EVT_NEW_CHAN_W_0   2
++#define DC_EVT_NEW_CHAN_W_1   3
++#define DC_EVT_NEW_DATA_W_0   4
++#define DC_EVT_NEW_DATA_W_1   5
++#define DC_EVT_NEW_ADDR_R_0   6
++#define DC_EVT_NEW_ADDR_R_1   7
++#define DC_EVT_NEW_CHAN_R_0   8
++#define DC_EVT_NEW_CHAN_R_1   9
++#define DC_EVT_NEW_DATA_R_0   10
++#define DC_EVT_NEW_DATA_R_1   11
++
++#define DC_WR_CH_CONF         0x0
++#define DC_WR_CH_ADDR         0x4
++#define DC_RL_CH(evt)         (8 + ((evt) & ~0x1) * 2)
++
++#define DC_GEN                        0xd4
++#define DC_DISP_CONF1(disp)   (0xd8 + (disp) * 4)
++#define DC_DISP_CONF2(disp)   (0xe8 + (disp) * 4)
++#define DC_STAT                       0x1c8
++
++#define WROD(lf)              (0x18 | ((lf) << 1))
++#define WRG                   0x01
++#define WCLK                  0xc9
++
++#define SYNC_WAVE 0
++#define NULL_WAVE (-1)
++
++#define DC_GEN_SYNC_1_6_SYNC  (2 << 1)
++#define DC_GEN_SYNC_PRIORITY_1        (1 << 7)
++
++#define DC_WR_CH_CONF_WORD_SIZE_8             (0 << 0)
++#define DC_WR_CH_CONF_WORD_SIZE_16            (1 << 0)
++#define DC_WR_CH_CONF_WORD_SIZE_24            (2 << 0)
++#define DC_WR_CH_CONF_WORD_SIZE_32            (3 << 0)
++#define DC_WR_CH_CONF_DISP_ID_PARALLEL(i)     (((i) & 0x1) << 3)
++#define DC_WR_CH_CONF_DISP_ID_SERIAL          (2 << 3)
++#define DC_WR_CH_CONF_DISP_ID_ASYNC           (3 << 4)
++#define DC_WR_CH_CONF_FIELD_MODE              (1 << 9)
++#define DC_WR_CH_CONF_PROG_TYPE_NORMAL                (4 << 5)
++#define DC_WR_CH_CONF_PROG_TYPE_MASK          (7 << 5)
++#define DC_WR_CH_CONF_PROG_DI_ID              (1 << 2)
++#define DC_WR_CH_CONF_PROG_DISP_ID(i)         (((i) & 0x1) << 3)
++
++#define IPU_DC_NUM_CHANNELS   10
++
++struct ipu_dc_priv;
++
++enum ipu_dc_map {
++      IPU_DC_MAP_RGB24,
++      IPU_DC_MAP_RGB565,
++      IPU_DC_MAP_GBR24, /* TVEv2 */
++      IPU_DC_MAP_BGR666,
++      IPU_DC_MAP_LVDS666,
++      IPU_DC_MAP_BGR24,
++};
++
++struct ipu_dc {
++      /* The display interface number assigned to this dc channel */
++      unsigned int            di;
++      void __iomem            *base;
++      struct ipu_dc_priv      *priv;
++      int                     chno;
++      bool                    in_use;
++};
++
++struct ipu_dc_priv {
++      void __iomem            *dc_reg;
++      void __iomem            *dc_tmpl_reg;
++      struct ipu_soc          *ipu;
++      struct device           *dev;
++      struct ipu_dc           channels[IPU_DC_NUM_CHANNELS];
++      struct mutex            mutex;
++      struct completion       comp;
++      int                     use_count;
++};
++
++static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
++{
++      u32 reg;
++
++      reg = readl(dc->base + DC_RL_CH(event));
++      reg &= ~(0xffff << (16 * (event & 0x1)));
++      reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
++      writel(reg, dc->base + DC_RL_CH(event));
++}
++
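++/*
++ * Write one DC microcode template word.  Each word occupies two 32-bit
++ * registers in the template memory; the bit layout of opcode, operand,
++ * mapping, waveform, glue and stop bits differs between the WCLK, WRG
++ * and WROD opcodes, hence the three encodings below.
++ */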
++static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
++              int map, int wave, int glue, int sync, int stop)
++{
++      struct ipu_dc_priv *priv = dc->priv;
++      u32 reg1, reg2;
++
++      if (opcode == WCLK) {
++              reg1 = (operand << 20) & 0xfff00000;
++              reg2 = operand >> 12 | opcode << 1 | stop << 9;
++      } else if (opcode == WRG) {
++              reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000);
++              reg2 = operand >> 17 | opcode << 7 | stop << 9;
++      } else {
++              reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
++              reg2 = operand >> 12 | opcode << 4 | stop << 9;
++      }
++      writel(reg1, priv->dc_tmpl_reg + word * 8);
++      writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
++}
++
++static int ipu_bus_format_to_map(u32 fmt)
++{
++      switch (fmt) {
++      default:
++              WARN_ON(1);
++              /* fall-through */
++      case MEDIA_BUS_FMT_RGB888_1X24:
++              return IPU_DC_MAP_RGB24;
++      case MEDIA_BUS_FMT_RGB565_1X16:
++              return IPU_DC_MAP_RGB565;
++      case MEDIA_BUS_FMT_GBR888_1X24:
++              return IPU_DC_MAP_GBR24;
++      case MEDIA_BUS_FMT_RGB666_1X18:
++              return IPU_DC_MAP_BGR666;
++      case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
++              return IPU_DC_MAP_LVDS666;
++      case MEDIA_BUS_FMT_BGR888_1X24:
++              return IPU_DC_MAP_BGR24;
++      }
++}
++
++int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
++              u32 bus_format, u32 width)
++{
++      struct ipu_dc_priv *priv = dc->priv;
++      int addr, sync;
++      u32 reg = 0;
++      int map;
++
++      dc->di = ipu_di_get_num(di);
++
++      map = ipu_bus_format_to_map(bus_format);
++
++      /*
++       * In interlaced mode we need more counters to create the asymmetric
++       * per-field VSYNC signals. The pixel active signal synchronising DC
++       * to DI moves to signal generator #6 (see ipu-di.c). In progressive
++       * mode counter #5 is used.
++       */
++      sync = interlaced ? 6 : 5;
++
++      /* Reserve 5 microcode template words for each DI */
++      if (dc->di)
++              addr = 5;
++      else
++              addr = 0;
++
++      if (interlaced) {
++              dc_link_event(dc, DC_EVT_NL, addr, 3);
++              dc_link_event(dc, DC_EVT_EOL, addr, 2);
++              dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
++
++              /* Init template microcode */
++              dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
++      } else {
++              dc_link_event(dc, DC_EVT_NL, addr + 2, 3);
++              dc_link_event(dc, DC_EVT_EOL, addr + 3, 2);
++              dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1);
++
++              /* Init template microcode */
++              dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
++              dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
++              dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
++              dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
++      }
++
++      dc_link_event(dc, DC_EVT_NF, 0, 0);
++      dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
++      dc_link_event(dc, DC_EVT_EOF, 0, 0);
++      dc_link_event(dc, DC_EVT_EOFIELD, 0, 0);
++      dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0);
++      dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0);
++
++      reg = readl(dc->base + DC_WR_CH_CONF);
++      if (interlaced)
++              reg |= DC_WR_CH_CONF_FIELD_MODE;
++      else
++              reg &= ~DC_WR_CH_CONF_FIELD_MODE;
++      writel(reg, dc->base + DC_WR_CH_CONF);
++
++      writel(0x0, dc->base + DC_WR_CH_ADDR);
++      writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
++
++void ipu_dc_enable(struct ipu_soc *ipu)
++{
++      struct ipu_dc_priv *priv = ipu->dc_priv;
++
++      mutex_lock(&priv->mutex);
++
++      if (!priv->use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
++
++      priv->use_count++;
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dc_enable);
++
++void ipu_dc_enable_channel(struct ipu_dc *dc)
++{
++      u32 reg;
++
++      reg = readl(dc->base + DC_WR_CH_CONF);
++      reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
++      writel(reg, dc->base + DC_WR_CH_CONF);
++}
++EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
++
++void ipu_dc_disable_channel(struct ipu_dc *dc)
++{
++      u32 val;
++
++      val = readl(dc->base + DC_WR_CH_CONF);
++      val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++      writel(val, dc->base + DC_WR_CH_CONF);
++}
++EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
++
++void ipu_dc_disable(struct ipu_soc *ipu)
++{
++      struct ipu_dc_priv *priv = ipu->dc_priv;
++
++      mutex_lock(&priv->mutex);
++
++      priv->use_count--;
++      if (!priv->use_count)
++              ipu_module_disable(priv->ipu, IPU_CONF_DC_EN);
++
++      if (priv->use_count < 0)
++              priv->use_count = 0;
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dc_disable);
++
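++/*
++ * Each pixel format map consists of three byte entries, one per colour
++ * component: DC_MAP_CONF_VAL holds the component's bit offset and mask
++ * on the display bus, and DC_MAP_CONF_PTR points the map's byte slots
++ * at those entries.
++ */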
++static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
++              int byte_num, int offset, int mask)
++{
++      int ptr = map * 3 + byte_num;
++      u32 reg;
++
++      reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr));
++      reg &= ~(0xffff << (16 * (ptr & 0x1)));
++      reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
++      writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr));
++
++      reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
++      reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
++      reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
++      writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
++}
++
++static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
++{
++      u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
++
++      writel(reg & ~(0xffff << (16 * (map & 0x1))),
++                   priv->dc_reg + DC_MAP_CONF_PTR(map));
++}
++
++struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
++{
++      struct ipu_dc_priv *priv = ipu->dc_priv;
++      struct ipu_dc *dc;
++
++      if (channel >= IPU_DC_NUM_CHANNELS)
++              return ERR_PTR(-ENODEV);
++
++      dc = &priv->channels[channel];
++
++      mutex_lock(&priv->mutex);
++
++      if (dc->in_use) {
++              mutex_unlock(&priv->mutex);
++              return ERR_PTR(-EBUSY);
++      }
++
++      dc->in_use = true;
++
++      mutex_unlock(&priv->mutex);
++
++      return dc;
++}
++EXPORT_SYMBOL_GPL(ipu_dc_get);
++
++void ipu_dc_put(struct ipu_dc *dc)
++{
++      struct ipu_dc_priv *priv = dc->priv;
++
++      mutex_lock(&priv->mutex);
++      dc->in_use = false;
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dc_put);
++
++int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
++              unsigned long base, unsigned long template_base)
++{
++      struct ipu_dc_priv *priv;
++      static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
++              0x78, 0, 0x94, 0xb4};
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      mutex_init(&priv->mutex);
++
++      priv->dev = dev;
++      priv->ipu = ipu;
++      priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
++      priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
++      if (!priv->dc_reg || !priv->dc_tmpl_reg)
++              return -ENOMEM;
++
++      for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) {
++              priv->channels[i].chno = i;
++              priv->channels[i].priv = priv;
++              priv->channels[i].base = priv->dc_reg + channel_offsets[i];
++      }
++
++      writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
++                      DC_WR_CH_CONF_PROG_DI_ID,
++                      priv->channels[1].base + DC_WR_CH_CONF);
++      writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
++                      priv->channels[5].base + DC_WR_CH_CONF);
++
++      writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
++              priv->dc_reg + DC_GEN);
++
++      ipu->dc_priv = priv;
++
++      dev_dbg(dev, "DC base: 0x%08lx template base: 0x%08lx\n",
++                      base, template_base);
++
++      /* rgb24 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24);
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */
++
++      /* rgb565 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565);
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */
++
++      /* gbr24 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24);
++      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */
++      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */
++
++      /* bgr666 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666);
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
++
++      /* lvds666 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
++      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
++      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
++
++      /* bgr24 */
++      ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
++      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
++
++      return 0;
++}
++
++void ipu_dc_exit(struct ipu_soc *ipu)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-di.c
+@@ -0,0 +1,745 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/export.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/platform_device.h>
++
++#include <video/imx-ipu-v3.h>
++#include "ipu-prv.h"
++
++struct ipu_di {
++      void __iomem *base;
++      int id;
++      u32 module;
++      struct clk *clk_di;     /* display input clock */
++      struct clk *clk_ipu;    /* IPU bus clock */
++      struct clk *clk_di_pixel; /* resulting pixel clock */
++      bool inuse;
++      struct ipu_soc *ipu;
++};
++
++static DEFINE_MUTEX(di_mutex);
++
++struct di_sync_config {
++      int run_count;
++      int run_src;
++      int offset_count;
++      int offset_src;
++      int repeat_count;
++      int cnt_clr_src;
++      int cnt_polarity_gen_en;
++      int cnt_polarity_clr_src;
++      int cnt_polarity_trigger_src;
++      int cnt_up;
++      int cnt_down;
++};
++
++enum di_pins {
++      DI_PIN11 = 0,
++      DI_PIN12 = 1,
++      DI_PIN13 = 2,
++      DI_PIN14 = 3,
++      DI_PIN15 = 4,
++      DI_PIN16 = 5,
++      DI_PIN17 = 6,
++      DI_PIN_CS = 7,
++
++      DI_PIN_SER_CLK = 0,
++      DI_PIN_SER_RS = 1,
++};
++
++enum di_sync_wave {
++      DI_SYNC_NONE = 0,
++      DI_SYNC_CLK = 1,
++      DI_SYNC_INT_HSYNC = 2,
++      DI_SYNC_HSYNC = 3,
++      DI_SYNC_VSYNC = 4,
++      DI_SYNC_DE = 6,
++
++      DI_SYNC_CNT1 = 2,       /* counter >= 2 only */
++      DI_SYNC_CNT4 = 5,       /* counter >= 5 only */
++      DI_SYNC_CNT5 = 6,       /* counter >= 6 only */
++};
++
++#define SYNC_WAVE 0
++
++#define DI_GENERAL            0x0000
++#define DI_BS_CLKGEN0         0x0004
++#define DI_BS_CLKGEN1         0x0008
++#define DI_SW_GEN0(gen)               (0x000c + 4 * ((gen) - 1))
++#define DI_SW_GEN1(gen)               (0x0030 + 4 * ((gen) - 1))
++#define DI_STP_REP(gen)               (0x0148 + 4 * (((gen) - 1)/2))
++#define DI_SYNC_AS_GEN                0x0054
++#define DI_DW_GEN(gen)                (0x0058 + 4 * (gen))
++#define DI_DW_SET(gen, set)   (0x0088 + 4 * ((gen) + 0xc * (set)))
++#define DI_SER_CONF           0x015c
++#define DI_SSC                        0x0160
++#define DI_POL                        0x0164
++#define DI_AW0                        0x0168
++#define DI_AW1                        0x016c
++#define DI_SCR_CONF           0x0170
++#define DI_STAT                       0x0174
++
++#define DI_SW_GEN0_RUN_COUNT(x)                       ((x) << 19)
++#define DI_SW_GEN0_RUN_SRC(x)                 ((x) << 16)
++#define DI_SW_GEN0_OFFSET_COUNT(x)            ((x) << 3)
++#define DI_SW_GEN0_OFFSET_SRC(x)              ((x) << 0)
++
++#define DI_SW_GEN1_CNT_POL_GEN_EN(x)          ((x) << 29)
++#define DI_SW_GEN1_CNT_CLR_SRC(x)             ((x) << 25)
++#define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x)     ((x) << 12)
++#define DI_SW_GEN1_CNT_POL_CLR_SRC(x)         ((x) << 9)
++#define DI_SW_GEN1_CNT_DOWN(x)                        ((x) << 16)
++#define DI_SW_GEN1_CNT_UP(x)                  (x)
++#define DI_SW_GEN1_AUTO_RELOAD                        (0x10000000)
++
++#define DI_DW_GEN_ACCESS_SIZE_OFFSET          24
++#define DI_DW_GEN_COMPONENT_SIZE_OFFSET               16
++
++#define DI_GEN_POLARITY_1                     (1 << 0)
++#define DI_GEN_POLARITY_2                     (1 << 1)
++#define DI_GEN_POLARITY_3                     (1 << 2)
++#define DI_GEN_POLARITY_4                     (1 << 3)
++#define DI_GEN_POLARITY_5                     (1 << 4)
++#define DI_GEN_POLARITY_6                     (1 << 5)
++#define DI_GEN_POLARITY_7                     (1 << 6)
++#define DI_GEN_POLARITY_8                     (1 << 7)
++#define DI_GEN_POLARITY_DISP_CLK              (1 << 17)
++#define DI_GEN_DI_CLK_EXT                     (1 << 20)
++#define DI_GEN_DI_VSYNC_EXT                   (1 << 21)
++
++#define DI_POL_DRDY_DATA_POLARITY             (1 << 7)
++#define DI_POL_DRDY_POLARITY_15                       (1 << 4)
++
++#define DI_VSYNC_SEL_OFFSET                   13
++
++static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset)
++{
++      return readl(di->base + offset);
++}
++
++static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
++{
++      writel(value, di->base + offset);
++}
++
++static void ipu_di_data_wave_config(struct ipu_di *di,
++                                   int wave_gen,
++                                   int access_size, int component_size)
++{
++      u32 reg;
++      reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
++          (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
++      ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
++}
++
++static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin,
++              int set, int up, int down)
++{
++      u32 reg;
++
++      reg = ipu_di_read(di, DI_DW_GEN(wave_gen));
++      reg &= ~(0x3 << (di_pin * 2));
++      reg |= set << (di_pin * 2);
++      ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
++
++      ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set));
++}
++
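++/*
++ * Program a block of DI sync waveform generators from an array of
++ * di_sync_config entries.  Generator numbering starts at 1, and a
++ * repeat_count of 0 makes the counter auto-reload (free running).
++ */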
++static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
++              int start, int count)
++{
++      u32 reg;
++      int i;
++
++      for (i = 0; i < count; i++) {
++              struct di_sync_config *c = &config[i];
++              int wave_gen = start + i + 1;
++
++              if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) ||
++                              (c->repeat_count >= 0x1000) ||
++                              (c->cnt_up >= 0x400) ||
++                              (c->cnt_down >= 0x400)) {
++                      dev_err(di->ipu->dev, "DI%d counters out of range.\n",
++                                      di->id);
++                      return;
++              }
++
++              reg = DI_SW_GEN0_RUN_COUNT(c->run_count) |
++                      DI_SW_GEN0_RUN_SRC(c->run_src) |
++                      DI_SW_GEN0_OFFSET_COUNT(c->offset_count) |
++                      DI_SW_GEN0_OFFSET_SRC(c->offset_src);
++              ipu_di_write(di, reg, DI_SW_GEN0(wave_gen));
++
++              reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) |
++                      DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) |
++                      DI_SW_GEN1_CNT_POL_TRIGGER_SRC(
++                                      c->cnt_polarity_trigger_src) |
++                      DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) |
++                      DI_SW_GEN1_CNT_DOWN(c->cnt_down) |
++                      DI_SW_GEN1_CNT_UP(c->cnt_up);
++
++              /* Enable auto reload */
++              if (c->repeat_count == 0)
++                      reg |= DI_SW_GEN1_AUTO_RELOAD;
++
++              ipu_di_write(di, reg, DI_SW_GEN1(wave_gen));
++
++              reg = ipu_di_read(di, DI_STP_REP(wave_gen));
++              reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1)));
++              reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1));
++              ipu_di_write(di, reg, DI_STP_REP(wave_gen));
++      }
++}
++
++static void ipu_di_sync_config_interlaced(struct ipu_di *di,
++              struct ipu_di_signal_cfg *sig)
++{
++      u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
++              sig->mode.hback_porch + sig->mode.hfront_porch;
++      u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
++              sig->mode.vback_porch + sig->mode.vfront_porch;
++      struct di_sync_config cfg[] = {
++              {
++                      /* 1: internal VSYNC for each frame */
++                      .run_count = v_total * 2 - 1,
++                      .run_src = 3,                   /* == counter 7 */
++              }, {
++                      /* PIN2: HSYNC waveform */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
++                      .cnt_down = sig->mode.hsync_len * 2,
++              }, {
++                      /* PIN3: VSYNC waveform */
++                      .run_count = v_total - 1,
++                      .run_src = 4,                   /* == counter 7 */
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = 4,  /* == counter 7 */
++                      .cnt_down = sig->mode.vsync_len * 2,
++                      .cnt_clr_src = DI_SYNC_CNT1,
++              }, {
++                      /* 4: Field */
++                      .run_count = v_total / 2,
++                      .run_src = DI_SYNC_HSYNC,
++                      .offset_count = h_total / 2,
++                      .offset_src = DI_SYNC_CLK,
++                      .repeat_count = 2,
++                      .cnt_clr_src = DI_SYNC_CNT1,
++              }, {
++                      /* 5: Active lines */
++                      .run_src = DI_SYNC_HSYNC,
++                      .offset_count = (sig->mode.vsync_len +
++                                       sig->mode.vback_porch) / 2,
++                      .offset_src = DI_SYNC_HSYNC,
++                      .repeat_count = sig->mode.vactive / 2,
++                      .cnt_clr_src = DI_SYNC_CNT4,
++              }, {
++                      /* 6: Active pixel, referenced by DC */
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = sig->mode.hsync_len +
++                                      sig->mode.hback_porch,
++                      .offset_src = DI_SYNC_CLK,
++                      .repeat_count = sig->mode.hactive,
++                      .cnt_clr_src = DI_SYNC_CNT5,
++              }, {
++                      /* 7: Half line HSYNC */
++                      .run_count = h_total / 2 - 1,
++                      .run_src = DI_SYNC_CLK,
++              }
++      };
++
++      ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
++
++      ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
++}
++
++static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
++              struct ipu_di_signal_cfg *sig, int div)
++{
++      u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
++              sig->mode.hback_porch + sig->mode.hfront_porch;
++      u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
++              sig->mode.vback_porch + sig->mode.vfront_porch;
++      struct di_sync_config cfg[] = {
++              {
++                      /* 1: INT_HSYNC */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++              } , {
++                      /* PIN2: HSYNC */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = div * sig->v_to_h_sync,
++                      .offset_src = DI_SYNC_CLK,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
++                      .cnt_down = sig->mode.hsync_len * 2,
++              } , {
++                      /* PIN3: VSYNC */
++                      .run_count = v_total - 1,
++                      .run_src = DI_SYNC_INT_HSYNC,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
++                      .cnt_down = sig->mode.vsync_len * 2,
++              } , {
++                      /* 4: Line Active */
++                      .run_src = DI_SYNC_HSYNC,
++                      .offset_count = sig->mode.vsync_len +
++                                      sig->mode.vback_porch,
++                      .offset_src = DI_SYNC_HSYNC,
++                      .repeat_count = sig->mode.vactive,
++                      .cnt_clr_src = DI_SYNC_VSYNC,
++              } , {
++                      /* 5: Pixel Active, referenced by DC */
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = sig->mode.hsync_len +
++                                      sig->mode.hback_porch,
++                      .offset_src = DI_SYNC_CLK,
++                      .repeat_count = sig->mode.hactive,
++                      .cnt_clr_src = 5, /* Line Active */
++              } , {
++                      /* unused */
++              } , {
++                      /* unused */
++              } , {
++                      /* unused */
++              } , {
++                      /* unused */
++              },
++      };
++      /* can't use #7 and #8 for line active and pixel active counters */
++      struct di_sync_config cfg_vga[] = {
++              {
++                      /* 1: INT_HSYNC */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++              } , {
++                      /* 2: VSYNC */
++                      .run_count = v_total - 1,
++                      .run_src = DI_SYNC_INT_HSYNC,
++              } , {
++                      /* 3: Line Active */
++                      .run_src = DI_SYNC_INT_HSYNC,
++                      .offset_count = sig->mode.vsync_len +
++                                      sig->mode.vback_porch,
++                      .offset_src = DI_SYNC_INT_HSYNC,
++                      .repeat_count = sig->mode.vactive,
++                      .cnt_clr_src = 3 /* VSYNC */,
++              } , {
++                      /* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
++                      .offset_src = DI_SYNC_CLK,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
++                      .cnt_down = sig->mode.hsync_len * 2,
++              } , {
++                      /* 5: Pixel Active signal to DC */
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = sig->mode.hsync_len +
++                                      sig->mode.hback_porch,
++                      .offset_src = DI_SYNC_CLK,
++                      .repeat_count = sig->mode.hactive,
++                      .cnt_clr_src = 4, /* Line Active */
++              } , {
++                      /* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
++                      .run_count = v_total - 1,
++                      .run_src = DI_SYNC_INT_HSYNC,
++                      .offset_count = 1, /* magic value from Freescale TVE driver */
++                      .offset_src = DI_SYNC_INT_HSYNC,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
++                      .cnt_down = sig->mode.vsync_len * 2,
++              } , {
++                      /* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
++                      .run_count = h_total - 1,
++                      .run_src = DI_SYNC_CLK,
++                      .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
++                      .offset_src = DI_SYNC_CLK,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
++                      .cnt_down = sig->mode.hsync_len * 2,
++              } , {
++                      /* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
++                      .run_count = v_total - 1,
++                      .run_src = DI_SYNC_INT_HSYNC,
++                      .offset_count = 1, /* magic value from Freescale TVE driver */
++                      .offset_src = DI_SYNC_INT_HSYNC,
++                      .cnt_polarity_gen_en = 1,
++                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
++                      .cnt_down = sig->mode.vsync_len * 2,
++              } , {
++                      /* unused */
++              },
++      };
++
++      ipu_di_write(di, v_total - 1, DI_SCR_CONF);
++      if (sig->hsync_pin == 2 && sig->vsync_pin == 3)
++              ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
++      else
++              ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
++}
++
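++/*
++ * Select the DI pixel clock source (external DI clock vs. internal IPU
++ * clock), program the integer divider in DI_BS_CLKGEN0, the duty cycle
++ * in DI_BS_CLKGEN1 and the clock source mux bit in DI_GENERAL.
++ */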
++static void ipu_di_config_clock(struct ipu_di *di,
++      const struct ipu_di_signal_cfg *sig)
++{
++      struct clk *clk;
++      unsigned clkgen0;
++      uint32_t val;
++
++      if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
++              /*
++               * CLKMODE_EXT means we must use the DI clock: this is
++               * needed for things like LVDS which needs to feed the
++               * DI and LDB with the same pixel clock.
++               */
++              clk = di->clk_di;
++
++              if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
++                      /*
++                       * CLKMODE_SYNC means that we want the DI to be
++                       * clocked at the same rate as the parent clock.
++                       * This is needed (e.g.) for LDB which needs to be
++                       * fed with the same pixel clock.  We assume that
++                       * the LDB clock has already been set correctly.
++                       */
++                      clkgen0 = 1 << 4;
++              } else {
++                      /*
++                       * We can use the divider.  We should really have
++                       * a flag here indicating whether the bridge can
++                       * cope with a fractional divider or not.  For the
++                       * time being, let's go for simplicity and
++                       * reliability.
++                       */
++                      unsigned long in_rate;
++                      unsigned div;
++
++                      clk_set_rate(clk, sig->mode.pixelclock);
++
++                      in_rate = clk_get_rate(clk);
++                      div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
++                      div = clamp(div, 1U, 255U);
++
++                      clkgen0 = div << 4;
++              }
++      } else {
++              /*
++               * For other interfaces, we can arbitrarily select between
++               * the DI specific clock and the internal IPU clock.  See
++               * DI_GENERAL bit 20.  We select the IPU clock if it can
++               * give us a clock rate within 1% of the requested frequency,
++               * otherwise we use the DI clock.
++               */
++              unsigned long rate, clkrate;
++              unsigned div, error;
++
++              clkrate = clk_get_rate(di->clk_ipu);
++              div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
++              div = clamp(div, 1U, 255U);
++              rate = clkrate / div;
++
++              error = rate / (sig->mode.pixelclock / 1000);
++
++              dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %d.%u%%\n",
++                      rate, div, (signed)(error - 1000) / 10, error % 10);
++
++              /* Allow a 1% error */
++              if (error < 1010 && error >= 990) {
++                      clk = di->clk_ipu;
++
++                      clkgen0 = div << 4;
++              } else {
++                      unsigned long in_rate;
++                      unsigned div;
++
++                      clk = di->clk_di;
++
++                      clk_set_rate(clk, sig->mode.pixelclock);
++
++                      in_rate = clk_get_rate(clk);
++                      div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
++                      div = clamp(div, 1U, 255U);
++
++                      clkgen0 = div << 4;
++              }
++      }
++
++      di->clk_di_pixel = clk;
++
++      /* Set the divider */
++      ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
++
++      /*
++       * Set the high/low periods.  Bits 24:16 give us the falling edge,
++       * and bits 8:0 give the rising edge.  LSB is fraction, and is
++       * based on the divider above.  We want a 50% duty cycle, so set
++       * the falling edge to be half the divider.
++       */
++      ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
++
++      /* Finally select the input clock */
++      val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
++      if (clk == di->clk_di)
++              val |= DI_GEN_DI_CLK_EXT;
++      ipu_di_write(di, val, DI_GENERAL);
++
++      dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
++              sig->mode.pixelclock,
++              clk_get_rate(di->clk_ipu),
++              clk_get_rate(di->clk_di),
++              clk == di->clk_di ? "DI" : "IPU",
++              clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
++}
++
++/*
++ * This function is called to adjust a video mode to IPU restrictions.
++ * It is meant to be called from drm crtc mode_fixup() methods.
++ */
++int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
++{
++      u32 diff;
++
++      if (mode->vfront_porch >= 2)
++              return 0;
++
++      diff = 2 - mode->vfront_porch;
++
++      if (mode->vback_porch >= diff) {
++              mode->vfront_porch = 2;
++              mode->vback_porch -= diff;
++      } else if (mode->vsync_len > diff) {
++              mode->vfront_porch = 2;
++              mode->vsync_len = mode->vsync_len - diff;
++      } else {
++              dev_warn(di->ipu->dev, "failed to adjust videomode\n");
++              return -EINVAL;
++      }
++
++      dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
++
++static u32 ipu_di_gen_polarity(int pin)
++{
++      switch (pin) {
++      case 1:
++              return DI_GEN_POLARITY_1;
++      case 2:
++              return DI_GEN_POLARITY_2;
++      case 3:
++              return DI_GEN_POLARITY_3;
++      case 4:
++              return DI_GEN_POLARITY_4;
++      case 5:
++              return DI_GEN_POLARITY_5;
++      case 6:
++              return DI_GEN_POLARITY_6;
++      case 7:
++              return DI_GEN_POLARITY_7;
++      case 8:
++              return DI_GEN_POLARITY_8;
++      }
++      return 0;
++}
++
++int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
++{
++      u32 reg;
++      u32 di_gen, vsync_cnt;
++      u32 div;
++
++      dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
++              di->id, sig->mode.hactive, sig->mode.vactive);
++
++      dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
++              clk_get_rate(di->clk_ipu),
++              clk_get_rate(di->clk_di),
++              sig->mode.pixelclock);
++
++      mutex_lock(&di_mutex);
++
++      ipu_di_config_clock(di, sig);
++
++      div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
++      div = div / 16;         /* Now divider is integer portion */
++
++      /* Setup pixel clock timing */
++      /* Down time is half of period */
++      ipu_di_write(di, (div << 16), DI_BS_CLKGEN1);
++
++      ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1);
++      ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
++
++      di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
++      di_gen |= DI_GEN_DI_VSYNC_EXT;
++
++      if (sig->mode.flags & DISPLAY_FLAGS_INTERLACED) {
++              ipu_di_sync_config_interlaced(di, sig);
++
++              /* set y_sel = 1 */
++              di_gen |= 0x10000000;
++
++              vsync_cnt = 3;
++      } else {
++              ipu_di_sync_config_noninterlaced(di, sig, div);
++
++              vsync_cnt = 3;
++              if (di->id == 1)
++                      /*
++                       * TODO: change only for TVEv2, parallel display
++                       * uses pin 2 / 3
++                       */
++                      if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
++                              vsync_cnt = 6;
++      }
++
++      if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
++              di_gen |= ipu_di_gen_polarity(sig->hsync_pin);
++      if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
++              di_gen |= ipu_di_gen_polarity(sig->vsync_pin);
++
++      if (sig->clk_pol)
++              di_gen |= DI_GEN_POLARITY_DISP_CLK;
++
++      ipu_di_write(di, di_gen, DI_GENERAL);
++
++      ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002,
++                   DI_SYNC_AS_GEN);
++
++      reg = ipu_di_read(di, DI_POL);
++      reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
++
++      if (sig->enable_pol)
++              reg |= DI_POL_DRDY_POLARITY_15;
++      if (sig->data_pol)
++              reg |= DI_POL_DRDY_DATA_POLARITY;
++
++      ipu_di_write(di, reg, DI_POL);
++
++      mutex_unlock(&di_mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
++
++int ipu_di_enable(struct ipu_di *di)
++{
++      int ret;
++
++      WARN_ON(IS_ERR(di->clk_di_pixel));
++
++      ret = clk_prepare_enable(di->clk_di_pixel);
++      if (ret)
++              return ret;
++
++      ipu_module_enable(di->ipu, di->module);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_di_enable);
++
++int ipu_di_disable(struct ipu_di *di)
++{
++      WARN_ON(IS_ERR(di->clk_di_pixel));
++
++      ipu_module_disable(di->ipu, di->module);
++
++      clk_disable_unprepare(di->clk_di_pixel);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_di_disable);
++
++int ipu_di_get_num(struct ipu_di *di)
++{
++      return di->id;
++}
++EXPORT_SYMBOL_GPL(ipu_di_get_num);
++
++static DEFINE_MUTEX(ipu_di_lock);
++
++struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp)
++{
++      struct ipu_di *di;
++
++      if (disp > 1)
++              return ERR_PTR(-EINVAL);
++
++      di = ipu->di_priv[disp];
++
++      mutex_lock(&ipu_di_lock);
++
++      if (di->inuse) {
++              di = ERR_PTR(-EBUSY);
++              goto out;
++      }
++
++      di->inuse = true;
++out:
++      mutex_unlock(&ipu_di_lock);
++
++      return di;
++}
++EXPORT_SYMBOL_GPL(ipu_di_get);
++
++void ipu_di_put(struct ipu_di *di)
++{
++      mutex_lock(&ipu_di_lock);
++
++      di->inuse = false;
++
++      mutex_unlock(&ipu_di_lock);
++}
++EXPORT_SYMBOL_GPL(ipu_di_put);
++
++int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
++              unsigned long base,
++              u32 module, struct clk *clk_ipu)
++{
++      struct ipu_di *di;
++
++      if (id > 1)
++              return -ENODEV;
++
++      di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
++      if (!di)
++              return -ENOMEM;
++
++      ipu->di_priv[id] = di;
++
++      di->clk_di = devm_clk_get(dev, id ? "di1" : "di0");
++      if (IS_ERR(di->clk_di))
++              return PTR_ERR(di->clk_di);
++
++      di->module = module;
++      di->id = id;
++      di->clk_ipu = clk_ipu;
++      di->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!di->base)
++              return -ENOMEM;
++
++      ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
++
++      dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
++                      id, base, di->base);
++      di->inuse = false;
++      di->ipu = ipu;
++
++      return 0;
++}
++
++void ipu_di_exit(struct ipu_soc *ipu, int id)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-dmfc.c
+@@ -0,0 +1,214 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/export.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++
++#include <video/imx-ipu-v3.h>
++#include "ipu-prv.h"
++
++#define DMFC_RD_CHAN          0x0000
++#define DMFC_WR_CHAN          0x0004
++#define DMFC_WR_CHAN_DEF      0x0008
++#define DMFC_DP_CHAN          0x000c
++#define DMFC_DP_CHAN_DEF      0x0010
++#define DMFC_GENERAL1         0x0014
++#define DMFC_GENERAL2         0x0018
++#define DMFC_IC_CTRL          0x001c
++#define DMFC_WR_CHAN_ALT      0x0020
++#define DMFC_WR_CHAN_DEF_ALT  0x0024
++#define DMFC_DP_CHAN_ALT      0x0028
++#define DMFC_DP_CHAN_DEF_ALT  0x002c
++#define DMFC_GENERAL1_ALT     0x0030
++#define DMFC_STAT             0x0034
++
++#define DMFC_WR_CHAN_1_28             0
++#define DMFC_WR_CHAN_2_41             8
++#define DMFC_WR_CHAN_1C_42            16
++#define DMFC_WR_CHAN_2C_43            24
++
++#define DMFC_DP_CHAN_5B_23            0
++#define DMFC_DP_CHAN_5F_27            8
++#define DMFC_DP_CHAN_6B_24            16
++#define DMFC_DP_CHAN_6F_29            24
++
++struct dmfc_channel_data {
++      int             ipu_channel;
++      unsigned long   channel_reg;
++      unsigned long   shift;
++      unsigned        eot_shift;
++      unsigned        max_fifo_lines;
++};
++
++static const struct dmfc_channel_data dmfcdata[] = {
++      {
++              .ipu_channel    = IPUV3_CHANNEL_MEM_BG_SYNC,
++              .channel_reg    = DMFC_DP_CHAN,
++              .shift          = DMFC_DP_CHAN_5B_23,
++              .eot_shift      = 20,
++              .max_fifo_lines = 3,
++      }, {
++              .ipu_channel    = 24,
++              .channel_reg    = DMFC_DP_CHAN,
++              .shift          = DMFC_DP_CHAN_6B_24,
++              .eot_shift      = 22,
++              .max_fifo_lines = 1,
++      }, {
++              .ipu_channel    = IPUV3_CHANNEL_MEM_FG_SYNC,
++              .channel_reg    = DMFC_DP_CHAN,
++              .shift          = DMFC_DP_CHAN_5F_27,
++              .eot_shift      = 21,
++              .max_fifo_lines = 2,
++      }, {
++              .ipu_channel    = IPUV3_CHANNEL_MEM_DC_SYNC,
++              .channel_reg    = DMFC_WR_CHAN,
++              .shift          = DMFC_WR_CHAN_1_28,
++              .eot_shift      = 16,
++              .max_fifo_lines = 2,
++      }, {
++              .ipu_channel    = 29,
++              .channel_reg    = DMFC_DP_CHAN,
++              .shift          = DMFC_DP_CHAN_6F_29,
++              .eot_shift      = 23,
++              .max_fifo_lines = 1,
++      },
++};
++
++#define DMFC_NUM_CHANNELS     ARRAY_SIZE(dmfcdata)
++
++struct ipu_dmfc_priv;
++
++struct dmfc_channel {
++      unsigned                        slots;
++      struct ipu_soc                  *ipu;
++      struct ipu_dmfc_priv            *priv;
++      const struct dmfc_channel_data  *data;
++};
++
++struct ipu_dmfc_priv {
++      struct ipu_soc *ipu;
++      struct device *dev;
++      struct dmfc_channel channels[DMFC_NUM_CHANNELS];
++      struct mutex mutex;
++      void __iomem *base;
++      int use_count;
++};
++
++int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
++{
++      struct ipu_dmfc_priv *priv = dmfc->priv;
++      mutex_lock(&priv->mutex);
++
++      if (!priv->use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN);
++
++      priv->use_count++;
++
++      mutex_unlock(&priv->mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
++
++void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
++{
++      struct ipu_dmfc_priv *priv = dmfc->priv;
++
++      mutex_lock(&priv->mutex);
++
++      priv->use_count--;
++
++      if (!priv->use_count)
++              ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
++
++      if (priv->use_count < 0)
++              priv->use_count = 0;
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
++
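++/*
++ * Set or clear the channel's wait-for-EOT bit in DMFC_GENERAL1: if the
++ * number of lines buffered by the channel's FIFO allocation,
++ * (slots * 64 * 4) / width, exceeds max_fifo_lines, the channel must
++ * wait for end-of-transfer before starting a new burst.
++ */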
++void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
++{
++      struct ipu_dmfc_priv *priv = dmfc->priv;
++      u32 dmfc_gen1;
++
++      mutex_lock(&priv->mutex);
++
++      dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
++
++      if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
++              dmfc_gen1 |= 1 << dmfc->data->eot_shift;
++      else
++              dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);
++
++      writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
++
++struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
++{
++      struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
++      int i;
++
++      for (i = 0; i < DMFC_NUM_CHANNELS; i++)
++              if (dmfcdata[i].ipu_channel == ipu_channel)
++                      return &priv->channels[i];
++      return ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL_GPL(ipu_dmfc_get);
++
++void ipu_dmfc_put(struct dmfc_channel *dmfc)
++{
++}
++EXPORT_SYMBOL_GPL(ipu_dmfc_put);
++
++int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
++              struct clk *ipu_clk)
++{
++      struct ipu_dmfc_priv *priv;
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!priv->base)
++              return -ENOMEM;
++
++      priv->dev = dev;
++      priv->ipu = ipu;
++      mutex_init(&priv->mutex);
++
++      ipu->dmfc_priv = priv;
++
++      for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
++              priv->channels[i].priv = priv;
++              priv->channels[i].ipu = ipu;
++              priv->channels[i].data = &dmfcdata[i];
++
++              if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
++                  dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
++                  dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
++                      priv->channels[i].slots = 2;
++      }
++
++      writel(0x00000050, priv->base + DMFC_WR_CHAN);
++      writel(0x00005654, priv->base + DMFC_DP_CHAN);
++      writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
++      writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
++      writel(0x00000003, priv->base + DMFC_GENERAL1);
++
++      return 0;
++}
++
++void ipu_dmfc_exit(struct ipu_soc *ipu)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-dp.c
+@@ -0,0 +1,357 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/export.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/err.h>
++
++#include <video/imx-ipu-v3.h>
++#include "ipu-prv.h"
++
++#define DP_SYNC 0
++#define DP_ASYNC0 0x60
++#define DP_ASYNC1 0xBC
++
++#define DP_COM_CONF           0x0
++#define DP_GRAPH_WIND_CTRL    0x0004
++#define DP_FG_POS             0x0008
++#define DP_CSC_A_0            0x0044
++#define DP_CSC_A_1            0x0048
++#define DP_CSC_A_2            0x004C
++#define DP_CSC_A_3            0x0050
++#define DP_CSC_0              0x0054
++#define DP_CSC_1              0x0058
++
++#define DP_COM_CONF_FG_EN             (1 << 0)
++#define DP_COM_CONF_GWSEL             (1 << 1)
++#define DP_COM_CONF_GWAM              (1 << 2)
++#define DP_COM_CONF_GWCKE             (1 << 3)
++#define DP_COM_CONF_CSC_DEF_MASK      (3 << 8)
++#define DP_COM_CONF_CSC_DEF_OFFSET    8
++#define DP_COM_CONF_CSC_DEF_FG                (3 << 8)
++#define DP_COM_CONF_CSC_DEF_BG                (2 << 8)
++#define DP_COM_CONF_CSC_DEF_BOTH      (1 << 8)
++
++#define IPUV3_NUM_FLOWS               3
++
++struct ipu_dp_priv;
++
++struct ipu_dp {
++      u32 flow;
++      bool in_use;
++      bool foreground;
++      enum ipu_color_space in_cs;
++};
++
++struct ipu_flow {
++      struct ipu_dp foreground;
++      struct ipu_dp background;
++      enum ipu_color_space out_cs;
++      void __iomem *base;
++      struct ipu_dp_priv *priv;
++};
++
++struct ipu_dp_priv {
++      struct ipu_soc *ipu;
++      struct device *dev;
++      void __iomem *base;
++      struct ipu_flow flow[IPUV3_NUM_FLOWS];
++      struct mutex mutex;
++      int use_count;
++};
++
++static u32 ipu_dp_flow_base[] = {DP_SYNC, DP_ASYNC0, DP_ASYNC1};
++
++static inline struct ipu_flow *to_flow(struct ipu_dp *dp)
++{
++      if (dp->foreground)
++              return container_of(dp, struct ipu_flow, foreground);
++      else
++              return container_of(dp, struct ipu_flow, background);
++}
++
++int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
++              u8 alpha, bool bg_chan)
++{
++      struct ipu_flow *flow = to_flow(dp);
++      struct ipu_dp_priv *priv = flow->priv;
++      u32 reg;
++
++      mutex_lock(&priv->mutex);
++
++      reg = readl(flow->base + DP_COM_CONF);
++      if (bg_chan)
++              reg &= ~DP_COM_CONF_GWSEL;
++      else
++              reg |= DP_COM_CONF_GWSEL;
++      writel(reg, flow->base + DP_COM_CONF);
++
++      if (enable) {
++              reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;
++              writel(reg | ((u32) alpha << 24),
++                           flow->base + DP_GRAPH_WIND_CTRL);
++
++              reg = readl(flow->base + DP_COM_CONF);
++              writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
++      } else {
++              reg = readl(flow->base + DP_COM_CONF);
++              writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
++      }
++
++      ipu_srm_dp_update(priv->ipu, true);
++
++      mutex_unlock(&priv->mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_set_global_alpha);
++
++int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
++{
++      struct ipu_flow *flow = to_flow(dp);
++      struct ipu_dp_priv *priv = flow->priv;
++
++      writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
++
++      ipu_srm_dp_update(priv->ipu, true);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
++
++static void ipu_dp_csc_init(struct ipu_flow *flow,
++              enum ipu_color_space in,
++              enum ipu_color_space out,
++              u32 place)
++{
++      u32 reg;
++
++      reg = readl(flow->base + DP_COM_CONF);
++      reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++
++      if (in == out) {
++              writel(reg, flow->base + DP_COM_CONF);
++              return;
++      }
++
++      if (in == IPUV3_COLORSPACE_RGB && out == IPUV3_COLORSPACE_YUV) {
++              writel(0x099 | (0x12d << 16), flow->base + DP_CSC_A_0);
++              writel(0x03a | (0x3a9 << 16), flow->base + DP_CSC_A_1);
++              writel(0x356 | (0x100 << 16), flow->base + DP_CSC_A_2);
++              writel(0x100 | (0x329 << 16), flow->base + DP_CSC_A_3);
++              writel(0x3d6 | (0x0000 << 16) | (2 << 30),
++                              flow->base + DP_CSC_0);
++              writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
++                              flow->base + DP_CSC_1);
++      } else {
++              writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
++              writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
++              writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
++              writel(0x095 | (0x0ff << 16), flow->base + DP_CSC_A_3);
++              writel(0x000 | (0x3e42 << 16) | (1 << 30),
++                              flow->base + DP_CSC_0);
++              writel(0x10a | (1 << 14) | (0x3dd6 << 16) | (1 << 30),
++                              flow->base + DP_CSC_1);
++      }
++
++      reg |= place;
++
++      writel(reg, flow->base + DP_COM_CONF);
++}
++
++int ipu_dp_setup_channel(struct ipu_dp *dp,
++              enum ipu_color_space in,
++              enum ipu_color_space out)
++{
++      struct ipu_flow *flow = to_flow(dp);
++      struct ipu_dp_priv *priv = flow->priv;
++
++      mutex_lock(&priv->mutex);
++
++      dp->in_cs = in;
++
++      if (!dp->foreground)
++              flow->out_cs = out;
++
++      if (flow->foreground.in_cs == flow->background.in_cs) {
++              /*
++               * foreground and background are of same colorspace, put
++               * colorspace converter after combining unit.
++               */
++              ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
++                              DP_COM_CONF_CSC_DEF_BOTH);
++      } else {
++              if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
++                  flow->foreground.in_cs == flow->out_cs)
++                      /*
++                       * foreground identical to output, apply color
++                       * conversion on background
++                       */
++                      ipu_dp_csc_init(flow, flow->background.in_cs,
++                                      flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
++              else
++                      ipu_dp_csc_init(flow, flow->foreground.in_cs,
++                                      flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
++      }
++
++      ipu_srm_dp_update(priv->ipu, true);
++
++      mutex_unlock(&priv->mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
++
++int ipu_dp_enable(struct ipu_soc *ipu)
++{
++      struct ipu_dp_priv *priv = ipu->dp_priv;
++
++      mutex_lock(&priv->mutex);
++
++      if (!priv->use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_DP_EN);
++
++      priv->use_count++;
++
++      mutex_unlock(&priv->mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_enable);
++
++int ipu_dp_enable_channel(struct ipu_dp *dp)
++{
++      struct ipu_flow *flow = to_flow(dp);
++      struct ipu_dp_priv *priv = flow->priv;
++      u32 reg;
++
++      if (!dp->foreground)
++              return 0;
++
++      mutex_lock(&priv->mutex);
++
++      reg = readl(flow->base + DP_COM_CONF);
++      reg |= DP_COM_CONF_FG_EN;
++      writel(reg, flow->base + DP_COM_CONF);
++
++      ipu_srm_dp_update(priv->ipu, true);
++
++      mutex_unlock(&priv->mutex);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
++
++void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
++{
++      struct ipu_flow *flow = to_flow(dp);
++      struct ipu_dp_priv *priv = flow->priv;
++      u32 reg, csc;
++
++      dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
++
++      if (!dp->foreground)
++              return;
++
++      mutex_lock(&priv->mutex);
++
++      reg = readl(flow->base + DP_COM_CONF);
++      csc = reg & DP_COM_CONF_CSC_DEF_MASK;
++      reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++      if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
++              reg |= DP_COM_CONF_CSC_DEF_BG;
++
++      reg &= ~DP_COM_CONF_FG_EN;
++      writel(reg, flow->base + DP_COM_CONF);
++
++      writel(0, flow->base + DP_FG_POS);
++      ipu_srm_dp_update(priv->ipu, sync);
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
++
++void ipu_dp_disable(struct ipu_soc *ipu)
++{
++      struct ipu_dp_priv *priv = ipu->dp_priv;
++
++      mutex_lock(&priv->mutex);
++
++      priv->use_count--;
++
++      if (!priv->use_count)
++              ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
++
++      if (priv->use_count < 0)
++              priv->use_count = 0;
++
++      mutex_unlock(&priv->mutex);
++}
++EXPORT_SYMBOL_GPL(ipu_dp_disable);
++
++struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
++{
++      struct ipu_dp_priv *priv = ipu->dp_priv;
++      struct ipu_dp *dp;
++
++      if ((flow >> 1) >= IPUV3_NUM_FLOWS)
++              return ERR_PTR(-EINVAL);
++
++      if (flow & 1)
++              dp = &priv->flow[flow >> 1].foreground;
++      else
++              dp = &priv->flow[flow >> 1].background;
++
++      if (dp->in_use)
++              return ERR_PTR(-EBUSY);
++
++      dp->in_use = true;
++
++      return dp;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_get);
++
++void ipu_dp_put(struct ipu_dp *dp)
++{
++      dp->in_use = false;
++}
++EXPORT_SYMBOL_GPL(ipu_dp_put);
++
++int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
++{
++      struct ipu_dp_priv *priv;
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++      priv->dev = dev;
++      priv->ipu = ipu;
++
++      ipu->dp_priv = priv;
++
++      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!priv->base)
++              return -ENOMEM;
++
++      mutex_init(&priv->mutex);
++
++      for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
++              priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
++              priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
++              priv->flow[i].foreground.foreground = true;
++              priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
++              priv->flow[i].priv = priv;
++      }
++
++      return 0;
++}
++
++void ipu_dp_exit(struct ipu_soc *ipu)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-ic.c
+@@ -0,0 +1,761 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012-2014 Mentor Graphics Inc.
++ * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/spinlock.h>
++#include <linux/bitrev.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/sizes.h>
++#include "ipu-prv.h"
++
++/* IC Register Offsets */
++#define IC_CONF                 0x0000
++#define IC_PRP_ENC_RSC          0x0004
++#define IC_PRP_VF_RSC           0x0008
++#define IC_PP_RSC               0x000C
++#define IC_CMBP_1               0x0010
++#define IC_CMBP_2               0x0014
++#define IC_IDMAC_1              0x0018
++#define IC_IDMAC_2              0x001C
++#define IC_IDMAC_3              0x0020
++#define IC_IDMAC_4              0x0024
++
++/* IC Register Fields */
++#define IC_CONF_PRPENC_EN       (1 << 0)
++#define IC_CONF_PRPENC_CSC1     (1 << 1)
++#define IC_CONF_PRPENC_ROT_EN   (1 << 2)
++#define IC_CONF_PRPVF_EN        (1 << 8)
++#define IC_CONF_PRPVF_CSC1      (1 << 9)
++#define IC_CONF_PRPVF_CSC2      (1 << 10)
++#define IC_CONF_PRPVF_CMB       (1 << 11)
++#define IC_CONF_PRPVF_ROT_EN    (1 << 12)
++#define IC_CONF_PP_EN           (1 << 16)
++#define IC_CONF_PP_CSC1         (1 << 17)
++#define IC_CONF_PP_CSC2         (1 << 18)
++#define IC_CONF_PP_CMB          (1 << 19)
++#define IC_CONF_PP_ROT_EN       (1 << 20)
++#define IC_CONF_IC_GLB_LOC_A    (1 << 28)
++#define IC_CONF_KEY_COLOR_EN    (1 << 29)
++#define IC_CONF_RWS_EN          (1 << 30)
++#define IC_CONF_CSI_MEM_WR_EN   (1 << 31)
++
++#define IC_IDMAC_1_CB0_BURST_16         (1 << 0)
++#define IC_IDMAC_1_CB1_BURST_16         (1 << 1)
++#define IC_IDMAC_1_CB2_BURST_16         (1 << 2)
++#define IC_IDMAC_1_CB3_BURST_16         (1 << 3)
++#define IC_IDMAC_1_CB4_BURST_16         (1 << 4)
++#define IC_IDMAC_1_CB5_BURST_16         (1 << 5)
++#define IC_IDMAC_1_CB6_BURST_16         (1 << 6)
++#define IC_IDMAC_1_CB7_BURST_16         (1 << 7)
++#define IC_IDMAC_1_PRPENC_ROT_MASK      (0x7 << 11)
++#define IC_IDMAC_1_PRPENC_ROT_OFFSET    11
++#define IC_IDMAC_1_PRPVF_ROT_MASK       (0x7 << 14)
++#define IC_IDMAC_1_PRPVF_ROT_OFFSET     14
++#define IC_IDMAC_1_PP_ROT_MASK          (0x7 << 17)
++#define IC_IDMAC_1_PP_ROT_OFFSET        17
++#define IC_IDMAC_1_PP_FLIP_RS           (1 << 22)
++#define IC_IDMAC_1_PRPVF_FLIP_RS        (1 << 21)
++#define IC_IDMAC_1_PRPENC_FLIP_RS       (1 << 20)
++
++#define IC_IDMAC_2_PRPENC_HEIGHT_MASK   (0x3ff << 0)
++#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
++#define IC_IDMAC_2_PRPVF_HEIGHT_MASK    (0x3ff << 10)
++#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET  10
++#define IC_IDMAC_2_PP_HEIGHT_MASK       (0x3ff << 20)
++#define IC_IDMAC_2_PP_HEIGHT_OFFSET     20
++
++#define IC_IDMAC_3_PRPENC_WIDTH_MASK    (0x3ff << 0)
++#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET  0
++#define IC_IDMAC_3_PRPVF_WIDTH_MASK     (0x3ff << 10)
++#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET   10
++#define IC_IDMAC_3_PP_WIDTH_MASK        (0x3ff << 20)
++#define IC_IDMAC_3_PP_WIDTH_OFFSET      20
++
++struct ic_task_regoffs {
++      u32 rsc;
++      u32 tpmem_csc[2];
++};
++
++struct ic_task_bitfields {
++      u32 ic_conf_en;
++      u32 ic_conf_rot_en;
++      u32 ic_conf_cmb_en;
++      u32 ic_conf_csc1_en;
++      u32 ic_conf_csc2_en;
++      u32 ic_cmb_galpha_bit;
++};
++
++static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
++      [IC_TASK_ENCODER] = {
++              .rsc = IC_PRP_ENC_RSC,
++              .tpmem_csc = {0x2008, 0},
++      },
++      [IC_TASK_VIEWFINDER] = {
++              .rsc = IC_PRP_VF_RSC,
++              .tpmem_csc = {0x4028, 0x4040},
++      },
++      [IC_TASK_POST_PROCESSOR] = {
++              .rsc = IC_PP_RSC,
++              .tpmem_csc = {0x6060, 0x6078},
++      },
++};
++
++static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
++      [IC_TASK_ENCODER] = {
++              .ic_conf_en = IC_CONF_PRPENC_EN,
++              .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
++              .ic_conf_cmb_en = 0,    /* NA */
++              .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
++              .ic_conf_csc2_en = 0,   /* NA */
++              .ic_cmb_galpha_bit = 0, /* NA */
++      },
++      [IC_TASK_VIEWFINDER] = {
++              .ic_conf_en = IC_CONF_PRPVF_EN,
++              .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
++              .ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
++              .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
++              .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
++              .ic_cmb_galpha_bit = 0,
++      },
++      [IC_TASK_POST_PROCESSOR] = {
++              .ic_conf_en = IC_CONF_PP_EN,
++              .ic_conf_rot_en = IC_CONF_PP_ROT_EN,
++              .ic_conf_cmb_en = IC_CONF_PP_CMB,
++              .ic_conf_csc1_en = IC_CONF_PP_CSC1,
++              .ic_conf_csc2_en = IC_CONF_PP_CSC2,
++              .ic_cmb_galpha_bit = 8,
++      },
++};
++
++struct ipu_ic_priv;
++
++struct ipu_ic {
++      enum ipu_ic_task task;
++      const struct ic_task_regoffs *reg;
++      const struct ic_task_bitfields *bit;
++
++      struct ipu_ic_colorspace in_cs;
++      struct ipu_ic_colorspace g_in_cs;
++      struct ipu_ic_colorspace out_cs;
++
++      bool graphics;
++      bool rotation;
++      bool in_use;
++
++      struct ipu_ic_priv *priv;
++};
++
++struct ipu_ic_priv {
++      void __iomem *base;
++      void __iomem *tpmem_base;
++      spinlock_t lock;
++      struct ipu_soc *ipu;
++      int use_count;
++      int irt_use_count;
++      struct ipu_ic task[IC_NUM_TASKS];
++};
++
++static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
++{
++      return readl(ic->priv->base + offset);
++}
++
++static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
++{
++      writel(value, ic->priv->base + offset);
++}
++
++static int init_csc(struct ipu_ic *ic,
++                  const struct ipu_ic_csc *csc,
++                  int csc_index)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      u32 __iomem *base;
++      const u16 (*c)[3];
++      const u16 *a;
++      u32 param;
++
++      base = (u32 __iomem *)
++              (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
++
++      /* Cast to unsigned */
++      c = (const u16 (*)[3])csc->params.coeff;
++      a = (const u16 *)csc->params.offset;
++
++      param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
++              ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
++      writel(param, base++);
++
++      param = ((a[0] & 0x1fe0) >> 5) | (csc->params.scale << 8) |
++              (csc->params.sat << 10);
++      writel(param, base++);
++
++      param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
++              ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
++      writel(param, base++);
++
++      param = ((a[1] & 0x1fe0) >> 5);
++      writel(param, base++);
++
++      param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
++              ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
++      writel(param, base++);
++
++      param = ((a[2] & 0x1fe0) >> 5);
++      writel(param, base++);
++
++      return 0;
++}
++
++static int calc_resize_coeffs(struct ipu_ic *ic,
++                            u32 in_size, u32 out_size,
++                            u32 *resize_coeff,
++                            u32 *downsize_coeff)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      struct ipu_soc *ipu = priv->ipu;
++      u32 temp_size, temp_downsize;
++
++      /*
++       * Input size cannot be more than 4096, and output size cannot
++       * be more than 1024
++       */
++      if (in_size > 4096) {
++              dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
++              return -EINVAL;
++      }
++      if (out_size > 1024) {
++              dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
++              return -EINVAL;
++      }
++
++      /* Cannot downsize more than 4:1 */
++      if ((out_size << 2) < in_size) {
++              dev_err(ipu->dev, "Unsupported downsize\n");
++              return -EINVAL;
++      }
++
++      /* Compute downsizing coefficient */
++      temp_downsize = 0;
++      temp_size = in_size;
++      while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
++             (temp_downsize < 2)) {
++              temp_size >>= 1;
++              temp_downsize++;
++      }
++      *downsize_coeff = temp_downsize;
++
++      /*
++       * compute resizing coefficient using the following equation:
++       * resize_coeff = M * (SI - 1) / (SO - 1)
++       * where M = 2^13, SI = input size, SO = output size
++       */
++      *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
++      if (*resize_coeff >= 16384L) {
++              dev_err(ipu->dev, "Warning! Overflow on resize coeff.\n");
++              *resize_coeff = 0x3FFF;
++      }
++
++      return 0;
++}
++
++void ipu_ic_task_enable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      unsigned long flags;
++      u32 ic_conf;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      ic_conf = ipu_ic_read(ic, IC_CONF);
++
++      ic_conf |= ic->bit->ic_conf_en;
++
++      if (ic->rotation)
++              ic_conf |= ic->bit->ic_conf_rot_en;
++
++      if (ic->in_cs.cs != ic->out_cs.cs)
++              ic_conf |= ic->bit->ic_conf_csc1_en;
++
++      if (ic->graphics) {
++              ic_conf |= ic->bit->ic_conf_cmb_en;
++              ic_conf |= ic->bit->ic_conf_csc1_en;
++
++              if (ic->g_in_cs.cs != ic->out_cs.cs)
++                      ic_conf |= ic->bit->ic_conf_csc2_en;
++      }
++
++      ipu_ic_write(ic, ic_conf, IC_CONF);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
++
++void ipu_ic_task_disable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      unsigned long flags;
++      u32 ic_conf;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      ic_conf = ipu_ic_read(ic, IC_CONF);
++
++      ic_conf &= ~(ic->bit->ic_conf_en |
++                   ic->bit->ic_conf_csc1_en |
++                   ic->bit->ic_conf_rot_en);
++      if (ic->bit->ic_conf_csc2_en)
++              ic_conf &= ~ic->bit->ic_conf_csc2_en;
++      if (ic->bit->ic_conf_cmb_en)
++              ic_conf &= ~ic->bit->ic_conf_cmb_en;
++
++      ipu_ic_write(ic, ic_conf, IC_CONF);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
++
++int ipu_ic_task_graphics_init(struct ipu_ic *ic,
++                            const struct ipu_ic_colorspace *g_in_cs,
++                            bool galpha_en, u32 galpha,
++                            bool colorkey_en, u32 colorkey)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      struct ipu_ic_csc csc2;
++      unsigned long flags;
++      u32 reg, ic_conf;
++      int ret = 0;
++
++      if (ic->task == IC_TASK_ENCODER)
++              return -EINVAL;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      ic_conf = ipu_ic_read(ic, IC_CONF);
++
++      if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
++              struct ipu_ic_csc csc1;
++
++              ret = ipu_ic_calc_csc(&csc1,
++                                    V4L2_YCBCR_ENC_601,
++                                    V4L2_QUANTIZATION_FULL_RANGE,
++                                    IPUV3_COLORSPACE_RGB,
++                                    V4L2_YCBCR_ENC_601,
++                                    V4L2_QUANTIZATION_FULL_RANGE,
++                                    IPUV3_COLORSPACE_RGB);
++              if (ret)
++                      goto unlock;
++
++              /* need transparent CSC1 conversion */
++              ret = init_csc(ic, &csc1, 0);
++              if (ret)
++                      goto unlock;
++      }
++
++      ic->g_in_cs = *g_in_cs;
++      csc2.in_cs = ic->g_in_cs;
++      csc2.out_cs = ic->out_cs;
++
++      ret = __ipu_ic_calc_csc(&csc2);
++      if (ret)
++              goto unlock;
++
++      ret = init_csc(ic, &csc2, 1);
++      if (ret)
++              goto unlock;
++
++      if (galpha_en) {
++              ic_conf |= IC_CONF_IC_GLB_LOC_A;
++              reg = ipu_ic_read(ic, IC_CMBP_1);
++              reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
++              reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
++              ipu_ic_write(ic, reg, IC_CMBP_1);
++      } else
++              ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++      if (colorkey_en) {
++              ic_conf |= IC_CONF_KEY_COLOR_EN;
++              ipu_ic_write(ic, colorkey, IC_CMBP_2);
++      } else
++              ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++
++      ipu_ic_write(ic, ic_conf, IC_CONF);
++
++      ic->graphics = true;
++unlock:
++      spin_unlock_irqrestore(&priv->lock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
++
++int ipu_ic_task_init_rsc(struct ipu_ic *ic,
++                       const struct ipu_ic_csc *csc,
++                       int in_width, int in_height,
++                       int out_width, int out_height,
++                       u32 rsc)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      u32 downsize_coeff, resize_coeff;
++      unsigned long flags;
++      int ret = 0;
++
++      if (!rsc) {
++              /* Setup vertical resizing */
++
++              ret = calc_resize_coeffs(ic, in_height, out_height,
++                                       &resize_coeff, &downsize_coeff);
++              if (ret)
++                      return ret;
++
++              rsc = (downsize_coeff << 30) | (resize_coeff << 16);
++
++              /* Setup horizontal resizing */
++              ret = calc_resize_coeffs(ic, in_width, out_width,
++                                       &resize_coeff, &downsize_coeff);
++              if (ret)
++                      return ret;
++
++              rsc |= (downsize_coeff << 14) | resize_coeff;
++      }
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      ipu_ic_write(ic, rsc, ic->reg->rsc);
++
++      /* Setup color space conversion */
++      ic->in_cs = csc->in_cs;
++      ic->out_cs = csc->out_cs;
++
++      ret = init_csc(ic, csc, 0);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++      return ret;
++}
++
++int ipu_ic_task_init(struct ipu_ic *ic,
++                   const struct ipu_ic_csc *csc,
++                   int in_width, int in_height,
++                   int out_width, int out_height)
++{
++      return ipu_ic_task_init_rsc(ic, csc,
++                                  in_width, in_height,
++                                  out_width, out_height, 0);
++}
++EXPORT_SYMBOL_GPL(ipu_ic_task_init);
++
++int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
++                        u32 width, u32 height, int burst_size,
++                        enum ipu_rotate_mode rot)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      struct ipu_soc *ipu = priv->ipu;
++      u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
++      u32 temp_rot = bitrev8(rot) >> 5;
++      bool need_hor_flip = false;
++      unsigned long flags;
++      int ret = 0;
++
++      if ((burst_size != 8) && (burst_size != 16)) {
++              dev_err(ipu->dev, "Illegal burst length for IC\n");
++              return -EINVAL;
++      }
++
++      width--;
++      height--;
++
++      if (temp_rot & 0x2)     /* Need horizontal flip */
++              need_hor_flip = true;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
++      ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
++      ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
++
++      switch (channel->num) {
++      case IPUV3_CHANNEL_IC_PP_MEM:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
++
++              if (need_hor_flip)
++                      ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
++
++              ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
++              ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
++
++              ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
++              ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
++              break;
++      case IPUV3_CHANNEL_MEM_IC_PP:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
++              break;
++      case IPUV3_CHANNEL_MEM_ROT_PP:
++              ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
++              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
++              break;
++      case IPUV3_CHANNEL_MEM_IC_PRP_VF:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
++              break;
++      case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
++
++              if (need_hor_flip)
++                      ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
++
++              ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
++              ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
++
++              ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
++              ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
++              break;
++      case IPUV3_CHANNEL_MEM_ROT_ENC:
++              ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
++              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
++              break;
++      case IPUV3_CHANNEL_IC_PRP_VF_MEM:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
++
++              if (need_hor_flip)
++                      ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
++
++              ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
++              ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
++
++              ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
++              ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
++              break;
++      case IPUV3_CHANNEL_MEM_ROT_VF:
++              ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
++              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
++              break;
++      case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
++              break;
++      case IPUV3_CHANNEL_G_MEM_IC_PP:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
++              break;
++      case IPUV3_CHANNEL_VDI_MEM_IC_VF:
++              if (burst_size == 16)
++                      ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
++              else
++                      ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
++              break;
++      default:
++              goto unlock;
++      }
++
++      ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
++      ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
++      ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
++
++      if (ipu_rot_mode_is_irt(rot))
++              ic->rotation = true;
++
++unlock:
++      spin_unlock_irqrestore(&priv->lock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
++
++static void ipu_irt_enable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++
++      if (!priv->irt_use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
++
++      priv->irt_use_count++;
++}
++
++static void ipu_irt_disable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++
++      if (priv->irt_use_count) {
++              if (!--priv->irt_use_count)
++                      ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
++      }
++}
++
++int ipu_ic_enable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      if (!priv->use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
++
++      priv->use_count++;
++
++      if (ic->rotation)
++              ipu_irt_enable(ic);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_ic_enable);
++
++int ipu_ic_disable(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      priv->use_count--;
++
++      if (!priv->use_count)
++              ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
++
++      if (priv->use_count < 0)
++              priv->use_count = 0;
++
++      if (ic->rotation)
++              ipu_irt_disable(ic);
++
++      ic->rotation = ic->graphics = false;
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_ic_disable);
++
++struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
++{
++      struct ipu_ic_priv *priv = ipu->ic_priv;
++      unsigned long flags;
++      struct ipu_ic *ic, *ret;
++
++      if (task >= IC_NUM_TASKS)
++              return ERR_PTR(-EINVAL);
++
++      ic = &priv->task[task];
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      if (ic->in_use) {
++              ret = ERR_PTR(-EBUSY);
++              goto unlock;
++      }
++
++      ic->in_use = true;
++      ret = ic;
++
++unlock:
++      spin_unlock_irqrestore(&priv->lock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_ic_get);
++
++void ipu_ic_put(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++      ic->in_use = false;
++      spin_unlock_irqrestore(&priv->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_ic_put);
++
++int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
++              unsigned long base, unsigned long tpmem_base)
++{
++      struct ipu_ic_priv *priv;
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      ipu->ic_priv = priv;
++
++      spin_lock_init(&priv->lock);
++      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!priv->base)
++              return -ENOMEM;
++      priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
++      if (!priv->tpmem_base)
++              return -ENOMEM;
++
++      dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
++
++      priv->ipu = ipu;
++
++      for (i = 0; i < IC_NUM_TASKS; i++) {
++              priv->task[i].task = i;
++              priv->task[i].priv = priv;
++              priv->task[i].reg = &ic_task_reg[i];
++              priv->task[i].bit = &ic_task_bit[i];
++      }
++
++      return 0;
++}
++
++void ipu_ic_exit(struct ipu_soc *ipu)
++{
++}
++
++void ipu_ic_dump(struct ipu_ic *ic)
++{
++      struct ipu_ic_priv *priv = ic->priv;
++      struct ipu_soc *ipu = priv->ipu;
++
++      dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
++              ipu_ic_read(ic, IC_CONF));
++      dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
++              ipu_ic_read(ic, IC_PRP_ENC_RSC));
++      dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
++              ipu_ic_read(ic, IC_PRP_VF_RSC));
++      dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
++              ipu_ic_read(ic, IC_PP_RSC));
++      dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_CMBP_1));
++      dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_CMBP_2));
++      dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_IDMAC_1));
++      dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_IDMAC_2));
++      dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_IDMAC_3));
++      dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
++              ipu_ic_read(ic, IC_IDMAC_4));
++}
++EXPORT_SYMBOL_GPL(ipu_ic_dump);
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-image-convert.c
+@@ -0,0 +1,2475 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012-2016 Mentor Graphics Inc.
++ *
++ * Queued image conversion support, with tiling and rotation.
++ */
++
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <video/imx-ipu-image-convert.h>
++#include "ipu-prv.h"
++
++/*
++ * The IC Resizer has a restriction that the output frame from the
++ * resizer must be 1024 or less in both width (pixels) and height
++ * (lines).
++ *
++ * The image converter attempts to split up a conversion when
++ * the desired output (converted) frame resolution exceeds the
++ * IC resizer limit of 1024 in either dimension.
++ *
++ * If either dimension of the output frame exceeds the limit, the
++ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
++ * of 4*4 or 16 tiles. A conversion is then carried out for each
++ * tile (but taking care to pass the full frame stride length to
++ * the DMA channel's parameter memory!). IDMA double-buffering is used
++ * to convert each tile back-to-back when possible (see note below
++ * when double_buffering boolean is set).
++ *
++ * Note that the input frame must be split up into the same number
++ * of tiles as the output frame:
++ *
++ *                       +---------+-----+
++ *   +-----+---+         |  A      | B   |
++ *   | A   | B |         |         |     |
++ *   +-----+---+   -->   +---------+-----+
++ *   | C   | D |         |  C      | D   |
++ *   +-----+---+         |         |     |
++ *                       +---------+-----+
++ *
++ * Clockwise 90° rotations are handled by first rescaling into a
++ * reusable temporary tile buffer and then rotating with the 8x8
++ * block rotator, writing to the correct destination:
++ *
++ *                                         +-----+-----+
++ *                                         |     |     |
++ *   +-----+---+         +---------+       | C   | A   |
++ *   | A   | B |         | A,B, |  |       |     |     |
++ *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
++ *   | C   | D |         +---------+       +-----+-----+
++ *   +-----+---+                           | D   | B   |
++ *                                         |     |     |
++ *                                         +-----+-----+
++ *
++ * If the 8x8 block rotator is used, horizontal or vertical flipping
++ * is done during the rotation step, otherwise flipping is done
++ * during the scaling step.
++ * With rotation or flipping, tile order changes between input and
++ * output image. Tiles are numbered row major from top left to bottom
++ * right for both input and output image.
++ */
++
++#define MAX_STRIPES_W    4
++#define MAX_STRIPES_H    4
++#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
++
++#define MIN_W     16
++#define MIN_H     8
++#define MAX_W     4096
++#define MAX_H     4096
++
++enum ipu_image_convert_type {
++      IMAGE_CONVERT_IN = 0,
++      IMAGE_CONVERT_OUT,
++};
++
++struct ipu_image_convert_dma_buf {
++      void          *virt;
++      dma_addr_t    phys;
++      unsigned long len;
++};
++
++struct ipu_image_convert_dma_chan {
++      int in;
++      int out;
++      int rot_in;
++      int rot_out;
++      int vdi_in_p;
++      int vdi_in;
++      int vdi_in_n;
++};
++
++/* dimensions of one tile */
++struct ipu_image_tile {
++      u32 width;
++      u32 height;
++      u32 left;
++      u32 top;
++      /* size and strides are in bytes */
++      u32 size;
++      u32 stride;
++      u32 rot_stride;
++      /* start Y or packed offset of this tile */
++      u32 offset;
++      /* offset from start to tile in U plane, for planar formats */
++      u32 u_off;
++      /* offset from start to tile in V plane, for planar formats */
++      u32 v_off;
++};
++
++struct ipu_image_convert_image {
++      struct ipu_image base;
++      enum ipu_image_convert_type type;
++
++      const struct ipu_image_pixfmt *fmt;
++      unsigned int stride;
++
++      /* # of rows (horizontal stripes) if dest height is > 1024 */
++      unsigned int num_rows;
++      /* # of columns (vertical stripes) if dest width is > 1024 */
++      unsigned int num_cols;
++
++      struct ipu_image_tile tile[MAX_TILES];
++};
++
++struct ipu_image_pixfmt {
++      u32     fourcc;        /* V4L2 fourcc */
++      int     bpp;           /* total bpp */
++      int     uv_width_dec;  /* decimation in width for U/V planes */
++      int     uv_height_dec; /* decimation in height for U/V planes */
++      bool    planar;        /* planar format */
++      bool    uv_swapped;    /* U and V planes are swapped */
++      bool    uv_packed;     /* partial planar (U and V in same plane) */
++};
++
++struct ipu_image_convert_ctx;
++struct ipu_image_convert_chan;
++struct ipu_image_convert_priv;
++
++struct ipu_image_convert_ctx {
++      struct ipu_image_convert_chan *chan;
++
++      ipu_image_convert_cb_t complete;
++      void *complete_context;
++
++      /* Source/destination image data and rotation mode */
++      struct ipu_image_convert_image in;
++      struct ipu_image_convert_image out;
++      struct ipu_ic_csc csc;
++      enum ipu_rotate_mode rot_mode;
++      u32 downsize_coeff_h;
++      u32 downsize_coeff_v;
++      u32 image_resize_coeff_h;
++      u32 image_resize_coeff_v;
++      u32 resize_coeffs_h[MAX_STRIPES_W];
++      u32 resize_coeffs_v[MAX_STRIPES_H];
++
++      /* intermediate buffer for rotation */
++      struct ipu_image_convert_dma_buf rot_intermediate[2];
++
++      /* current buffer number for double buffering */
++      int cur_buf_num;
++
++      bool aborting;
++      struct completion aborted;
++
++      /* can we use double-buffering for this conversion operation? */
++      bool double_buffering;
++      /* num_rows * num_cols */
++      unsigned int num_tiles;
++      /* next tile to process */
++      unsigned int next_tile;
++      /* where to place converted tile in dest image */
++      unsigned int out_tile_map[MAX_TILES];
++
++      struct list_head list;
++};
++
++struct ipu_image_convert_chan {
++      struct ipu_image_convert_priv *priv;
++
++      enum ipu_ic_task ic_task;
++      const struct ipu_image_convert_dma_chan *dma_ch;
++
++      struct ipu_ic *ic;
++      struct ipuv3_channel *in_chan;
++      struct ipuv3_channel *out_chan;
++      struct ipuv3_channel *rotation_in_chan;
++      struct ipuv3_channel *rotation_out_chan;
++
++      /* the IPU end-of-frame irqs */
++      int out_eof_irq;
++      int rot_out_eof_irq;
++
++      spinlock_t irqlock;
++
++      /* list of convert contexts */
++      struct list_head ctx_list;
++      /* queue of conversion runs */
++      struct list_head pending_q;
++      /* queue of completed runs */
++      struct list_head done_q;
++
++      /* the current conversion run */
++      struct ipu_image_convert_run *current_run;
++};
++
++struct ipu_image_convert_priv {
++      struct ipu_image_convert_chan chan[IC_NUM_TASKS];
++      struct ipu_soc *ipu;
++};
++
++static const struct ipu_image_convert_dma_chan
++image_convert_dma_chan[IC_NUM_TASKS] = {
++      [IC_TASK_VIEWFINDER] = {
++              .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
++              .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
++              .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
++              .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
++              .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
++              .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
++              .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
++      },
++      [IC_TASK_POST_PROCESSOR] = {
++              .in = IPUV3_CHANNEL_MEM_IC_PP,
++              .out = IPUV3_CHANNEL_IC_PP_MEM,
++              .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
++              .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
++      },
++};
++
++static const struct ipu_image_pixfmt image_convert_formats[] = {
++      {
++              .fourcc = V4L2_PIX_FMT_RGB565,
++              .bpp    = 16,
++      }, {
++              .fourcc = V4L2_PIX_FMT_RGB24,
++              .bpp    = 24,
++      }, {
++              .fourcc = V4L2_PIX_FMT_BGR24,
++              .bpp    = 24,
++      }, {
++              .fourcc = V4L2_PIX_FMT_RGB32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_BGR32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_XRGB32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_XBGR32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_BGRX32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_RGBX32,
++              .bpp    = 32,
++      }, {
++              .fourcc = V4L2_PIX_FMT_YUYV,
++              .bpp    = 16,
++              .uv_width_dec = 2,
++              .uv_height_dec = 1,
++      }, {
++              .fourcc = V4L2_PIX_FMT_UYVY,
++              .bpp    = 16,
++              .uv_width_dec = 2,
++              .uv_height_dec = 1,
++      }, {
++              .fourcc = V4L2_PIX_FMT_YUV420,
++              .bpp    = 12,
++              .planar = true,
++              .uv_width_dec = 2,
++              .uv_height_dec = 2,
++      }, {
++              .fourcc = V4L2_PIX_FMT_YVU420,
++              .bpp    = 12,
++              .planar = true,
++              .uv_width_dec = 2,
++              .uv_height_dec = 2,
++              .uv_swapped = true,
++      }, {
++              .fourcc = V4L2_PIX_FMT_NV12,
++              .bpp    = 12,
++              .planar = true,
++              .uv_width_dec = 2,
++              .uv_height_dec = 2,
++              .uv_packed = true,
++      }, {
++              .fourcc = V4L2_PIX_FMT_YUV422P,
++              .bpp    = 16,
++              .planar = true,
++              .uv_width_dec = 2,
++              .uv_height_dec = 1,
++      }, {
++              .fourcc = V4L2_PIX_FMT_NV16,
++              .bpp    = 16,
++              .planar = true,
++              .uv_width_dec = 2,
++              .uv_height_dec = 1,
++              .uv_packed = true,
++      },
++};
++
++static const struct ipu_image_pixfmt *get_format(u32 fourcc)
++{
++      const struct ipu_image_pixfmt *ret = NULL;
++      unsigned int i;
++
++      for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
++              if (image_convert_formats[i].fourcc == fourcc) {
++                      ret = &image_convert_formats[i];
++                      break;
++              }
++      }
++
++      return ret;
++}
++
++static void dump_format(struct ipu_image_convert_ctx *ctx,
++                      struct ipu_image_convert_image *ic_image)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++
++      dev_dbg(priv->ipu->dev,
++              "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
++              chan->ic_task, ctx,
++              ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
++              ic_image->base.pix.width, ic_image->base.pix.height,
++              ic_image->num_cols, ic_image->num_rows,
++              ic_image->fmt->fourcc & 0xff,
++              (ic_image->fmt->fourcc >> 8) & 0xff,
++              (ic_image->fmt->fourcc >> 16) & 0xff,
++              (ic_image->fmt->fourcc >> 24) & 0xff);
++}
++
++int ipu_image_convert_enum_format(int index, u32 *fourcc)
++{
++      const struct ipu_image_pixfmt *fmt;
++
++      if (index >= (int)ARRAY_SIZE(image_convert_formats))
++              return -EINVAL;
++
++      /* Format found */
++      fmt = &image_convert_formats[index];
++      *fourcc = fmt->fourcc;
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
++
++static void free_dma_buf(struct ipu_image_convert_priv *priv,
++                       struct ipu_image_convert_dma_buf *buf)
++{
++      if (buf->virt)
++              dma_free_coherent(priv->ipu->dev,
++                                buf->len, buf->virt, buf->phys);
++      buf->virt = NULL;
++      buf->phys = 0;
++}
++
++static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
++                       struct ipu_image_convert_dma_buf *buf,
++                       int size)
++{
++      buf->len = PAGE_ALIGN(size);
++      buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
++                                     GFP_DMA | GFP_KERNEL);
++      if (!buf->virt) {
++              dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static inline int num_stripes(int dim)
++{
++      return (dim - 1) / 1024 + 1;
++}
++
++/*
++ * Calculate downsizing coefficients, which are the same for all tiles,
++ * and initial bilinear resizing coefficients, which are used to find the
++ * best seam positions.
++ * Also determine the number of tiles necessary to guarantee that no tile
++ * is larger than 1024 pixels in either dimension at the output and between
++ * IC downsizing and main processing sections.
++ */
++static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
++                                        struct ipu_image *in,
++                                        struct ipu_image *out)
++{
++      u32 downsized_width = in->rect.width;
++      u32 downsized_height = in->rect.height;
++      u32 downsize_coeff_v = 0;
++      u32 downsize_coeff_h = 0;
++      u32 resized_width = out->rect.width;
++      u32 resized_height = out->rect.height;
++      u32 resize_coeff_h;
++      u32 resize_coeff_v;
++      u32 cols;
++      u32 rows;
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              resized_width = out->rect.height;
++              resized_height = out->rect.width;
++      }
++
++      /* Do not let invalid input lead to an endless loop below */
++      if (WARN_ON(resized_width == 0 || resized_height == 0))
++              return -EINVAL;
++
++      while (downsized_width >= resized_width * 2) {
++              downsized_width >>= 1;
++              downsize_coeff_h++;
++      }
++
++      while (downsized_height >= resized_height * 2) {
++              downsized_height >>= 1;
++              downsize_coeff_v++;
++      }
++
++      /*
++       * Calculate the bilinear resizing coefficients that could be used if
++       * we were converting with a single tile. The bottom right output pixel
++       * should sample as close as possible to the bottom right input pixel
++       * out of the decimator, but not overshoot it:
++       */
++      resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
++      resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
++
++      /*
++       * Both the output of the IC downsizing section before being passed to
++       * the IC main processing section and the final output of the IC main
++       * processing section must be <= 1024 pixels in both dimensions.
++       */
++      cols = num_stripes(max_t(u32, downsized_width, resized_width));
++      rows = num_stripes(max_t(u32, downsized_height, resized_height));
++
++      dev_dbg(ctx->chan->priv->ipu->dev,
++              "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
++              __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
++              resize_coeff_v, cols, rows);
++
++      if (downsize_coeff_h > 2 || downsize_coeff_v  > 2 ||
++          resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
++              return -EINVAL;
++
++      ctx->downsize_coeff_h = downsize_coeff_h;
++      ctx->downsize_coeff_v = downsize_coeff_v;
++      ctx->image_resize_coeff_h = resize_coeff_h;
++      ctx->image_resize_coeff_v = resize_coeff_v;
++      ctx->in.num_cols = cols;
++      ctx->in.num_rows = rows;
++
++      return 0;
++}
++
++#define round_closest(x, y) round_down((x) + (y)/2, (y))
++
++/*
++ * Find the best aligned seam position for the given column / row index.
++ * Rotation and image offsets are out of scope.
++ *
++ * @index: column / row index, used to calculate valid interval
++ * @in_edge: input right / bottom edge
++ * @out_edge: output right / bottom edge
++ * @in_align: input alignment, either horizontal 8-byte line start address
++ *            alignment, or pixel alignment due to image format
++ * @out_align: output alignment, either horizontal 8-byte line start address
++ *             alignment, or pixel alignment due to image format or rotator
++ *             block size
++ * @in_burst: horizontal input burst size in case of horizontal flip
++ * @out_burst: horizontal output burst size or rotator block size
++ * @downsize_coeff: downsizing section coefficient
++ * @resize_coeff: main processing section resizing coefficient
++ * @_in_seam: aligned input seam position return value
++ * @_out_seam: aligned output seam position return value
++ */
++static void find_best_seam(struct ipu_image_convert_ctx *ctx,
++                         unsigned int index,
++                         unsigned int in_edge,
++                         unsigned int out_edge,
++                         unsigned int in_align,
++                         unsigned int out_align,
++                         unsigned int in_burst,
++                         unsigned int out_burst,
++                         unsigned int downsize_coeff,
++                         unsigned int resize_coeff,
++                         u32 *_in_seam,
++                         u32 *_out_seam)
++{
++      struct device *dev = ctx->chan->priv->ipu->dev;
++      unsigned int out_pos;
++      /* Input / output seam position candidates */
++      unsigned int out_seam = 0;
++      unsigned int in_seam = 0;
++      unsigned int min_diff = UINT_MAX;
++      unsigned int out_start;
++      unsigned int out_end;
++      unsigned int in_start;
++      unsigned int in_end;
++
++      /* Start within 1024 pixels of the right / bottom edge */
++      out_start = max_t(int, index * out_align, out_edge - 1024);
++      /* End before having to add more columns to the left / rows above */
++      out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
++
++      /*
++       * Limit input seam position to make sure that the downsized input tile
++       * to the right or bottom does not exceed 1024 pixels.
++       */
++      in_start = max_t(int, index * in_align,
++                       in_edge - (1024 << downsize_coeff));
++      in_end = min_t(unsigned int, in_edge,
++                     index * (1024 << downsize_coeff) + 1);
++
++      /*
++       * Output tiles must start at a multiple of 8 bytes horizontally and
++       * possibly at an even line horizontally depending on the pixel format.
++       * Only consider output aligned positions for the seam.
++       */
++      out_start = round_up(out_start, out_align);
++      for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
++              unsigned int in_pos;
++              unsigned int in_pos_aligned;
++              unsigned int in_pos_rounded;
++              unsigned int abs_diff;
++
++              /*
++               * Tiles in the right row / bottom column may not be allowed to
++               * overshoot horizontally / vertically. out_burst may be the
++               * actual DMA burst size, or the rotator block size.
++               */
++              if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
++                      continue;
++
++              /*
++               * Input sample position, corresponding to out_pos, 19.13 fixed
++               * point.
++               */
++              in_pos = (out_pos * resize_coeff) << downsize_coeff;
++              /*
++               * The closest input sample position that we could actually
++               * start the input tile at, 19.13 fixed point.
++               */
++              in_pos_aligned = round_closest(in_pos, 8192U * in_align);
++              /* Convert 19.13 fixed point to integer */
++              in_pos_rounded = in_pos_aligned / 8192U;
++
++              if (in_pos_rounded < in_start)
++                      continue;
++              if (in_pos_rounded >= in_end)
++                      break;
++
++              if ((in_burst > 1) &&
++                  (in_edge - in_pos_rounded) % in_burst)
++                      continue;
++
++              if (in_pos < in_pos_aligned)
++                      abs_diff = in_pos_aligned - in_pos;
++              else
++                      abs_diff = in_pos - in_pos_aligned;
++
++              if (abs_diff < min_diff) {
++                      in_seam = in_pos_rounded;
++                      out_seam = out_pos;
++                      min_diff = abs_diff;
++              }
++      }
++
++      *_out_seam = out_seam;
++      *_in_seam = in_seam;
++
++      dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
++              __func__, out_seam, out_align, out_start, out_end,
++              in_seam, in_align, in_start, in_end, min_diff / 8192,
++              DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
++}
++
++/*
++ * Tile left edges are required to be aligned to multiples of 8 bytes
++ * by the IDMAC.
++ */
++static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
++{
++      if (fmt->planar)
++              return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
++      else
++              return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
++}
++
++/*
++ * Tile top edge alignment is only limited by chroma subsampling.
++ */
++static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
++{
++      return fmt->uv_height_dec > 1 ? 2 : 1;
++}
++
++static inline u32 tile_width_align(enum ipu_image_convert_type type,
++                                 const struct ipu_image_pixfmt *fmt,
++                                 enum ipu_rotate_mode rot_mode)
++{
++      if (type == IMAGE_CONVERT_IN) {
++              /*
++               * The IC burst reads 8 pixels at a time. Reading beyond the
++               * end of the line is usually acceptable. Those pixels are
++               * ignored, unless the IC has to write the scaled line in
++               * reverse.
++               */
++              return (!ipu_rot_mode_is_irt(rot_mode) &&
++                      (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
++      }
++
++      /*
++       * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
++       * formats to guarantee 8-byte aligned line start addresses in the
++       * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
++       * for all other formats.
++       */
++      return (ipu_rot_mode_is_irt(rot_mode) &&
++              fmt->planar && !fmt->uv_packed) ?
++              8 * fmt->uv_width_dec : 8;
++}
++
++static inline u32 tile_height_align(enum ipu_image_convert_type type,
++                                  const struct ipu_image_pixfmt *fmt,
++                                  enum ipu_rotate_mode rot_mode)
++{
++      if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
++              return 2;
++
++      /*
++       * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
++       * formats to guarantee 8-byte aligned line start addresses in the
++       * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
++       * for all other formats.
++       */
++      return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
++}
++
++/*
++ * Fill in left position and width for all tiles in an input column, and
++ * for all corresponding output tiles. If the 90° rotator is used, the output
++ * tiles are in a row, and output tile top position and height are set.
++ */
++static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
++                           unsigned int col,
++                           struct ipu_image_convert_image *in,
++                           unsigned int in_left, unsigned int in_width,
++                           struct ipu_image_convert_image *out,
++                           unsigned int out_left, unsigned int out_width)
++{
++      unsigned int row, tile_idx;
++      struct ipu_image_tile *in_tile, *out_tile;
++
++      for (row = 0; row < in->num_rows; row++) {
++              tile_idx = in->num_cols * row + col;
++              in_tile = &in->tile[tile_idx];
++              out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
++
++              in_tile->left = in_left;
++              in_tile->width = in_width;
++
++              if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++                      out_tile->top = out_left;
++                      out_tile->height = out_width;
++              } else {
++                      out_tile->left = out_left;
++                      out_tile->width = out_width;
++              }
++      }
++}
++
++/*
++ * Fill in top position and height for all tiles in an input row, and
++ * for all corresponding output tiles. If the 90° rotator is used, the output
++ * tiles are in a column, and output tile left position and width are set.
++ */
++static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
++                        struct ipu_image_convert_image *in,
++                        unsigned int in_top, unsigned int in_height,
++                        struct ipu_image_convert_image *out,
++                        unsigned int out_top, unsigned int out_height)
++{
++      unsigned int col, tile_idx;
++      struct ipu_image_tile *in_tile, *out_tile;
++
++      for (col = 0; col < in->num_cols; col++) {
++              tile_idx = in->num_cols * row + col;
++              in_tile = &in->tile[tile_idx];
++              out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
++
++              in_tile->top = in_top;
++              in_tile->height = in_height;
++
++              if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++                      out_tile->left = out_top;
++                      out_tile->width = out_height;
++              } else {
++                      out_tile->top = out_top;
++                      out_tile->height = out_height;
++              }
++      }
++}
++
++/*
++ * Find the best horizontal and vertical seam positions to split into tiles.
++ * Minimize the fractional part of the input sampling position for the
++ * top / left pixels of each tile.
++ */
++static void find_seams(struct ipu_image_convert_ctx *ctx,
++                     struct ipu_image_convert_image *in,
++                     struct ipu_image_convert_image *out)
++{
++      struct device *dev = ctx->chan->priv->ipu->dev;
++      unsigned int resized_width = out->base.rect.width;
++      unsigned int resized_height = out->base.rect.height;
++      unsigned int col;
++      unsigned int row;
++      unsigned int in_left_align = tile_left_align(in->fmt);
++      unsigned int in_top_align = tile_top_align(in->fmt);
++      unsigned int out_left_align = tile_left_align(out->fmt);
++      unsigned int out_top_align = tile_top_align(out->fmt);
++      unsigned int out_width_align = tile_width_align(out->type, out->fmt,
++                                                      ctx->rot_mode);
++      unsigned int out_height_align = tile_height_align(out->type, out->fmt,
++                                                        ctx->rot_mode);
++      unsigned int in_right = in->base.rect.width;
++      unsigned int in_bottom = in->base.rect.height;
++      unsigned int out_right = out->base.rect.width;
++      unsigned int out_bottom = out->base.rect.height;
++      unsigned int flipped_out_left;
++      unsigned int flipped_out_top;
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              /* Switch width/height and align top left to IRT block size */
++              resized_width = out->base.rect.height;
++              resized_height = out->base.rect.width;
++              out_left_align = out_height_align;
++              out_top_align = out_width_align;
++              out_width_align = out_left_align;
++              out_height_align = out_top_align;
++              out_right = out->base.rect.height;
++              out_bottom = out->base.rect.width;
++      }
++
++      for (col = in->num_cols - 1; col > 0; col--) {
++              bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
++                                        !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
++              bool allow_out_overshoot = (col < in->num_cols - 1) &&
++                                         !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
++              unsigned int in_left;
++              unsigned int out_left;
++
++              /*
++               * Align input width to burst length if the scaling step flips
++               * horizontally.
++               */
++
++              find_best_seam(ctx, col,
++                             in_right, out_right,
++                             in_left_align, out_left_align,
++                             allow_in_overshoot ? 1 : 8 /* burst length */,
++                             allow_out_overshoot ? 1 : out_width_align,
++                             ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
++                             &in_left, &out_left);
++
++              if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
++                      flipped_out_left = resized_width - out_right;
++              else
++                      flipped_out_left = out_left;
++
++              fill_tile_column(ctx, col, in, in_left, in_right - in_left,
++                               out, flipped_out_left, out_right - out_left);
++
++              dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
++                      in_left, in_right - in_left,
++                      flipped_out_left, out_right - out_left);
++
++              in_right = in_left;
++              out_right = out_left;
++      }
++
++      flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
++                         resized_width - out_right : 0;
++
++      fill_tile_column(ctx, 0, in, 0, in_right,
++                       out, flipped_out_left, out_right);
++
++      dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
++              in_right, flipped_out_left, out_right);
++
++      for (row = in->num_rows - 1; row > 0; row--) {
++              bool allow_overshoot = row < in->num_rows - 1;
++              unsigned int in_top;
++              unsigned int out_top;
++
++              find_best_seam(ctx, row,
++                             in_bottom, out_bottom,
++                             in_top_align, out_top_align,
++                             1, allow_overshoot ? 1 : out_height_align,
++                             ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
++                             &in_top, &out_top);
++
++              if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
++                  ipu_rot_mode_is_irt(ctx->rot_mode))
++                      flipped_out_top = resized_height - out_bottom;
++              else
++                      flipped_out_top = out_top;
++
++              fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
++                            out, flipped_out_top, out_bottom - out_top);
++
++              dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
++                      in_top, in_bottom - in_top,
++                      flipped_out_top, out_bottom - out_top);
++
++              in_bottom = in_top;
++              out_bottom = out_top;
++      }
++
++      if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
++          ipu_rot_mode_is_irt(ctx->rot_mode))
++              flipped_out_top = resized_height - out_bottom;
++      else
++              flipped_out_top = 0;
++
++      fill_tile_row(ctx, 0, in, 0, in_bottom,
++                    out, flipped_out_top, out_bottom);
++
++      dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
++              in_bottom, flipped_out_top, out_bottom);
++}
++
++static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
++                              struct ipu_image_convert_image *image)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      unsigned int max_width = 1024;
++      unsigned int max_height = 1024;
++      unsigned int i;
++
++      if (image->type == IMAGE_CONVERT_IN) {
++              /* Up to 4096x4096 input tile size */
++              max_width <<= ctx->downsize_coeff_h;
++              max_height <<= ctx->downsize_coeff_v;
++      }
++
++      for (i = 0; i < ctx->num_tiles; i++) {
++              struct ipu_image_tile *tile;
++              const unsigned int row = i / image->num_cols;
++              const unsigned int col = i % image->num_cols;
++
++              if (image->type == IMAGE_CONVERT_OUT)
++                      tile = &image->tile[ctx->out_tile_map[i]];
++              else
++                      tile = &image->tile[i];
++
++              tile->size = ((tile->height * image->fmt->bpp) >> 3) *
++                      tile->width;
++
++              if (image->fmt->planar) {
++                      tile->stride = tile->width;
++                      tile->rot_stride = tile->height;
++              } else {
++                      tile->stride =
++                              (image->fmt->bpp * tile->width) >> 3;
++                      tile->rot_stride =
++                              (image->fmt->bpp * tile->height) >> 3;
++              }
++
++              dev_dbg(priv->ipu->dev,
++                      "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
++                      chan->ic_task, ctx,
++                      image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
++                      row, col,
++                      tile->width, tile->height, tile->left, tile->top);
++
++              if (!tile->width || tile->width > max_width ||
++                  !tile->height || tile->height > max_height) {
++                      dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
++                              image->type == IMAGE_CONVERT_IN ? "input" :
++                              "output", tile->width, tile->height);
++                      return -EINVAL;
++              }
++      }
++
++      return 0;
++}
++
++/*
++ * Use the rotation transformation to find the tile coordinates
++ * (row, col) of a tile in the destination frame that corresponds
++ * to the given tile coordinates of a source frame. The destination
++ * coordinate is then converted to a tile index.
++ */
++static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
++                              int src_row, int src_col)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_image *s_image = &ctx->in;
++      struct ipu_image_convert_image *d_image = &ctx->out;
++      int dst_row, dst_col;
++
++      /* with no rotation it's a 1:1 mapping */
++      if (ctx->rot_mode == IPU_ROTATE_NONE)
++              return src_row * s_image->num_cols + src_col;
++
++      /*
++       * before doing the transform, first we have to translate
++       * source row,col for an origin in the center of s_image
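++       * (coordinates are doubled so that the center falls on an integer)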
++       */
++      src_row = src_row * 2 - (s_image->num_rows - 1);
++      src_col = src_col * 2 - (s_image->num_cols - 1);
++
++      /* do the rotation transform */
++      if (ctx->rot_mode & IPU_ROT_BIT_90) {
++              dst_col = -src_row;
++              dst_row = src_col;
++      } else {
++              dst_col = src_col;
++              dst_row = src_row;
++      }
++
++      /* apply flip */
++      if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
++              dst_col = -dst_col;
++      if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
++              dst_row = -dst_row;
++
++      dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
++              chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
++
++      /*
++       * finally translate dest row,col using an origin in upper
++       * left of d_image
++       */
++      dst_row += d_image->num_rows - 1;
++      dst_col += d_image->num_cols - 1;
++      dst_row /= 2;
++      dst_col /= 2;
++
++      return dst_row * d_image->num_cols + dst_col;
++}
++
++/*
++ * Fill the out_tile_map[] with transformed destination tile indices.
++ */
++static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
++{
++      struct ipu_image_convert_image *s_image = &ctx->in;
++      unsigned int row, col, tile = 0;
++
++      for (row = 0; row < s_image->num_rows; row++) {
++              for (col = 0; col < s_image->num_cols; col++) {
++                      ctx->out_tile_map[tile] =
++                              transform_tile_index(ctx, row, col);
++                      tile++;
++              }
++      }
++}
++
++static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
++                                  struct ipu_image_convert_image *image)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      const struct ipu_image_pixfmt *fmt = image->fmt;
++      unsigned int row, col, tile = 0;
++      u32 H, top, y_stride, uv_stride;
++      u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
++      u32 y_row_off, y_col_off, y_off;
++      u32 y_size, uv_size;
++
++      /* setup some convenience vars */
++      H = image->base.pix.height;
++
++      y_stride = image->stride;
++      uv_stride = y_stride / fmt->uv_width_dec;
++      if (fmt->uv_packed)
++              uv_stride *= 2;
++
++      y_size = H * y_stride;
++      uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
++
++      for (row = 0; row < image->num_rows; row++) {
++              top = image->tile[tile].top;
++              y_row_off = top * y_stride;
++              uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
++
++              for (col = 0; col < image->num_cols; col++) {
++                      y_col_off = image->tile[tile].left;
++                      uv_col_off = y_col_off / fmt->uv_width_dec;
++                      if (fmt->uv_packed)
++                              uv_col_off *= 2;
++
++                      y_off = y_row_off + y_col_off;
++                      uv_off = uv_row_off + uv_col_off;
++
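++                      /*
++                       * u_off and v_off are distances from the start of this
++                       * tile's Y data, which is the buffer base programmed
++                       * into the channel.
++                       */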
++                      u_off = y_size - y_off + uv_off;
++                      v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
++                      if (fmt->uv_swapped) {
++                              tmp = u_off;
++                              u_off = v_off;
++                              v_off = tmp;
++                      }
++
++                      image->tile[tile].offset = y_off;
++                      image->tile[tile].u_off = u_off;
++                      image->tile[tile++].v_off = v_off;
++
++                      if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
++                              dev_err(priv->ipu->dev,
++                                      "task %u: ctx %p: %s@[%d,%d]: "
++                                      "y_off %08x, u_off %08x, v_off %08x\n",
++                                      chan->ic_task, ctx,
++                                      image->type == IMAGE_CONVERT_IN ?
++                                      "Input" : "Output", row, col,
++                                      y_off, u_off, v_off);
++                              return -EINVAL;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
++                                  struct ipu_image_convert_image *image)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      const struct ipu_image_pixfmt *fmt = image->fmt;
++      unsigned int row, col, tile = 0;
++      u32 bpp, stride, offset;
++      u32 row_off, col_off;
++
++      /* setup some convenience vars */
++      stride = image->stride;
++      bpp = fmt->bpp;
++
++      for (row = 0; row < image->num_rows; row++) {
++              row_off = image->tile[tile].top * stride;
++
++              for (col = 0; col < image->num_cols; col++) {
++                      col_off = (image->tile[tile].left * bpp) >> 3;
++
++                      offset = row_off + col_off;
++
++                      image->tile[tile].offset = offset;
++                      image->tile[tile].u_off = 0;
++                      image->tile[tile++].v_off = 0;
++
++                      if (offset & 0x7) {
++                              dev_err(priv->ipu->dev,
++                                      "task %u: ctx %p: %s@[%d,%d]: "
++                                      "phys %08x\n",
++                                      chan->ic_task, ctx,
++                                      image->type == IMAGE_CONVERT_IN ?
++                                      "Input" : "Output", row, col,
++                                      row_off + col_off);
++                              return -EINVAL;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
++                            struct ipu_image_convert_image *image)
++{
++      if (image->fmt->planar)
++              return calc_tile_offsets_planar(ctx, image);
++
++      return calc_tile_offsets_packed(ctx, image);
++}
++
++/*
++ * Calculate the resizing ratio for the IC main processing section given input
++ * size, fixed downsizing coefficient, and output size.
++ * Either round to closest for the next tile's first pixel to minimize seams
++ * and distortion (for all but right column / bottom row), or round down to
++ * avoid sampling beyond the edges of the input image for this tile's last
++ * pixel.
++ * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
++ */
++static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
++                           u32 output_size, bool allow_overshoot)
++{
++      u32 downsized = input_size >> downsize_coeff;
++
++      if (allow_overshoot)
++              return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
++      else
++              return 8192 * (downsized - 1) / (output_size - 1);
++}
++
++/*
++ * Slightly modify resize coefficients per tile to hide the bilinear
++ * interpolator reset at tile borders, shifting the right / bottom edge
++ * by up to a half input pixel. This removes noticeable seams between
++ * tiles at higher upscaling factors.
++ */
++static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_tile *in_tile, *out_tile;
++      unsigned int col, row, tile_idx;
++      unsigned int last_output;
++
++      for (col = 0; col < ctx->in.num_cols; col++) {
++              bool closest = (col < ctx->in.num_cols - 1) &&
++                             !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
++              u32 resized_width;
++              u32 resize_coeff_h;
++              u32 in_width;
++
++              tile_idx = col;
++              in_tile = &ctx->in.tile[tile_idx];
++              out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
++
++              if (ipu_rot_mode_is_irt(ctx->rot_mode))
++                      resized_width = out_tile->height;
++              else
++                      resized_width = out_tile->width;
++
++              resize_coeff_h = calc_resize_coeff(in_tile->width,
++                                                 ctx->downsize_coeff_h,
++                                                 resized_width, closest);
++
++              dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
++                      __func__, col, resize_coeff_h);
++
++              /*
++               * With the horizontal scaling factor known, round up resized
++               * width (output width or height) to burst size.
++               */
++              resized_width = round_up(resized_width, 8);
++
++              /*
++               * Calculate input width from the last accessed input pixel
++               * given resized width and scaling coefficients. Round up to
++               * burst size.
++               */
++              last_output = resized_width - 1;
++              if (closest && ((last_output * resize_coeff_h) % 8192))
++                      last_output++;
++              in_width = round_up(
++                      (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
++                      << ctx->downsize_coeff_h, 8);
++
++              for (row = 0; row < ctx->in.num_rows; row++) {
++                      tile_idx = row * ctx->in.num_cols + col;
++                      in_tile = &ctx->in.tile[tile_idx];
++                      out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
++
++                      if (ipu_rot_mode_is_irt(ctx->rot_mode))
++                              out_tile->height = resized_width;
++                      else
++                              out_tile->width = resized_width;
++
++                      in_tile->width = in_width;
++              }
++
++              ctx->resize_coeffs_h[col] = resize_coeff_h;
++      }
++
++      for (row = 0; row < ctx->in.num_rows; row++) {
++              bool closest = (row < ctx->in.num_rows - 1) &&
++                             !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
++              u32 resized_height;
++              u32 resize_coeff_v;
++              u32 in_height;
++
++              tile_idx = row * ctx->in.num_cols;
++              in_tile = &ctx->in.tile[tile_idx];
++              out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
++
++              if (ipu_rot_mode_is_irt(ctx->rot_mode))
++                      resized_height = out_tile->width;
++              else
++                      resized_height = out_tile->height;
++
++              resize_coeff_v = calc_resize_coeff(in_tile->height,
++                                                 ctx->downsize_coeff_v,
++                                                 resized_height, closest);
++
++              dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
++                      __func__, row, resize_coeff_v);
++
++              /*
++               * With the vertical scaling factor known, round up resized
++               * height (output width or height) to IDMAC limitations.
++               */
++              resized_height = round_up(resized_height, 2);
++
++              /*
++               * Calculate input height from the last accessed input pixel
++               * given resized height and scaling coefficients. Align to
++               * IDMAC restrictions.
++               */
++              last_output = resized_height - 1;
++              if (closest && ((last_output * resize_coeff_v) % 8192))
++                      last_output++;
++              in_height = round_up(
++                      (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
++                      << ctx->downsize_coeff_v, 2);
++
++              for (col = 0; col < ctx->in.num_cols; col++) {
++                      tile_idx = row * ctx->in.num_cols + col;
++                      in_tile = &ctx->in.tile[tile_idx];
++                      out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
++
++                      if (ipu_rot_mode_is_irt(ctx->rot_mode))
++                              out_tile->width = resized_height;
++                      else
++                              out_tile->height = resized_height;
++
++                      in_tile->height = in_height;
++              }
++
++              ctx->resize_coeffs_v[row] = resize_coeff_v;
++      }
++}
++
++/*
++ * return the number of runs in given queue (pending_q or done_q)
++ * for this context. hold irqlock when calling.
++ */
++static int get_run_count(struct ipu_image_convert_ctx *ctx,
++                       struct list_head *q)
++{
++      struct ipu_image_convert_run *run;
++      int count = 0;
++
++      lockdep_assert_held(&ctx->chan->irqlock);
++
++      list_for_each_entry(run, q, list) {
++              if (run->ctx == ctx)
++                      count++;
++      }
++
++      return count;
++}
++
++static void convert_stop(struct ipu_image_convert_run *run)
++{
++      struct ipu_image_convert_ctx *ctx = run->ctx;
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
++              __func__, chan->ic_task, ctx, run);
++
++      /* disable IC tasks and the channels */
++      ipu_ic_task_disable(chan->ic);
++      ipu_idmac_disable_channel(chan->in_chan);
++      ipu_idmac_disable_channel(chan->out_chan);
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              ipu_idmac_disable_channel(chan->rotation_in_chan);
++              ipu_idmac_disable_channel(chan->rotation_out_chan);
++              ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
++      }
++
++      ipu_ic_disable(chan->ic);
++}
++
++static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
++                             struct ipuv3_channel *channel,
++                             struct ipu_image_convert_image *image,
++                             enum ipu_rotate_mode rot_mode,
++                             bool rot_swap_width_height,
++                             unsigned int tile)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      unsigned int burst_size;
++      u32 width, height, stride;
++      dma_addr_t addr0, addr1 = 0;
++      struct ipu_image tile_image;
++      unsigned int tile_idx[2];
++
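++      /*
++       * Buffer 0 addresses the given tile; buffer 1, only used when
++       * double buffering, is primed with tile 1 (or its mapped output
++       * tile).
++       */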
++      if (image->type == IMAGE_CONVERT_OUT) {
++              tile_idx[0] = ctx->out_tile_map[tile];
++              tile_idx[1] = ctx->out_tile_map[1];
++      } else {
++              tile_idx[0] = tile;
++              tile_idx[1] = 1;
++      }
++
++      if (rot_swap_width_height) {
++              width = image->tile[tile_idx[0]].height;
++              height = image->tile[tile_idx[0]].width;
++              stride = image->tile[tile_idx[0]].rot_stride;
++              addr0 = ctx->rot_intermediate[0].phys;
++              if (ctx->double_buffering)
++                      addr1 = ctx->rot_intermediate[1].phys;
++      } else {
++              width = image->tile[tile_idx[0]].width;
++              height = image->tile[tile_idx[0]].height;
++              stride = image->stride;
++              addr0 = image->base.phys0 +
++                      image->tile[tile_idx[0]].offset;
++              if (ctx->double_buffering)
++                      addr1 = image->base.phys0 +
++                              image->tile[tile_idx[1]].offset;
++      }
++
++      ipu_cpmem_zero(channel);
++
++      memset(&tile_image, 0, sizeof(tile_image));
++      tile_image.pix.width = tile_image.rect.width = width;
++      tile_image.pix.height = tile_image.rect.height = height;
++      tile_image.pix.bytesperline = stride;
++      tile_image.pix.pixelformat = image->fmt->fourcc;
++      tile_image.phys0 = addr0;
++      tile_image.phys1 = addr1;
++      if (image->fmt->planar && !rot_swap_width_height) {
++              tile_image.u_offset = image->tile[tile_idx[0]].u_off;
++              tile_image.v_offset = image->tile[tile_idx[0]].v_off;
++      }
++
++      ipu_cpmem_set_image(channel, &tile_image);
++
++      if (rot_mode)
++              ipu_cpmem_set_rotation(channel, rot_mode);
++
++      /*
++       * Skip writing U and V components to odd rows in the output
++       * channels for planar 4:2:0.
++       */
++      if ((channel == chan->out_chan ||
++           channel == chan->rotation_out_chan) &&
++          image->fmt->planar && image->fmt->uv_height_dec == 2)
++              ipu_cpmem_skip_odd_chroma_rows(channel);
++
++      if (channel == chan->rotation_in_chan ||
++          channel == chan->rotation_out_chan) {
++              burst_size = 8;
++              ipu_cpmem_set_block_mode(channel);
++      } else
++              burst_size = (width % 16) ? 8 : 16;
++
++      ipu_cpmem_set_burstsize(channel, burst_size);
++
++      ipu_ic_task_idma_init(chan->ic, channel, width, height,
++                            burst_size, rot_mode);
++
++      /*
++       * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
++       * only do this when there is no PRG present.
++       */
++      if (!channel->ipu->prg_priv)
++              ipu_cpmem_set_axi_id(channel, 1);
++
++      ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
++}
++
++static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
++{
++      struct ipu_image_convert_ctx *ctx = run->ctx;
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_image *s_image = &ctx->in;
++      struct ipu_image_convert_image *d_image = &ctx->out;
++      unsigned int dst_tile = ctx->out_tile_map[tile];
++      unsigned int dest_width, dest_height;
++      unsigned int col, row;
++      u32 rsc;
++      int ret;
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
++              __func__, chan->ic_task, ctx, run, tile, dst_tile);
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              /* swap width/height for resizer */
++              dest_width = d_image->tile[dst_tile].height;
++              dest_height = d_image->tile[dst_tile].width;
++      } else {
++              dest_width = d_image->tile[dst_tile].width;
++              dest_height = d_image->tile[dst_tile].height;
++      }
++
++      row = tile / s_image->num_cols;
++      col = tile % s_image->num_cols;
++
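++      /*
++       * Pack this tile's vertical and horizontal downsizing and bilinear
++       * resizing coefficients into a single word for the IC resizer.
++       */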
++      rsc =  (ctx->downsize_coeff_v << 30) |
++             (ctx->resize_coeffs_v[row] << 16) |
++             (ctx->downsize_coeff_h << 14) |
++             (ctx->resize_coeffs_h[col]);
++
++      dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
++              __func__, s_image->tile[tile].width,
++              s_image->tile[tile].height, dest_width, dest_height, rsc);
++
++      /* setup the IC resizer and CSC */
++      ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
++                                 s_image->tile[tile].width,
++                                 s_image->tile[tile].height,
++                                 dest_width,
++                                 dest_height,
++                                 rsc);
++      if (ret) {
++              dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
++              return ret;
++      }
++
++      /* init the source MEM-->IC PP IDMAC channel */
++      init_idmac_channel(ctx, chan->in_chan, s_image,
++                         IPU_ROTATE_NONE, false, tile);
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              /* init the IC PP-->MEM IDMAC channel */
++              init_idmac_channel(ctx, chan->out_chan, d_image,
++                                 IPU_ROTATE_NONE, true, tile);
++
++              /* init the MEM-->IC PP ROT IDMAC channel */
++              init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
++                                 ctx->rot_mode, true, tile);
++
++              /* init the destination IC PP ROT-->MEM IDMAC channel */
++              init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
++                                 IPU_ROTATE_NONE, false, tile);
++
++              /* now link IC PP-->MEM to MEM-->IC PP ROT */
++              ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
++      } else {
++              /* init the destination IC PP-->MEM IDMAC channel */
++              init_idmac_channel(ctx, chan->out_chan, d_image,
++                                 ctx->rot_mode, false, tile);
++      }
++
++      /* enable the IC */
++      ipu_ic_enable(chan->ic);
++
++      /* set buffers ready */
++      ipu_idmac_select_buffer(chan->in_chan, 0);
++      ipu_idmac_select_buffer(chan->out_chan, 0);
++      if (ipu_rot_mode_is_irt(ctx->rot_mode))
++              ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
++      if (ctx->double_buffering) {
++              ipu_idmac_select_buffer(chan->in_chan, 1);
++              ipu_idmac_select_buffer(chan->out_chan, 1);
++              if (ipu_rot_mode_is_irt(ctx->rot_mode))
++                      ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
++      }
++
++      /* enable the channels! */
++      ipu_idmac_enable_channel(chan->in_chan);
++      ipu_idmac_enable_channel(chan->out_chan);
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              ipu_idmac_enable_channel(chan->rotation_in_chan);
++              ipu_idmac_enable_channel(chan->rotation_out_chan);
++      }
++
++      ipu_ic_task_enable(chan->ic);
++
++      ipu_cpmem_dump(chan->in_chan);
++      ipu_cpmem_dump(chan->out_chan);
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              ipu_cpmem_dump(chan->rotation_in_chan);
++              ipu_cpmem_dump(chan->rotation_out_chan);
++      }
++
++      ipu_dump(priv->ipu);
++
++      return 0;
++}
++
++/* hold irqlock when calling */
++static int do_run(struct ipu_image_convert_run *run)
++{
++      struct ipu_image_convert_ctx *ctx = run->ctx;
++      struct ipu_image_convert_chan *chan = ctx->chan;
++
++      lockdep_assert_held(&chan->irqlock);
++
++      ctx->in.base.phys0 = run->in_phys;
++      ctx->out.base.phys0 = run->out_phys;
++
++      ctx->cur_buf_num = 0;
++      ctx->next_tile = 1;
++
++      /* remove run from pending_q and set as current */
++      list_del(&run->list);
++      chan->current_run = run;
++
++      return convert_start(run, 0);
++}
++
++/* hold irqlock when calling */
++static void run_next(struct ipu_image_convert_chan *chan)
++{
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_run *run, *tmp;
++      int ret;
++
++      lockdep_assert_held(&chan->irqlock);
++
++      list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
++              /* skip contexts that are aborting */
++              if (run->ctx->aborting) {
++                      dev_dbg(priv->ipu->dev,
++                              "%s: task %u: skipping aborting ctx %p run %p\n",
++                              __func__, chan->ic_task, run->ctx, run);
++                      continue;
++              }
++
++              ret = do_run(run);
++              if (!ret)
++                      break;
++
++              /*
++               * something went wrong with start, add the run
++               * to done q and continue to the next run in the
++               * pending q.
++               */
++              run->status = ret;
++              list_add_tail(&run->list, &chan->done_q);
++              chan->current_run = NULL;
++      }
++}
++
++static void empty_done_q(struct ipu_image_convert_chan *chan)
++{
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_run *run;
++      unsigned long flags;
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      while (!list_empty(&chan->done_q)) {
++              run = list_entry(chan->done_q.next,
++                               struct ipu_image_convert_run,
++                               list);
++
++              list_del(&run->list);
++
++              dev_dbg(priv->ipu->dev,
++                      "%s: task %u: completing ctx %p run %p with %d\n",
++                      __func__, chan->ic_task, run->ctx, run, run->status);
++
++              /* call the completion callback and free the run */
++              spin_unlock_irqrestore(&chan->irqlock, flags);
++              run->ctx->complete(run, run->ctx->complete_context);
++              spin_lock_irqsave(&chan->irqlock, flags);
++      }
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++}
++
++/*
++ * the bottom half thread clears out the done_q, calling the
++ * completion handler for each.
++ */
++static irqreturn_t do_bh(int irq, void *dev_id)
++{
++      struct ipu_image_convert_chan *chan = dev_id;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_ctx *ctx;
++      unsigned long flags;
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
++              chan->ic_task);
++
++      empty_done_q(chan);
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      /*
++       * the done_q is cleared out, signal any contexts
++       * that are aborting that abort can complete.
++       */
++      list_for_each_entry(ctx, &chan->ctx_list, list) {
++              if (ctx->aborting) {
++                      dev_dbg(priv->ipu->dev,
++                              "%s: task %u: signaling abort for ctx %p\n",
++                              __func__, chan->ic_task, ctx);
++                      complete_all(&ctx->aborted);
++              }
++      }
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
++              chan->ic_task);
++
++      return IRQ_HANDLED;
++}
++
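++/*
++ * Check whether the next tile uses different resize coefficients or tile
++ * dimensions than the current one, i.e. whether the IC has to be
++ * reconfigured before processing it.
++ */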
++static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
++{
++      unsigned int cur_tile = ctx->next_tile - 1;
++      unsigned int next_tile = ctx->next_tile;
++
++      if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
++          ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
++          ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
++          ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
++          ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
++          ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
++          ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
++          ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
++              return true;
++
++      return false;
++}
++
++/* hold irqlock when calling */
++static irqreturn_t do_irq(struct ipu_image_convert_run *run)
++{
++      struct ipu_image_convert_ctx *ctx = run->ctx;
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_tile *src_tile, *dst_tile;
++      struct ipu_image_convert_image *s_image = &ctx->in;
++      struct ipu_image_convert_image *d_image = &ctx->out;
++      struct ipuv3_channel *outch;
++      unsigned int dst_idx;
++
++      lockdep_assert_held(&chan->irqlock);
++
++      outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
++              chan->rotation_out_chan : chan->out_chan;
++
++      /*
++       * It is difficult to stop the channel DMA before the channels
++       * enter the paused state. Without double-buffering the channels
++       * are always in a paused state when the EOF irq occurs, so it
++       * is safe to stop the channels now. For double-buffering we
++       * just ignore the abort until the operation completes, when it
++       * is safe to shut down.
++       */
++      if (ctx->aborting && !ctx->double_buffering) {
++              convert_stop(run);
++              run->status = -EIO;
++              goto done;
++      }
++
++      if (ctx->next_tile == ctx->num_tiles) {
++              /*
++               * the conversion is complete
++               */
++              convert_stop(run);
++              run->status = 0;
++              goto done;
++      }
++
++      /*
++       * not done, place the next tile buffers.
++       */
++      if (!ctx->double_buffering) {
++              if (ic_settings_changed(ctx)) {
++                      convert_stop(run);
++                      convert_start(run, ctx->next_tile);
++              } else {
++                      src_tile = &s_image->tile[ctx->next_tile];
++                      dst_idx = ctx->out_tile_map[ctx->next_tile];
++                      dst_tile = &d_image->tile[dst_idx];
++
++                      ipu_cpmem_set_buffer(chan->in_chan, 0,
++                                           s_image->base.phys0 +
++                                           src_tile->offset);
++                      ipu_cpmem_set_buffer(outch, 0,
++                                           d_image->base.phys0 +
++                                           dst_tile->offset);
++                      if (s_image->fmt->planar)
++                              ipu_cpmem_set_uv_offset(chan->in_chan,
++                                                      src_tile->u_off,
++                                                      src_tile->v_off);
++                      if (d_image->fmt->planar)
++                              ipu_cpmem_set_uv_offset(outch,
++                                                      dst_tile->u_off,
++                                                      dst_tile->v_off);
++
++                      ipu_idmac_select_buffer(chan->in_chan, 0);
++                      ipu_idmac_select_buffer(outch, 0);
++              }
++      } else if (ctx->next_tile < ctx->num_tiles - 1) {
++
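++              /*
++               * Double buffering: load the tile after next into the buffer
++               * that just completed, then switch to the other buffer.
++               */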
++              src_tile = &s_image->tile[ctx->next_tile + 1];
++              dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
++              dst_tile = &d_image->tile[dst_idx];
++
++              ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
++                                   s_image->base.phys0 + src_tile->offset);
++              ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
++                                   d_image->base.phys0 + dst_tile->offset);
++
++              ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
++              ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
++
++              ctx->cur_buf_num ^= 1;
++      }
++
++      ctx->next_tile++;
++      return IRQ_HANDLED;
++done:
++      list_add_tail(&run->list, &chan->done_q);
++      chan->current_run = NULL;
++      run_next(chan);
++      return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t norotate_irq(int irq, void *data)
++{
++      struct ipu_image_convert_chan *chan = data;
++      struct ipu_image_convert_ctx *ctx;
++      struct ipu_image_convert_run *run;
++      unsigned long flags;
++      irqreturn_t ret;
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      /* get current run and its context */
++      run = chan->current_run;
++      if (!run) {
++              ret = IRQ_NONE;
++              goto out;
++      }
++
++      ctx = run->ctx;
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              /* this is a rotation operation, just ignore */
++              spin_unlock_irqrestore(&chan->irqlock, flags);
++              return IRQ_HANDLED;
++      }
++
++      ret = do_irq(run);
++out:
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++      return ret;
++}
++
++static irqreturn_t rotate_irq(int irq, void *data)
++{
++      struct ipu_image_convert_chan *chan = data;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_ctx *ctx;
++      struct ipu_image_convert_run *run;
++      unsigned long flags;
++      irqreturn_t ret;
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      /* get current run and its context */
++      run = chan->current_run;
++      if (!run) {
++              ret = IRQ_NONE;
++              goto out;
++      }
++
++      ctx = run->ctx;
++
++      if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              /* this was NOT a rotation operation, shouldn't happen */
++              dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
++              spin_unlock_irqrestore(&chan->irqlock, flags);
++              return IRQ_HANDLED;
++      }
++
++      ret = do_irq(run);
++out:
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++      return ret;
++}
++
++/*
++ * try to force the completion of runs for this ctx. Called when
++ * abort wait times out in ipu_image_convert_abort().
++ */
++static void force_abort(struct ipu_image_convert_ctx *ctx)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_run *run;
++      unsigned long flags;
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      run = chan->current_run;
++      if (run && run->ctx == ctx) {
++              convert_stop(run);
++              run->status = -EIO;
++              list_add_tail(&run->list, &chan->done_q);
++              chan->current_run = NULL;
++              run_next(chan);
++      }
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++
++      empty_done_q(chan);
++}
++
++static void release_ipu_resources(struct ipu_image_convert_chan *chan)
++{
++      if (chan->out_eof_irq >= 0)
++              free_irq(chan->out_eof_irq, chan);
++      if (chan->rot_out_eof_irq >= 0)
++              free_irq(chan->rot_out_eof_irq, chan);
++
++      if (!IS_ERR_OR_NULL(chan->in_chan))
++              ipu_idmac_put(chan->in_chan);
++      if (!IS_ERR_OR_NULL(chan->out_chan))
++              ipu_idmac_put(chan->out_chan);
++      if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
++              ipu_idmac_put(chan->rotation_in_chan);
++      if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
++              ipu_idmac_put(chan->rotation_out_chan);
++      if (!IS_ERR_OR_NULL(chan->ic))
++              ipu_ic_put(chan->ic);
++
++      chan->in_chan = chan->out_chan = chan->rotation_in_chan =
++              chan->rotation_out_chan = NULL;
++      chan->out_eof_irq = chan->rot_out_eof_irq = -1;
++}
++
++static int get_ipu_resources(struct ipu_image_convert_chan *chan)
++{
++      const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      int ret;
++
++      /* get IC */
++      chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
++      if (IS_ERR(chan->ic)) {
++              dev_err(priv->ipu->dev, "could not acquire IC\n");
++              ret = PTR_ERR(chan->ic);
++              goto err;
++      }
++
++      /* get IDMAC channels */
++      chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
++      chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
++      if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
++              dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
++              ret = -EBUSY;
++              goto err;
++      }
++
++      chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
++      chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
++      if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
++              dev_err(priv->ipu->dev,
++                      "could not acquire idmac rotation channels\n");
++              ret = -EBUSY;
++              goto err;
++      }
++
++      /* acquire the EOF interrupts */
++      chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
++                                                chan->out_chan,
++                                                IPU_IRQ_EOF);
++
++      ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
++                                 0, "ipu-ic", chan);
++      if (ret < 0) {
++              dev_err(priv->ipu->dev, "could not acquire irq %d\n",
++                       chan->out_eof_irq);
++              chan->out_eof_irq = -1;
++              goto err;
++      }
++
++      chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
++                                                   chan->rotation_out_chan,
++                                                   IPU_IRQ_EOF);
++
++      ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
++                                 0, "ipu-ic", chan);
++      if (ret < 0) {
++              dev_err(priv->ipu->dev, "could not acquire irq %d\n",
++                      chan->rot_out_eof_irq);
++              chan->rot_out_eof_irq = -1;
++              goto err;
++      }
++
++      return 0;
++err:
++      release_ipu_resources(chan);
++      return ret;
++}
++
++static int fill_image(struct ipu_image_convert_ctx *ctx,
++                    struct ipu_image_convert_image *ic_image,
++                    struct ipu_image *image,
++                    enum ipu_image_convert_type type)
++{
++      struct ipu_image_convert_priv *priv = ctx->chan->priv;
++
++      ic_image->base = *image;
++      ic_image->type = type;
++
++      ic_image->fmt = get_format(image->pix.pixelformat);
++      if (!ic_image->fmt) {
++              dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
++                      type == IMAGE_CONVERT_OUT ? "Output" : "Input");
++              return -EINVAL;
++      }
++
++      if (ic_image->fmt->planar)
++              ic_image->stride = ic_image->base.pix.width;
++      else
++              ic_image->stride = ic_image->base.pix.bytesperline;
++
++      return 0;
++}
++
++/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
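++/* align is the base-2 logarithm of the required alignment */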
++static unsigned int clamp_align(unsigned int x, unsigned int min,
++                              unsigned int max, unsigned int align)
++{
++      /* Bits that must be zero to be aligned */
++      unsigned int mask = ~((1 << align) - 1);
++
++      /* Clamp to aligned min and max */
++      x = clamp(x, (min + ~mask) & mask, max & mask);
++
++      /* Round to nearest aligned value */
++      if (align)
++              x = (x + (1 << (align - 1))) & mask;
++
++      return x;
++}
++
++/* Adjusts input/output images to IPU restrictions */
++void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
++                            enum ipu_rotate_mode rot_mode)
++{
++      const struct ipu_image_pixfmt *infmt, *outfmt;
++      u32 w_align_out, h_align_out;
++      u32 w_align_in, h_align_in;
++
++      infmt = get_format(in->pix.pixelformat);
++      outfmt = get_format(out->pix.pixelformat);
++
++      /* set some default pixel formats if needed */
++      if (!infmt) {
++              in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
++              infmt = get_format(V4L2_PIX_FMT_RGB24);
++      }
++      if (!outfmt) {
++              out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
++              outfmt = get_format(V4L2_PIX_FMT_RGB24);
++      }
++
++      /* image converter does not handle fields */
++      in->pix.field = out->pix.field = V4L2_FIELD_NONE;
++
++      /* resizer cannot downsize more than 4:1 */
++      if (ipu_rot_mode_is_irt(rot_mode)) {
++              out->pix.height = max_t(__u32, out->pix.height,
++                                      in->pix.width / 4);
++              out->pix.width = max_t(__u32, out->pix.width,
++                                     in->pix.height / 4);
++      } else {
++              out->pix.width = max_t(__u32, out->pix.width,
++                                     in->pix.width / 4);
++              out->pix.height = max_t(__u32, out->pix.height,
++                                      in->pix.height / 4);
++      }
++
++      /* align input width/height */
++      w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
++                                          rot_mode));
++      h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
++                                           rot_mode));
++      in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
++                                  w_align_in);
++      in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
++                                   h_align_in);
++
++      /* align output width/height */
++      w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
++                                           rot_mode));
++      h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
++                                            rot_mode));
++      out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
++                                   w_align_out);
++      out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
++                                    h_align_out);
++
++      /* set input/output strides and image sizes */
++      in->pix.bytesperline = infmt->planar ?
++              clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
++                          w_align_in) :
++              clamp_align((in->pix.width * infmt->bpp) >> 3,
++                          ((2 << w_align_in) * infmt->bpp) >> 3,
++                          (MAX_W * infmt->bpp) >> 3,
++                          w_align_in);
++      in->pix.sizeimage = infmt->planar ?
++              (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
++              in->pix.height * in->pix.bytesperline;
++      out->pix.bytesperline = outfmt->planar ? out->pix.width :
++              (out->pix.width * outfmt->bpp) >> 3;
++      out->pix.sizeimage = outfmt->planar ?
++              (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
++              out->pix.height * out->pix.bytesperline;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
++
++/*
++ * this is used by ipu_image_convert_prepare() to verify that the set
++ * input and output images are valid before starting the conversion.
++ * Clients can also call it before calling ipu_image_convert_prepare().
++ */
++int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
++                           enum ipu_rotate_mode rot_mode)
++{
++      struct ipu_image testin, testout;
++
++      testin = *in;
++      testout = *out;
++
++      ipu_image_convert_adjust(&testin, &testout, rot_mode);
++
++      if (testin.pix.width != in->pix.width ||
++          testin.pix.height != in->pix.height ||
++          testout.pix.width != out->pix.width ||
++          testout.pix.height != out->pix.height)
++              return -EINVAL;
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
++
++/*
++ * Call ipu_image_convert_prepare() to prepare for the conversion of
++ * given images and rotation mode. Returns a new conversion context.
++ */
++struct ipu_image_convert_ctx *
++ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
++                        struct ipu_image *in, struct ipu_image *out,
++                        enum ipu_rotate_mode rot_mode,
++                        ipu_image_convert_cb_t complete,
++                        void *complete_context)
++{
++      struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
++      struct ipu_image_convert_image *s_image, *d_image;
++      struct ipu_image_convert_chan *chan;
++      struct ipu_image_convert_ctx *ctx;
++      unsigned long flags;
++      unsigned int i;
++      bool get_res;
++      int ret;
++
++      if (!in || !out || !complete ||
++          (ic_task != IC_TASK_VIEWFINDER &&
++           ic_task != IC_TASK_POST_PROCESSOR))
++              return ERR_PTR(-EINVAL);
++
++      /* verify the in/out images before continuing */
++      ret = ipu_image_convert_verify(in, out, rot_mode);
++      if (ret) {
++              dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
++                      __func__);
++              return ERR_PTR(ret);
++      }
++
++      chan = &priv->chan[ic_task];
++
++      ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++      if (!ctx)
++              return ERR_PTR(-ENOMEM);
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
++              chan->ic_task, ctx);
++
++      ctx->chan = chan;
++      init_completion(&ctx->aborted);
++
++      ctx->rot_mode = rot_mode;
++
++      /* Sets ctx->in.num_rows/cols as well */
++      ret = calc_image_resize_coefficients(ctx, in, out);
++      if (ret)
++              goto out_free;
++
++      s_image = &ctx->in;
++      d_image = &ctx->out;
++
++      /* set tiling and rotation */
++      if (ipu_rot_mode_is_irt(rot_mode)) {
++              d_image->num_rows = s_image->num_cols;
++              d_image->num_cols = s_image->num_rows;
++      } else {
++              d_image->num_rows = s_image->num_rows;
++              d_image->num_cols = s_image->num_cols;
++      }
++
++      ctx->num_tiles = d_image->num_cols * d_image->num_rows;
++
++      ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
++      if (ret)
++              goto out_free;
++      ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
++      if (ret)
++              goto out_free;
++
++      calc_out_tile_map(ctx);
++
++      find_seams(ctx, s_image, d_image);
++
++      ret = calc_tile_dimensions(ctx, s_image);
++      if (ret)
++              goto out_free;
++
++      ret = calc_tile_offsets(ctx, s_image);
++      if (ret)
++              goto out_free;
++
++      calc_tile_dimensions(ctx, d_image);
++      ret = calc_tile_offsets(ctx, d_image);
++      if (ret)
++              goto out_free;
++
++      calc_tile_resize_coefficients(ctx);
++
++      ret = ipu_ic_calc_csc(&ctx->csc,
++                      s_image->base.pix.ycbcr_enc,
++                      s_image->base.pix.quantization,
++                      ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
++                      d_image->base.pix.ycbcr_enc,
++                      d_image->base.pix.quantization,
++                      ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
++      if (ret)
++              goto out_free;
++
++      dump_format(ctx, s_image);
++      dump_format(ctx, d_image);
++
++      ctx->complete = complete;
++      ctx->complete_context = complete_context;
++
++      /*
++       * Can we use double-buffering for this operation? If there is
++       * only one tile (the whole image can be converted in a single
++       * operation) there's no point in using double-buffering. Also,
++       * the IPU's IDMAC channels allow only a single U and V plane
++       * offset shared between both buffers, but these offsets change
++       * for every tile, and therefore would have to be updated for
++       * each buffer, which is not possible. So double-buffering is
++       * impossible when either the source or destination image uses
++       * a planar format (YUV420, YUV422P, etc.). Further, differently
++       * sized tiles or different resizing coefficients per tile
++       * prevent double-buffering as well.
++       */
++      ctx->double_buffering = (ctx->num_tiles > 1 &&
++                               !s_image->fmt->planar &&
++                               !d_image->fmt->planar);
++      for (i = 1; i < ctx->num_tiles; i++) {
++              if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
++                  ctx->in.tile[i].height != ctx->in.tile[0].height ||
++                  ctx->out.tile[i].width != ctx->out.tile[0].width ||
++                  ctx->out.tile[i].height != ctx->out.tile[0].height) {
++                      ctx->double_buffering = false;
++                      break;
++              }
++      }
++      for (i = 1; i < ctx->in.num_cols; i++) {
++              if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
++                      ctx->double_buffering = false;
++                      break;
++              }
++      }
++      for (i = 1; i < ctx->in.num_rows; i++) {
++              if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
++                      ctx->double_buffering = false;
++                      break;
++              }
++      }
++
++      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
++              unsigned long intermediate_size = d_image->tile[0].size;
++
++              for (i = 1; i < ctx->num_tiles; i++) {
++                      if (d_image->tile[i].size > intermediate_size)
++                              intermediate_size = d_image->tile[i].size;
++              }
++
++              ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
++                                  intermediate_size);
++              if (ret)
++                      goto out_free;
++              if (ctx->double_buffering) {
++                      ret = alloc_dma_buf(priv,
++                                          &ctx->rot_intermediate[1],
++                                          intermediate_size);
++                      if (ret)
++                              goto out_free_dmabuf0;
++              }
++      }
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      get_res = list_empty(&chan->ctx_list);
++
++      list_add_tail(&ctx->list, &chan->ctx_list);
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++
++      if (get_res) {
++              ret = get_ipu_resources(chan);
++              if (ret)
++                      goto out_free_dmabuf1;
++      }
++
++      return ctx;
++
++out_free_dmabuf1:
++      free_dma_buf(priv, &ctx->rot_intermediate[1]);
++      spin_lock_irqsave(&chan->irqlock, flags);
++      list_del(&ctx->list);
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++out_free_dmabuf0:
++      free_dma_buf(priv, &ctx->rot_intermediate[0]);
++out_free:
++      kfree(ctx);
++      return ERR_PTR(ret);
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
++
++/*
++ * Carry out a single image conversion run. Only the physical addresses of
++ * the input and output image buffers are needed. The conversion context
++ * must have been created previously with ipu_image_convert_prepare().
++ */
++int ipu_image_convert_queue(struct ipu_image_convert_run *run)
++{
++      struct ipu_image_convert_chan *chan;
++      struct ipu_image_convert_priv *priv;
++      struct ipu_image_convert_ctx *ctx;
++      unsigned long flags;
++      int ret = 0;
++
++      if (!run || !run->ctx || !run->in_phys || !run->out_phys)
++              return -EINVAL;
++
++      ctx = run->ctx;
++      chan = ctx->chan;
++      priv = chan->priv;
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
++              chan->ic_task, ctx, run);
++
++      INIT_LIST_HEAD(&run->list);
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      if (ctx->aborting) {
++              ret = -EIO;
++              goto unlock;
++      }
++
++      list_add_tail(&run->list, &chan->pending_q);
++
++      if (!chan->current_run) {
++              ret = do_run(run);
++              if (ret)
++                      chan->current_run = NULL;
++      }
++unlock:
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
++
++/* Abort any active or pending conversions for this context */
++static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      struct ipu_image_convert_run *run, *active_run, *tmp;
++      unsigned long flags;
++      int run_count, ret;
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      /* move all remaining pending runs in this context to done_q */
++      list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
++              if (run->ctx != ctx)
++                      continue;
++              run->status = -EIO;
++              list_move_tail(&run->list, &chan->done_q);
++      }
++
++      run_count = get_run_count(ctx, &chan->done_q);
++      active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
++              chan->current_run : NULL;
++
++      if (active_run)
++              reinit_completion(&ctx->aborted);
++
++      ctx->aborting = true;
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++
++      if (!run_count && !active_run) {
++              dev_dbg(priv->ipu->dev,
++                      "%s: task %u: no abort needed for ctx %p\n",
++                      __func__, chan->ic_task, ctx);
++              return;
++      }
++
++      if (!active_run) {
++              empty_done_q(chan);
++              return;
++      }
++
++      dev_dbg(priv->ipu->dev,
++              "%s: task %u: wait for completion: %d runs\n",
++              __func__, chan->ic_task, run_count);
++
++      ret = wait_for_completion_timeout(&ctx->aborted,
++                                        msecs_to_jiffies(10000));
++      if (ret == 0) {
++              dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
++              force_abort(ctx);
++      }
++}
++
++void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
++{
++      __ipu_image_convert_abort(ctx);
++      ctx->aborting = false;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
++
++/* Unprepare image conversion context */
++void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
++{
++      struct ipu_image_convert_chan *chan = ctx->chan;
++      struct ipu_image_convert_priv *priv = chan->priv;
++      unsigned long flags;
++      bool put_res;
++
++      /* make sure no runs are hanging around */
++      __ipu_image_convert_abort(ctx);
++
++      dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
++              chan->ic_task, ctx);
++
++      spin_lock_irqsave(&chan->irqlock, flags);
++
++      list_del(&ctx->list);
++
++      put_res = list_empty(&chan->ctx_list);
++
++      spin_unlock_irqrestore(&chan->irqlock, flags);
++
++      if (put_res)
++              release_ipu_resources(chan);
++
++      free_dma_buf(priv, &ctx->rot_intermediate[1]);
++      free_dma_buf(priv, &ctx->rot_intermediate[0]);
++
++      kfree(ctx);
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
++
++/*
++ * "Canned" asynchronous single image conversion. Allocates and returns
++ * a new conversion run.  On successful return the caller must free the
++ * run and call ipu_image_convert_unprepare() after conversion completes.
++ */
++struct ipu_image_convert_run *
++ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
++                struct ipu_image *in, struct ipu_image *out,
++                enum ipu_rotate_mode rot_mode,
++                ipu_image_convert_cb_t complete,
++                void *complete_context)
++{
++      struct ipu_image_convert_ctx *ctx;
++      struct ipu_image_convert_run *run;
++      int ret;
++
++      ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
++                                      complete, complete_context);
++      if (IS_ERR(ctx))
++              return ERR_CAST(ctx);
++
++      run = kzalloc(sizeof(*run), GFP_KERNEL);
++      if (!run) {
++              ipu_image_convert_unprepare(ctx);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      run->ctx = ctx;
++      run->in_phys = in->phys0;
++      run->out_phys = out->phys0;
++
++      ret = ipu_image_convert_queue(run);
++      if (ret) {
++              ipu_image_convert_unprepare(ctx);
++              kfree(run);
++              return ERR_PTR(ret);
++      }
++
++      return run;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert);
++
++/* "Canned" synchronous single image conversion */
++static void image_convert_sync_complete(struct ipu_image_convert_run *run,
++                                      void *data)
++{
++      struct completion *comp = data;
++
++      complete(comp);
++}
++
++int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
++                         struct ipu_image *in, struct ipu_image *out,
++                         enum ipu_rotate_mode rot_mode)
++{
++      struct ipu_image_convert_run *run;
++      struct completion comp;
++      int ret;
++
++      init_completion(&comp);
++
++      run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
++                              image_convert_sync_complete, &comp);
++      if (IS_ERR(run))
++              return PTR_ERR(run);
++
++      ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
++      ret = (ret == 0) ? -ETIMEDOUT : 0;
++
++      ipu_image_convert_unprepare(run->ctx);
++      kfree(run);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
++
++int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
++{
++      struct ipu_image_convert_priv *priv;
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      ipu->image_convert_priv = priv;
++      priv->ipu = ipu;
++
++      for (i = 0; i < IC_NUM_TASKS; i++) {
++              struct ipu_image_convert_chan *chan = &priv->chan[i];
++
++              chan->ic_task = i;
++              chan->priv = priv;
++              chan->dma_ch = &image_convert_dma_chan[i];
++              chan->out_eof_irq = -1;
++              chan->rot_out_eof_irq = -1;
++
++              spin_lock_init(&chan->irqlock);
++              INIT_LIST_HEAD(&chan->ctx_list);
++              INIT_LIST_HEAD(&chan->pending_q);
++              INIT_LIST_HEAD(&chan->done_q);
++      }
++
++      return 0;
++}
++
++void ipu_image_convert_exit(struct ipu_soc *ipu)
++{
++}
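
For orientation, the asynchronous conversion API exported by the file above (prepare, queue, completion callback, unprepare) can be driven as in the following sketch. This is an editorial illustration only, not part of the patch: the my_* names are hypothetical, the ipu pointer and the DMA-backed images are assumed to come from the caller, and the flow simply mirrors what ipu_image_convert_sync() does internally.

static void my_convert_done(struct ipu_image_convert_run *run, void *data)
{
	/* called from the EOF interrupt path once all tiles are done */
	complete((struct completion *)data);
}

static int my_convert_once(struct ipu_soc *ipu, struct ipu_image *in,
			   struct ipu_image *out)
{
	struct ipu_image_convert_run run = {};
	struct ipu_image_convert_ctx *ctx;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	/* let the driver round width/height/stride to what the IC can do */
	ipu_image_convert_adjust(in, out, IPU_ROTATE_NONE);

	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR, in, out,
					IPU_ROTATE_NONE, my_convert_done,
					&done);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	run.ctx = ctx;
	run.in_phys = in->phys0;
	run.out_phys = out->phys0;

	ret = ipu_image_convert_queue(&run);
	if (!ret)
		wait_for_completion(&done);

	ipu_image_convert_unprepare(ctx);
	return ret;
}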
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-pre.c
+@@ -0,0 +1,346 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2017 Lucas Stach, Pengutronix
++ */
++
++#include <drm/drm_fourcc.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <video/imx-ipu-v3.h>
++
++#include "ipu-prv.h"
++
++#define IPU_PRE_MAX_WIDTH     2048
++#define IPU_PRE_NUM_SCANLINES 8
++
++#define IPU_PRE_CTRL                                  0x000
++#define IPU_PRE_CTRL_SET                              0x004
++#define  IPU_PRE_CTRL_ENABLE                          (1 << 0)
++#define  IPU_PRE_CTRL_BLOCK_EN                                (1 << 1)
++#define  IPU_PRE_CTRL_BLOCK_16                                (1 << 2)
++#define  IPU_PRE_CTRL_SDW_UPDATE                      (1 << 4)
++#define  IPU_PRE_CTRL_VFLIP                           (1 << 5)
++#define  IPU_PRE_CTRL_SO                              (1 << 6)
++#define  IPU_PRE_CTRL_INTERLACED_FIELD                        (1 << 7)
++#define  IPU_PRE_CTRL_HANDSHAKE_EN                    (1 << 8)
++#define  IPU_PRE_CTRL_HANDSHAKE_LINE_NUM(v)           ((v & 0x3) << 9)
++#define  IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN         (1 << 11)
++#define  IPU_PRE_CTRL_EN_REPEAT                               (1 << 28)
++#define  IPU_PRE_CTRL_TPR_REST_SEL                    (1 << 29)
++#define  IPU_PRE_CTRL_CLKGATE                         (1 << 30)
++#define  IPU_PRE_CTRL_SFTRST                          (1 << 31)
++
++#define IPU_PRE_CUR_BUF                                       0x030
++
++#define IPU_PRE_NEXT_BUF                              0x040
++
++#define IPU_PRE_TPR_CTRL                              0x070
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT(v)              ((v & 0xff) << 0)
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK            0xff
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT          (1 << 0)
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SPLIT_BUF               (1 << 4)
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF      (1 << 5)
++#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED     (1 << 6)
++
++#define IPU_PRE_PREFETCH_ENG_CTRL                     0x080
++#define  IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN            (1 << 0)
++#define  IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(v)                ((v & 0x7) << 1)
++#define  IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(v)    ((v & 0x3) << 4)
++#define  IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(v)  ((v & 0x7) << 8)
++#define  IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS           (1 << 11)
++#define  IPU_PRE_PREF_ENG_CTRL_FIELD_INVERSE          (1 << 12)
++#define  IPU_PRE_PREF_ENG_CTRL_PARTIAL_UV_SWAP                (1 << 14)
++#define  IPU_PRE_PREF_ENG_CTRL_TPR_COOR_OFFSET_EN     (1 << 15)
++
++#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE                       0x0a0
++#define  IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(v)     ((v & 0xffff) << 0)
++#define  IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(v)    ((v & 0xffff) << 16)
++
++#define IPU_PRE_PREFETCH_ENG_PITCH                    0x0d0
++#define  IPU_PRE_PREFETCH_ENG_PITCH_Y(v)              ((v & 0xffff) << 0)
++#define  IPU_PRE_PREFETCH_ENG_PITCH_UV(v)             ((v & 0xffff) << 16)
++
++#define IPU_PRE_STORE_ENG_CTRL                                0x110
++#define  IPU_PRE_STORE_ENG_CTRL_STORE_EN              (1 << 0)
++#define  IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v)               ((v & 0x7) << 1)
++#define  IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v)  ((v & 0x3) << 4)
++
++#define IPU_PRE_STORE_ENG_STATUS                      0x120
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK  0xffff
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK  0x3fff
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL     (1 << 30)
++#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIELD         (1 << 31)
++
++#define IPU_PRE_STORE_ENG_SIZE                                0x130
++#define  IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v)                ((v & 0xffff) << 0)
++#define  IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v)               ((v & 0xffff) << 16)
++
++#define IPU_PRE_STORE_ENG_PITCH                               0x140
++#define  IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(v)         ((v & 0xffff) << 0)
++
++#define IPU_PRE_STORE_ENG_ADDR                                0x150
++
++struct ipu_pre {
++      struct list_head        list;
++      struct device           *dev;
++
++      void __iomem            *regs;
++      struct clk              *clk_axi;
++      struct gen_pool         *iram;
++
++      dma_addr_t              buffer_paddr;
++      void                    *buffer_virt;
++      bool                    in_use;
++      unsigned int            safe_window_end;
++      unsigned int            last_bufaddr;
++};
++
++static DEFINE_MUTEX(ipu_pre_list_mutex);
++static LIST_HEAD(ipu_pre_list);
++static int available_pres;
++
++int ipu_pre_get_available_count(void)
++{
++      return available_pres;
++}
++
++struct ipu_pre *
++ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
++{
++      struct device_node *pre_node = of_parse_phandle(dev->of_node,
++                                                      name, index);
++      struct ipu_pre *pre;
++
++      mutex_lock(&ipu_pre_list_mutex);
++      list_for_each_entry(pre, &ipu_pre_list, list) {
++              if (pre_node == pre->dev->of_node) {
++                      mutex_unlock(&ipu_pre_list_mutex);
++                      device_link_add(dev, pre->dev,
++                                      DL_FLAG_AUTOREMOVE_CONSUMER);
++                      of_node_put(pre_node);
++                      return pre;
++              }
++      }
++      mutex_unlock(&ipu_pre_list_mutex);
++
++      of_node_put(pre_node);
++
++      return NULL;
++}
++
++int ipu_pre_get(struct ipu_pre *pre)
++{
++      u32 val;
++
++      if (pre->in_use)
++              return -EBUSY;
++
++      /* first get the engine out of reset and remove clock gating */
++      writel(0, pre->regs + IPU_PRE_CTRL);
++
++      /* init defaults that should be applied to all streams */
++      val = IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN |
++            IPU_PRE_CTRL_HANDSHAKE_EN |
++            IPU_PRE_CTRL_TPR_REST_SEL |
++            IPU_PRE_CTRL_SDW_UPDATE;
++      writel(val, pre->regs + IPU_PRE_CTRL);
++
++      pre->in_use = true;
++      return 0;
++}
++
++void ipu_pre_put(struct ipu_pre *pre)
++{
++      writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
++
++      pre->in_use = false;
++}
++
++void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
++                     unsigned int height, unsigned int stride, u32 format,
++                     uint64_t modifier, unsigned int bufaddr)
++{
++      const struct drm_format_info *info = drm_format_info(format);
++      u32 active_bpp = info->cpp[0] >> 1;
++      u32 val;
++
++      /* calculate safe window for ctrl register updates */
++      if (modifier == DRM_FORMAT_MOD_LINEAR)
++              pre->safe_window_end = height - 2;
++      else
++              pre->safe_window_end = DIV_ROUND_UP(height, 4) - 1;
++
++      writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
++      writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
++      pre->last_bufaddr = bufaddr;
++
++      val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
++            IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
++            IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(4) |
++            IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS |
++            IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN;
++      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_CTRL);
++
++      val = IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(width) |
++            IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(height);
++      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_INPUT_SIZE);
++
++      val = IPU_PRE_PREFETCH_ENG_PITCH_Y(stride);
++      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_PITCH);
++
++      val = IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(active_bpp) |
++            IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(4) |
++            IPU_PRE_STORE_ENG_CTRL_STORE_EN;
++      writel(val, pre->regs + IPU_PRE_STORE_ENG_CTRL);
++
++      val = IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(width) |
++            IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(height);
++      writel(val, pre->regs + IPU_PRE_STORE_ENG_SIZE);
++
++      val = IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(stride);
++      writel(val, pre->regs + IPU_PRE_STORE_ENG_PITCH);
++
++      writel(pre->buffer_paddr, pre->regs + IPU_PRE_STORE_ENG_ADDR);
++
++      val = readl(pre->regs + IPU_PRE_TPR_CTRL);
++      val &= ~IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK;
++      if (modifier != DRM_FORMAT_MOD_LINEAR) {
++              /* only support single buffer formats for now */
++              val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF;
++              if (modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)
++                      val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED;
++              if (info->cpp[0] == 2)
++                      val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT;
++      }
++      writel(val, pre->regs + IPU_PRE_TPR_CTRL);
++
++      val = readl(pre->regs + IPU_PRE_CTRL);
++      val |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE |
++             IPU_PRE_CTRL_SDW_UPDATE;
++      if (modifier == DRM_FORMAT_MOD_LINEAR)
++              val &= ~IPU_PRE_CTRL_BLOCK_EN;
++      else
++              val |= IPU_PRE_CTRL_BLOCK_EN;
++      writel(val, pre->regs + IPU_PRE_CTRL);
++}
++
++void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
++{
++      unsigned long timeout = jiffies + msecs_to_jiffies(5);
++      unsigned short current_yblock;
++      u32 val;
++
++      if (bufaddr == pre->last_bufaddr)
++              return;
++
++      writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
++      pre->last_bufaddr = bufaddr;
++
++      do {
++              if (time_after(jiffies, timeout)) {
++                      dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
++                      return;
++              }
++
++              val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
++              current_yblock =
++                      (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
++                      IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
++      } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
++
++      writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
++}
++
++bool ipu_pre_update_pending(struct ipu_pre *pre)
++{
++      return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
++                IPU_PRE_CTRL_SDW_UPDATE);
++}
++
++u32 ipu_pre_get_baddr(struct ipu_pre *pre)
++{
++      return (u32)pre->buffer_paddr;
++}
++
++static int ipu_pre_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct resource *res;
++      struct ipu_pre *pre;
++
++      pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
++      if (!pre)
++              return -ENOMEM;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      pre->regs = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(pre->regs))
++              return PTR_ERR(pre->regs);
++
++      pre->clk_axi = devm_clk_get(dev, "axi");
++      if (IS_ERR(pre->clk_axi))
++              return PTR_ERR(pre->clk_axi);
++
++      pre->iram = of_gen_pool_get(dev->of_node, "fsl,iram", 0);
++      if (!pre->iram)
++              return -EPROBE_DEFER;
++
++      /*
++       * Allocate the IRAM buffer with the maximum size (IPU_PRE_MAX_WIDTH *
++       * IPU_PRE_NUM_SCANLINES * 4 bytes = 64 KiB). This could be made
++       * dynamic, but as there is no other user of this IRAM region and all
++       * maximum-sized buffers fit into it, there is no need yet.
++       */
++      pre->buffer_virt = gen_pool_dma_alloc(pre->iram, IPU_PRE_MAX_WIDTH *
++                                            IPU_PRE_NUM_SCANLINES * 4,
++                                            &pre->buffer_paddr);
++      if (!pre->buffer_virt)
++              return -ENOMEM;
++
++      clk_prepare_enable(pre->clk_axi);
++
++      pre->dev = dev;
++      platform_set_drvdata(pdev, pre);
++      mutex_lock(&ipu_pre_list_mutex);
++      list_add(&pre->list, &ipu_pre_list);
++      available_pres++;
++      mutex_unlock(&ipu_pre_list_mutex);
++
++      return 0;
++}
++
++static int ipu_pre_remove(struct platform_device *pdev)
++{
++      struct ipu_pre *pre = platform_get_drvdata(pdev);
++
++      mutex_lock(&ipu_pre_list_mutex);
++      list_del(&pre->list);
++      available_pres--;
++      mutex_unlock(&ipu_pre_list_mutex);
++
++      clk_disable_unprepare(pre->clk_axi);
++
++      if (pre->buffer_virt)
++              gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
++                            IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
++      return 0;
++}
++
++static const struct of_device_id ipu_pre_dt_ids[] = {
++      { .compatible = "fsl,imx6qp-pre", },
++      { /* sentinel */ },
++};
++
++struct platform_driver ipu_pre_drv = {
++      .probe          = ipu_pre_probe,
++      .remove         = ipu_pre_remove,
++      .driver         = {
++              .name   = "imx-ipu-pre",
++              .of_match_table = ipu_pre_dt_ids,
++      },
++};
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-prg.c
+@@ -0,0 +1,483 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2016-2017 Lucas Stach, Pengutronix
++ */
++
++#include <drm/drm_fourcc.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/iopoll.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++#include <linux/regmap.h>
++#include <video/imx-ipu-v3.h>
++
++#include "ipu-prv.h"
++
++#define IPU_PRG_CTL                           0x00
++#define  IPU_PRG_CTL_BYPASS(i)                        (1 << (0 + i))
++#define  IPU_PRG_CTL_SOFT_ARID_MASK           0x3
++#define  IPU_PRG_CTL_SOFT_ARID_SHIFT(i)               (8 + i * 2)
++#define  IPU_PRG_CTL_SOFT_ARID(i, v)          ((v & 0x3) << (8 + 2 * i))
++#define  IPU_PRG_CTL_SO(i)                    (1 << (16 + i))
++#define  IPU_PRG_CTL_VFLIP(i)                 (1 << (19 + i))
++#define  IPU_PRG_CTL_BLOCK_MODE(i)            (1 << (22 + i))
++#define  IPU_PRG_CTL_CNT_LOAD_EN(i)           (1 << (25 + i))
++#define  IPU_PRG_CTL_SOFTRST                  (1 << 30)
++#define  IPU_PRG_CTL_SHADOW_EN                        (1 << 31)
++
++#define IPU_PRG_STATUS                                0x04
++#define  IPU_PRG_STATUS_BUFFER0_READY(i)      (1 << (0 + i * 2))
++#define  IPU_PRG_STATUS_BUFFER1_READY(i)      (1 << (1 + i * 2))
++
++#define IPU_PRG_QOS                           0x08
++#define  IPU_PRG_QOS_ARID_MASK                        0xf
++#define  IPU_PRG_QOS_ARID_SHIFT(i)            (0 + i * 4)
++
++#define IPU_PRG_REG_UPDATE                    0x0c
++#define  IPU_PRG_REG_UPDATE_REG_UPDATE                (1 << 0)
++
++#define IPU_PRG_STRIDE(i)                     (0x10 + i * 0x4)
++#define  IPU_PRG_STRIDE_STRIDE_MASK           0x3fff
++
++#define IPU_PRG_CROP_LINE                     0x1c
++
++#define IPU_PRG_THD                           0x20
++
++#define IPU_PRG_BADDR(i)                      (0x24 + i * 0x4)
++
++#define IPU_PRG_OFFSET(i)                     (0x30 + i * 0x4)
++
++#define IPU_PRG_ILO(i)                                (0x3c + i * 0x4)
++
++#define IPU_PRG_HEIGHT(i)                     (0x48 + i * 0x4)
++#define  IPU_PRG_HEIGHT_PRE_HEIGHT_MASK               0xfff
++#define  IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT      0
++#define  IPU_PRG_HEIGHT_IPU_HEIGHT_MASK               0xfff
++#define  IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT      16
++
++struct ipu_prg_channel {
++      bool                    enabled;
++      int                     used_pre;
++};
++
++struct ipu_prg {
++      struct list_head        list;
++      struct device           *dev;
++      int                     id;
++
++      void __iomem            *regs;
++      struct clk              *clk_ipg, *clk_axi;
++      struct regmap           *iomuxc_gpr;
++      struct ipu_pre          *pres[3];
++
++      struct ipu_prg_channel  chan[3];
++};
++
++static DEFINE_MUTEX(ipu_prg_list_mutex);
++static LIST_HEAD(ipu_prg_list);
++
++struct ipu_prg *
++ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
++{
++      struct device_node *prg_node = of_parse_phandle(dev->of_node,
++                                                      name, 0);
++      struct ipu_prg *prg;
++
++      mutex_lock(&ipu_prg_list_mutex);
++      list_for_each_entry(prg, &ipu_prg_list, list) {
++              if (prg_node == prg->dev->of_node) {
++                      mutex_unlock(&ipu_prg_list_mutex);
++                      device_link_add(dev, prg->dev,
++                                      DL_FLAG_AUTOREMOVE_CONSUMER);
++                      prg->id = ipu_id;
++                      of_node_put(prg_node);
++                      return prg;
++              }
++      }
++      mutex_unlock(&ipu_prg_list_mutex);
++
++      of_node_put(prg_node);
++
++      return NULL;
++}
++
++int ipu_prg_max_active_channels(void)
++{
++      return ipu_pre_get_available_count();
++}
++EXPORT_SYMBOL_GPL(ipu_prg_max_active_channels);
++
++bool ipu_prg_present(struct ipu_soc *ipu)
++{
++      if (ipu->prg_priv)
++              return true;
++
++      return false;
++}
++EXPORT_SYMBOL_GPL(ipu_prg_present);
++
++bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
++                            uint64_t modifier)
++{
++      const struct drm_format_info *info = drm_format_info(format);
++
++      if (info->num_planes != 1)
++              return false;
++
++      switch (modifier) {
++      case DRM_FORMAT_MOD_LINEAR:
++      case DRM_FORMAT_MOD_VIVANTE_TILED:
++      case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
++              return true;
++      default:
++              return false;
++      }
++}
++EXPORT_SYMBOL_GPL(ipu_prg_format_supported);
++
++int ipu_prg_enable(struct ipu_soc *ipu)
++{
++      struct ipu_prg *prg = ipu->prg_priv;
++
++      if (!prg)
++              return 0;
++
++      return pm_runtime_get_sync(prg->dev);
++}
++EXPORT_SYMBOL_GPL(ipu_prg_enable);
++
++void ipu_prg_disable(struct ipu_soc *ipu)
++{
++      struct ipu_prg *prg = ipu->prg_priv;
++
++      if (!prg)
++              return;
++
++      pm_runtime_put(prg->dev);
++}
++EXPORT_SYMBOL_GPL(ipu_prg_disable);
++
++/*
++ * The channel configuration functions below are not thread safe, as they
++ * must only be called from the atomic commit path in the DRM driver, which
++ * is properly serialized.
++ */
++static int ipu_prg_ipu_to_prg_chan(int ipu_chan)
++{
++      /*
++       * This isn't clearly documented in the RM, but IPU to PRG channel
++       * assignment is fixed, as only with this mapping the control signals
++       * match up.
++       */
++      switch (ipu_chan) {
++      case IPUV3_CHANNEL_MEM_BG_SYNC:
++              return 0;
++      case IPUV3_CHANNEL_MEM_FG_SYNC:
++              return 1;
++      case IPUV3_CHANNEL_MEM_DC_SYNC:
++              return 2;
++      default:
++              return -EINVAL;
++      }
++}
++
++static int ipu_prg_get_pre(struct ipu_prg *prg, int prg_chan)
++{
++      int i, ret;
++
++      /* channel 0 is special as it is hardwired to one of the PREs */
++      if (prg_chan == 0) {
++              ret = ipu_pre_get(prg->pres[0]);
++              if (ret)
++                      goto fail;
++              prg->chan[prg_chan].used_pre = 0;
++              return 0;
++      }
++
++      for (i = 1; i < 3; i++) {
++              ret = ipu_pre_get(prg->pres[i]);
++              if (!ret) {
++                      u32 val, mux;
++                      int shift;
++
++                      prg->chan[prg_chan].used_pre = i;
++
++                      /* configure the PRE to PRG channel mux */
++                      shift = (i == 1) ? 12 : 14;
++                      mux = (prg->id << 1) | (prg_chan - 1);
++                      regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
++                                         0x3 << shift, mux << shift);
++
++                      /* check other mux, must not point to same channel */
++                      shift = (i == 1) ? 14 : 12;
++                      regmap_read(prg->iomuxc_gpr, IOMUXC_GPR5, &val);
++                      if (((val >> shift) & 0x3) == mux) {
++                              regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
++                                                 0x3 << shift,
++                                                 (mux ^ 0x1) << shift);
++                      }
++
++                      return 0;
++              }
++      }
++
++fail:
++      dev_err(prg->dev, "could not get PRE for PRG chan %d", prg_chan);
++      return ret;
++}
++
++static void ipu_prg_put_pre(struct ipu_prg *prg, int prg_chan)
++{
++      struct ipu_prg_channel *chan = &prg->chan[prg_chan];
++
++      ipu_pre_put(prg->pres[chan->used_pre]);
++      chan->used_pre = -1;
++}
++
++void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
++{
++      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
++      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
++      struct ipu_prg_channel *chan;
++      u32 val;
++
++      if (prg_chan < 0)
++              return;
++
++      chan = &prg->chan[prg_chan];
++      if (!chan->enabled)
++              return;
++
++      pm_runtime_get_sync(prg->dev);
++
++      val = readl(prg->regs + IPU_PRG_CTL);
++      val |= IPU_PRG_CTL_BYPASS(prg_chan);
++      writel(val, prg->regs + IPU_PRG_CTL);
++
++      val = IPU_PRG_REG_UPDATE_REG_UPDATE;
++      writel(val, prg->regs + IPU_PRG_REG_UPDATE);
++
++      pm_runtime_put(prg->dev);
++
++      ipu_prg_put_pre(prg, prg_chan);
++
++      chan->enabled = false;
++}
++EXPORT_SYMBOL_GPL(ipu_prg_channel_disable);
++
++int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
++                            unsigned int axi_id, unsigned int width,
++                            unsigned int height, unsigned int stride,
++                            u32 format, uint64_t modifier, unsigned long *eba)
++{
++      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
++      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
++      struct ipu_prg_channel *chan;
++      u32 val;
++      int ret;
++
++      if (prg_chan < 0)
++              return prg_chan;
++
++      chan = &prg->chan[prg_chan];
++
++      if (chan->enabled) {
++              ipu_pre_update(prg->pres[chan->used_pre], *eba);
++              return 0;
++      }
++
++      ret = ipu_prg_get_pre(prg, prg_chan);
++      if (ret)
++              return ret;
++
++      ipu_pre_configure(prg->pres[chan->used_pre],
++                        width, height, stride, format, modifier, *eba);
++
++
++      pm_runtime_get_sync(prg->dev);
++
++      val = (stride - 1) & IPU_PRG_STRIDE_STRIDE_MASK;
++      writel(val, prg->regs + IPU_PRG_STRIDE(prg_chan));
++
++      val = ((height & IPU_PRG_HEIGHT_PRE_HEIGHT_MASK) <<
++             IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT) |
++            ((height & IPU_PRG_HEIGHT_IPU_HEIGHT_MASK) <<
++             IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT);
++      writel(val, prg->regs + IPU_PRG_HEIGHT(prg_chan));
++
++      val = ipu_pre_get_baddr(prg->pres[chan->used_pre]);
++      *eba = val;
++      writel(val, prg->regs + IPU_PRG_BADDR(prg_chan));
++
++      val = readl(prg->regs + IPU_PRG_CTL);
++      /* config AXI ID */
++      val &= ~(IPU_PRG_CTL_SOFT_ARID_MASK <<
++               IPU_PRG_CTL_SOFT_ARID_SHIFT(prg_chan));
++      val |= IPU_PRG_CTL_SOFT_ARID(prg_chan, axi_id);
++      /* enable channel */
++      val &= ~IPU_PRG_CTL_BYPASS(prg_chan);
++      writel(val, prg->regs + IPU_PRG_CTL);
++
++      val = IPU_PRG_REG_UPDATE_REG_UPDATE;
++      writel(val, prg->regs + IPU_PRG_REG_UPDATE);
++
++      /* wait for both double buffers to be filled */
++      readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
++                         (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
++                         (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
++                         5, 1000);
++
++      pm_runtime_put(prg->dev);
++
++      chan->enabled = true;
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
++
++bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
++{
++      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
++      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
++      struct ipu_prg_channel *chan;
++
++      if (prg_chan < 0)
++              return false;
++
++      chan = &prg->chan[prg_chan];
++      WARN_ON(!chan->enabled);
++
++      return ipu_pre_update_pending(prg->pres[chan->used_pre]);
++}
++EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
++
++static int ipu_prg_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct resource *res;
++      struct ipu_prg *prg;
++      u32 val;
++      int i, ret;
++
++      prg = devm_kzalloc(dev, sizeof(*prg), GFP_KERNEL);
++      if (!prg)
++              return -ENOMEM;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      prg->regs = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(prg->regs))
++              return PTR_ERR(prg->regs);
++
++
++      prg->clk_ipg = devm_clk_get(dev, "ipg");
++      if (IS_ERR(prg->clk_ipg))
++              return PTR_ERR(prg->clk_ipg);
++
++      prg->clk_axi = devm_clk_get(dev, "axi");
++      if (IS_ERR(prg->clk_axi))
++              return PTR_ERR(prg->clk_axi);
++
++      prg->iomuxc_gpr =
++              syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++      if (IS_ERR(prg->iomuxc_gpr))
++              return PTR_ERR(prg->iomuxc_gpr);
++
++      for (i = 0; i < 3; i++) {
++              prg->pres[i] = ipu_pre_lookup_by_phandle(dev, "fsl,pres", i);
++              if (!prg->pres[i])
++                      return -EPROBE_DEFER;
++      }
++
++      ret = clk_prepare_enable(prg->clk_ipg);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(prg->clk_axi);
++      if (ret) {
++              clk_disable_unprepare(prg->clk_ipg);
++              return ret;
++      }
++
++      /* init to free running mode */
++      val = readl(prg->regs + IPU_PRG_CTL);
++      val |= IPU_PRG_CTL_SHADOW_EN;
++      writel(val, prg->regs + IPU_PRG_CTL);
++
++      /* disable address threshold */
++      writel(0xffffffff, prg->regs + IPU_PRG_THD);
++
++      pm_runtime_set_active(dev);
++      pm_runtime_enable(dev);
++
++      prg->dev = dev;
++      platform_set_drvdata(pdev, prg);
++      mutex_lock(&ipu_prg_list_mutex);
++      list_add(&prg->list, &ipu_prg_list);
++      mutex_unlock(&ipu_prg_list_mutex);
++
++      return 0;
++}
++
++static int ipu_prg_remove(struct platform_device *pdev)
++{
++      struct ipu_prg *prg = platform_get_drvdata(pdev);
++
++      mutex_lock(&ipu_prg_list_mutex);
++      list_del(&prg->list);
++      mutex_unlock(&ipu_prg_list_mutex);
++
++      return 0;
++}
++
++#ifdef CONFIG_PM
++static int prg_suspend(struct device *dev)
++{
++      struct ipu_prg *prg = dev_get_drvdata(dev);
++
++      clk_disable_unprepare(prg->clk_axi);
++      clk_disable_unprepare(prg->clk_ipg);
++
++      return 0;
++}
++
++static int prg_resume(struct device *dev)
++{
++      struct ipu_prg *prg = dev_get_drvdata(dev);
++      int ret;
++
++      ret = clk_prepare_enable(prg->clk_ipg);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(prg->clk_axi);
++      if (ret) {
++              clk_disable_unprepare(prg->clk_ipg);
++              return ret;
++      }
++
++      return 0;
++}
++#endif
++
++static const struct dev_pm_ops prg_pm_ops = {
++      SET_RUNTIME_PM_OPS(prg_suspend, prg_resume, NULL)
++};
++
++static const struct of_device_id ipu_prg_dt_ids[] = {
++      { .compatible = "fsl,imx6qp-prg", },
++      { /* sentinel */ },
++};
++
++struct platform_driver ipu_prg_drv = {
++      .probe          = ipu_prg_probe,
++      .remove         = ipu_prg_remove,
++      .driver         = {
++              .name   = "imx-ipu-prg",
++              .pm     = &prg_pm_ops,
++              .of_match_table = ipu_prg_dt_ids,
++      },
++};
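
A note on how the entry points above combine in practice: the first ipu_prg_channel_configure() call for a channel claims a PRE, programs it and takes the channel out of bypass; later calls while the channel stays enabled only hand the new buffer address to the PRE. The following page-flip sketch is a hedged illustration, not part of the patch; the plane_* name and the surrounding IDMAC programming are assumptions.

static int plane_show_buffer(struct ipuv3_channel *ipu_ch, unsigned int axi_id,
			     unsigned int width, unsigned int height,
			     unsigned int stride, u32 format, u64 modifier,
			     unsigned long eba)
{
	int ret;

	/*
	 * On the first call this sets up PRE + PRG; on subsequent flips it
	 * only queues a PRE buffer update. Note that eba is rewritten to the
	 * PRE scratch buffer address, which is what the IDMAC channel must
	 * be pointed at instead of the original framebuffer.
	 */
	ret = ipu_prg_channel_configure(ipu_ch, axi_id, width, height,
					stride, format, modifier, &eba);
	if (ret)
		return ret;

	/* ... program eba into the IDMAC channel's EBA0/EBA1 here ... */
	return 0;
}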
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-prv.h
+@@ -0,0 +1,274 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#ifndef __IPU_PRV_H__
++#define __IPU_PRV_H__
++
++struct ipu_soc;
++
++#include <linux/types.h>
++#include <linux/device.h>
++#include <linux/clk.h>
++#include <linux/platform_device.h>
++
++#include <video/imx-ipu-v3.h>
++
++#define IPU_MCU_T_DEFAULT     8
++#define IPU_CM_IDMAC_REG_OFS  0x00008000
++#define IPU_CM_IC_REG_OFS     0x00020000
++#define IPU_CM_IRT_REG_OFS    0x00028000
++#define IPU_CM_CSI0_REG_OFS   0x00030000
++#define IPU_CM_CSI1_REG_OFS   0x00038000
++#define IPU_CM_SMFC_REG_OFS   0x00050000
++#define IPU_CM_DC_REG_OFS     0x00058000
++#define IPU_CM_DMFC_REG_OFS   0x00060000
++
++/* Register addresses */
++/* IPU Common registers */
++#define IPU_CM_REG(offset)    (offset)
++
++#define IPU_CONF                      IPU_CM_REG(0)
++
++#define IPU_SRM_PRI1                  IPU_CM_REG(0x00a0)
++#define IPU_SRM_PRI2                  IPU_CM_REG(0x00a4)
++#define IPU_FS_PROC_FLOW1             IPU_CM_REG(0x00a8)
++#define IPU_FS_PROC_FLOW2             IPU_CM_REG(0x00ac)
++#define IPU_FS_PROC_FLOW3             IPU_CM_REG(0x00b0)
++#define IPU_FS_DISP_FLOW1             IPU_CM_REG(0x00b4)
++#define IPU_FS_DISP_FLOW2             IPU_CM_REG(0x00b8)
++#define IPU_SKIP                      IPU_CM_REG(0x00bc)
++#define IPU_DISP_ALT_CONF             IPU_CM_REG(0x00c0)
++#define IPU_DISP_GEN                  IPU_CM_REG(0x00c4)
++#define IPU_DISP_ALT1                 IPU_CM_REG(0x00c8)
++#define IPU_DISP_ALT2                 IPU_CM_REG(0x00cc)
++#define IPU_DISP_ALT3                 IPU_CM_REG(0x00d0)
++#define IPU_DISP_ALT4                 IPU_CM_REG(0x00d4)
++#define IPU_SNOOP                     IPU_CM_REG(0x00d8)
++#define IPU_MEM_RST                   IPU_CM_REG(0x00dc)
++#define IPU_PM                                IPU_CM_REG(0x00e0)
++#define IPU_GPR                               IPU_CM_REG(0x00e4)
++#define IPU_CHA_DB_MODE_SEL(ch)               IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_DB_MODE_SEL(ch)   IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
++#define IPU_CHA_CUR_BUF(ch)           IPU_CM_REG(0x023C + 4 * ((ch) / 32))
++#define IPU_ALT_CUR_BUF0              IPU_CM_REG(0x0244)
++#define IPU_ALT_CUR_BUF1              IPU_CM_REG(0x0248)
++#define IPU_SRM_STAT                  IPU_CM_REG(0x024C)
++#define IPU_PROC_TASK_STAT            IPU_CM_REG(0x0250)
++#define IPU_DISP_TASK_STAT            IPU_CM_REG(0x0254)
++#define IPU_CHA_BUF0_RDY(ch)          IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
++#define IPU_CHA_BUF1_RDY(ch)          IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
++#define IPU_CHA_BUF2_RDY(ch)          IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_BUF0_RDY(ch)      IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_BUF1_RDY(ch)      IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
++
++#define IPU_INT_CTRL(n)               IPU_CM_REG(0x003C + 4 * (n))
++#define IPU_INT_STAT(n)               IPU_CM_REG(0x0200 + 4 * (n))
++
++/* SRM_PRI2 */
++#define DP_S_SRM_MODE_MASK            (0x3 << 3)
++#define DP_S_SRM_MODE_NOW             (0x3 << 3)
++#define DP_S_SRM_MODE_NEXT_FRAME      (0x1 << 3)
++
++/* FS_PROC_FLOW1 */
++#define FS_PRPENC_ROT_SRC_SEL_MASK    (0xf << 0)
++#define FS_PRPENC_ROT_SRC_SEL_ENC             (0x7 << 0)
++#define FS_PRPVF_ROT_SRC_SEL_MASK     (0xf << 8)
++#define FS_PRPVF_ROT_SRC_SEL_VF                       (0x8 << 8)
++#define FS_PP_SRC_SEL_MASK            (0xf << 12)
++#define FS_PP_ROT_SRC_SEL_MASK                (0xf << 16)
++#define FS_PP_ROT_SRC_SEL_PP                  (0x5 << 16)
++#define FS_VDI1_SRC_SEL_MASK          (0x3 << 20)
++#define FS_VDI3_SRC_SEL_MASK          (0x3 << 20)
++#define FS_PRP_SRC_SEL_MASK           (0xf << 24)
++#define FS_VDI_SRC_SEL_MASK           (0x3 << 28)
++#define FS_VDI_SRC_SEL_CSI_DIRECT             (0x1 << 28)
++#define FS_VDI_SRC_SEL_VDOA                   (0x2 << 28)
++
++/* FS_PROC_FLOW2 */
++#define FS_PRP_ENC_DEST_SEL_MASK      (0xf << 0)
++#define FS_PRP_ENC_DEST_SEL_IRT_ENC           (0x1 << 0)
++#define FS_PRPVF_DEST_SEL_MASK                (0xf << 4)
++#define FS_PRPVF_DEST_SEL_IRT_VF              (0x1 << 4)
++#define FS_PRPVF_ROT_DEST_SEL_MASK    (0xf << 8)
++#define FS_PP_DEST_SEL_MASK           (0xf << 12)
++#define FS_PP_DEST_SEL_IRT_PP                 (0x3 << 12)
++#define FS_PP_ROT_DEST_SEL_MASK               (0xf << 16)
++#define FS_PRPENC_ROT_DEST_SEL_MASK   (0xf << 20)
++#define FS_PRP_DEST_SEL_MASK          (0xf << 24)
++
++#define IPU_DI0_COUNTER_RELEASE                       (1 << 24)
++#define IPU_DI1_COUNTER_RELEASE                       (1 << 25)
++
++#define IPU_IDMAC_REG(offset) (offset)
++
++#define IDMAC_CONF                    IPU_IDMAC_REG(0x0000)
++#define IDMAC_CHA_EN(ch)              IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
++#define IDMAC_SEP_ALPHA                       IPU_IDMAC_REG(0x000c)
++#define IDMAC_ALT_SEP_ALPHA           IPU_IDMAC_REG(0x0010)
++#define IDMAC_CHA_PRI(ch)             IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
++#define IDMAC_WM_EN(ch)                       IPU_IDMAC_REG(0x001c + 4 * ((ch) / 32))
++#define IDMAC_CH_LOCK_EN_1            IPU_IDMAC_REG(0x0024)
++#define IDMAC_CH_LOCK_EN_2            IPU_IDMAC_REG(0x0028)
++#define IDMAC_SUB_ADDR_0              IPU_IDMAC_REG(0x002c)
++#define IDMAC_SUB_ADDR_1              IPU_IDMAC_REG(0x0030)
++#define IDMAC_SUB_ADDR_2              IPU_IDMAC_REG(0x0034)
++#define IDMAC_BAND_EN(ch)             IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
++#define IDMAC_CHA_BUSY(ch)            IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
++
++#define IPU_NUM_IRQS  (32 * 15)
++
++enum ipu_modules {
++      IPU_CONF_CSI0_EN                = (1 << 0),
++      IPU_CONF_CSI1_EN                = (1 << 1),
++      IPU_CONF_IC_EN                  = (1 << 2),
++      IPU_CONF_ROT_EN                 = (1 << 3),
++      IPU_CONF_ISP_EN                 = (1 << 4),
++      IPU_CONF_DP_EN                  = (1 << 5),
++      IPU_CONF_DI0_EN                 = (1 << 6),
++      IPU_CONF_DI1_EN                 = (1 << 7),
++      IPU_CONF_SMFC_EN                = (1 << 8),
++      IPU_CONF_DC_EN                  = (1 << 9),
++      IPU_CONF_DMFC_EN                = (1 << 10),
++
++      IPU_CONF_VDI_EN                 = (1 << 12),
++
++      IPU_CONF_IDMAC_DIS              = (1 << 22),
++
++      IPU_CONF_IC_DMFC_SEL            = (1 << 25),
++      IPU_CONF_IC_DMFC_SYNC           = (1 << 26),
++      IPU_CONF_VDI_DMFC_SYNC          = (1 << 27),
++
++      IPU_CONF_CSI0_DATA_SOURCE       = (1 << 28),
++      IPU_CONF_CSI1_DATA_SOURCE       = (1 << 29),
++      IPU_CONF_IC_INPUT               = (1 << 30),
++      IPU_CONF_CSI_SEL                = (1 << 31),
++};
++
++struct ipuv3_channel {
++      unsigned int num;
++      struct ipu_soc *ipu;
++      struct list_head list;
++};
++
++struct ipu_cpmem;
++struct ipu_csi;
++struct ipu_dc_priv;
++struct ipu_dmfc_priv;
++struct ipu_di;
++struct ipu_ic_priv;
++struct ipu_vdi;
++struct ipu_image_convert_priv;
++struct ipu_smfc_priv;
++struct ipu_pre;
++struct ipu_prg;
++
++struct ipu_devtype;
++
++struct ipu_soc {
++      struct device           *dev;
++      const struct ipu_devtype        *devtype;
++      enum ipuv3_type         ipu_type;
++      spinlock_t              lock;
++      struct mutex            channel_lock;
++      struct list_head        channels;
++
++      void __iomem            *cm_reg;
++      void __iomem            *idmac_reg;
++
++      int                     id;
++      int                     usecount;
++
++      struct clk              *clk;
++
++      int                     irq_sync;
++      int                     irq_err;
++      struct irq_domain       *domain;
++
++      struct ipu_cpmem        *cpmem_priv;
++      struct ipu_dc_priv      *dc_priv;
++      struct ipu_dp_priv      *dp_priv;
++      struct ipu_dmfc_priv    *dmfc_priv;
++      struct ipu_di           *di_priv[2];
++      struct ipu_csi          *csi_priv[2];
++      struct ipu_ic_priv      *ic_priv;
++      struct ipu_vdi          *vdi_priv;
++      struct ipu_image_convert_priv *image_convert_priv;
++      struct ipu_smfc_priv    *smfc_priv;
++      struct ipu_prg          *prg_priv;
++};
++
++static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
++{
++      return readl(ipu->idmac_reg + offset);
++}
++
++static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
++                                 unsigned offset)
++{
++      writel(value, ipu->idmac_reg + offset);
++}
++
++void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync);
++
++int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
++int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
++
++bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
++
++int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
++               unsigned long base, u32 module, struct clk *clk_ipu);
++void ipu_csi_exit(struct ipu_soc *ipu, int id);
++
++int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
++              unsigned long base, unsigned long tpmem_base);
++void ipu_ic_exit(struct ipu_soc *ipu);
++
++int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
++               unsigned long base, u32 module);
++void ipu_vdi_exit(struct ipu_soc *ipu);
++
++int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
++void ipu_image_convert_exit(struct ipu_soc *ipu);
++
++int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
++              unsigned long base, u32 module, struct clk *ipu_clk);
++void ipu_di_exit(struct ipu_soc *ipu, int id);
++
++int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
++              struct clk *ipu_clk);
++void ipu_dmfc_exit(struct ipu_soc *ipu);
++
++int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
++void ipu_dp_exit(struct ipu_soc *ipu);
++
++int ipu_dc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
++              unsigned long template_base);
++void ipu_dc_exit(struct ipu_soc *ipu);
++
++int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
++void ipu_cpmem_exit(struct ipu_soc *ipu);
++
++int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
++void ipu_smfc_exit(struct ipu_soc *ipu);
++
++struct ipu_pre *ipu_pre_lookup_by_phandle(struct device *dev, const char *name,
++                                        int index);
++int ipu_pre_get_available_count(void);
++int ipu_pre_get(struct ipu_pre *pre);
++void ipu_pre_put(struct ipu_pre *pre);
++u32 ipu_pre_get_baddr(struct ipu_pre *pre);
++void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
++                     unsigned int height, unsigned int stride, u32 format,
++                     uint64_t modifier, unsigned int bufaddr);
++void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
++bool ipu_pre_update_pending(struct ipu_pre *pre);
++
++struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
++                                        int ipu_id);
++
++extern struct platform_driver ipu_pre_drv;
++extern struct platform_driver ipu_prg_drv;
++
++#endif                                /* __IPU_PRV_H__ */
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-smfc.c
+@@ -0,0 +1,202 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++#include <linux/export.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/errno.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/clk.h>
++#include <video/imx-ipu-v3.h>
++
++#include "ipu-prv.h"
++
++struct ipu_smfc {
++      struct ipu_smfc_priv *priv;
++      int chno;
++      bool inuse;
++};
++
++struct ipu_smfc_priv {
++      void __iomem *base;
++      spinlock_t lock;
++      struct ipu_soc *ipu;
++      struct ipu_smfc channel[4];
++      int use_count;
++};
++
++/* SMFC Registers */
++#define SMFC_MAP      0x0000
++#define SMFC_WMC      0x0004
++#define SMFC_BS               0x0008
++
++int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++      u32 val, shift;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      shift = smfc->chno * 4;
++      val = readl(priv->base + SMFC_BS);
++      val &= ~(0xf << shift);
++      val |= burstsize << shift;
++      writel(val, priv->base + SMFC_BS);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
++
++int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++      u32 val, shift;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      shift = smfc->chno * 3;
++      val = readl(priv->base + SMFC_MAP);
++      val &= ~(0x7 << shift);
++      val |= ((csi_id << 2) | mipi_id) << shift;
++      writel(val, priv->base + SMFC_MAP);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
++
++int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++      u32 val, shift;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
++      val = readl(priv->base + SMFC_WMC);
++      val &= ~(0x3f << shift);
++      val |= ((clr_level << 3) | set_level) << shift;
++      writel(val, priv->base + SMFC_WMC);
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
++
++int ipu_smfc_enable(struct ipu_smfc *smfc)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      if (!priv->use_count)
++              ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
++
++      priv->use_count++;
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_enable);
++
++int ipu_smfc_disable(struct ipu_smfc *smfc)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      priv->use_count--;
++
++      if (!priv->use_count)
++              ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
++
++      if (priv->use_count < 0)
++              priv->use_count = 0;
++
++      spin_unlock_irqrestore(&priv->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_disable);
++
++struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
++{
++      struct ipu_smfc_priv *priv = ipu->smfc_priv;
++      struct ipu_smfc *smfc, *ret;
++      unsigned long flags;
++
++      if (chno >= 4)
++              return ERR_PTR(-EINVAL);
++
++      smfc = &priv->channel[chno];
++      ret = smfc;
++
++      spin_lock_irqsave(&priv->lock, flags);
++
++      if (smfc->inuse) {
++              ret = ERR_PTR(-EBUSY);
++              goto unlock;
++      }
++
++      smfc->inuse = true;
++unlock:
++      spin_unlock_irqrestore(&priv->lock, flags);
++      return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_get);
++
++void ipu_smfc_put(struct ipu_smfc *smfc)
++{
++      struct ipu_smfc_priv *priv = smfc->priv;
++      unsigned long flags;
++
++      spin_lock_irqsave(&priv->lock, flags);
++      smfc->inuse = false;
++      spin_unlock_irqrestore(&priv->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_smfc_put);
++
++int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
++                unsigned long base)
++{
++      struct ipu_smfc_priv *priv;
++      int i;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      ipu->smfc_priv = priv;
++      spin_lock_init(&priv->lock);
++      priv->ipu = ipu;
++
++      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!priv->base)
++              return -ENOMEM;
++
++      for (i = 0; i < 4; i++) {
++              priv->channel[i].priv = priv;
++              priv->channel[i].chno = i;
++      }
++
++      pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
++
++      return 0;
++}
++
++void ipu_smfc_exit(struct ipu_soc *ipu)
++{
++}
+--- /dev/null
++++ b/drivers/gpu/imx/ipu-v3/ipu-vdi.c
+@@ -0,0 +1,234 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2012-2016 Mentor Graphics Inc.
++ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
++ */
++#include <linux/io.h>
++#include "ipu-prv.h"
++
++struct ipu_vdi {
++      void __iomem *base;
++      u32 module;
++      spinlock_t lock;
++      int use_count;
++      struct ipu_soc *ipu;
++};
++
++
++/* VDI Register Offsets */
++#define VDI_FSIZE 0x0000
++#define VDI_C     0x0004
++
++/* VDI Register Fields */
++#define VDI_C_CH_420             (0 << 1)
++#define VDI_C_CH_422             (1 << 1)
++#define VDI_C_MOT_SEL_MASK       (0x3 << 2)
++#define VDI_C_MOT_SEL_FULL       (2 << 2)
++#define VDI_C_MOT_SEL_LOW        (1 << 2)
++#define VDI_C_MOT_SEL_MED        (0 << 2)
++#define VDI_C_BURST_SIZE1_4      (3 << 4)
++#define VDI_C_BURST_SIZE2_4      (3 << 8)
++#define VDI_C_BURST_SIZE3_4      (3 << 12)
++#define VDI_C_BURST_SIZE_MASK    0xF
++#define VDI_C_BURST_SIZE1_OFFSET 4
++#define VDI_C_BURST_SIZE2_OFFSET 8
++#define VDI_C_BURST_SIZE3_OFFSET 12
++#define VDI_C_VWM1_SET_1         (0 << 16)
++#define VDI_C_VWM1_SET_2         (1 << 16)
++#define VDI_C_VWM1_CLR_2         (1 << 19)
++#define VDI_C_VWM3_SET_1         (0 << 22)
++#define VDI_C_VWM3_SET_2         (1 << 22)
++#define VDI_C_VWM3_CLR_2         (1 << 25)
++#define VDI_C_TOP_FIELD_MAN_1    (1 << 30)
++#define VDI_C_TOP_FIELD_AUTO_1   (1 << 31)
++
++static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
++{
++      return readl(vdi->base + offset);
++}
++
++static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
++                               unsigned int offset)
++{
++      writel(value, vdi->base + offset);
++}
++
++void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
++{
++      bool top_field_0 = false;
++      unsigned long flags;
++      u32 reg;
++
++      switch (field) {
++      case V4L2_FIELD_INTERLACED_TB:
++      case V4L2_FIELD_SEQ_TB:
++      case V4L2_FIELD_TOP:
++              top_field_0 = true;
++              break;
++      case V4L2_FIELD_INTERLACED_BT:
++      case V4L2_FIELD_SEQ_BT:
++      case V4L2_FIELD_BOTTOM:
++              top_field_0 = false;
++              break;
++      default:
++              top_field_0 = (std & V4L2_STD_525_60) ? true : false;
++              break;
++      }
++
++      spin_lock_irqsave(&vdi->lock, flags);
++
++      reg = ipu_vdi_read(vdi, VDI_C);
++      if (top_field_0)
++              reg &= ~(VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1);
++      else
++              reg |= VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1;
++      ipu_vdi_write(vdi, reg, VDI_C);
++
++      spin_unlock_irqrestore(&vdi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
++
++void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
++{
++      unsigned long flags;
++      u32 reg;
++
++      spin_lock_irqsave(&vdi->lock, flags);
++
++      reg = ipu_vdi_read(vdi, VDI_C);
++
++      reg &= ~VDI_C_MOT_SEL_MASK;
++
++      switch (motion_sel) {
++      case MED_MOTION:
++              reg |= VDI_C_MOT_SEL_MED;
++              break;
++      case HIGH_MOTION:
++              reg |= VDI_C_MOT_SEL_FULL;
++              break;
++      default:
++              reg |= VDI_C_MOT_SEL_LOW;
++              break;
++      }
++
++      ipu_vdi_write(vdi, reg, VDI_C);
++
++      spin_unlock_irqrestore(&vdi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
++
++void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
++{
++      unsigned long flags;
++      u32 pixel_fmt, reg;
++
++      spin_lock_irqsave(&vdi->lock, flags);
++
++      reg = ((yres - 1) << 16) | (xres - 1);
++      ipu_vdi_write(vdi, reg, VDI_FSIZE);
++
++      /*
++       * Full motion, only vertical filter is used.
++       * Burst size is 4 accesses
++       */
++      if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
++          code == MEDIA_BUS_FMT_UYVY8_1X16 ||
++          code == MEDIA_BUS_FMT_YUYV8_2X8 ||
++          code == MEDIA_BUS_FMT_YUYV8_1X16)
++              pixel_fmt = VDI_C_CH_422;
++      else
++              pixel_fmt = VDI_C_CH_420;
++
++      reg = ipu_vdi_read(vdi, VDI_C);
++      reg |= pixel_fmt;
++      reg |= VDI_C_BURST_SIZE2_4;
++      reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
++      reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
++      ipu_vdi_write(vdi, reg, VDI_C);
++
++      spin_unlock_irqrestore(&vdi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_setup);
++
++void ipu_vdi_unsetup(struct ipu_vdi *vdi)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&vdi->lock, flags);
++      ipu_vdi_write(vdi, 0, VDI_FSIZE);
++      ipu_vdi_write(vdi, 0, VDI_C);
++      spin_unlock_irqrestore(&vdi->lock, flags);
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
++
++int ipu_vdi_enable(struct ipu_vdi *vdi)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&vdi->lock, flags);
++
++      if (!vdi->use_count)
++              ipu_module_enable(vdi->ipu, vdi->module);
++
++      vdi->use_count++;
++
++      spin_unlock_irqrestore(&vdi->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_enable);
++
++int ipu_vdi_disable(struct ipu_vdi *vdi)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&vdi->lock, flags);
++
++      if (vdi->use_count) {
++              if (!--vdi->use_count)
++                      ipu_module_disable(vdi->ipu, vdi->module);
++      }
++
++      spin_unlock_irqrestore(&vdi->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_disable);
++
++struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
++{
++      return ipu->vdi_priv;
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_get);
++
++void ipu_vdi_put(struct ipu_vdi *vdi)
++{
++}
++EXPORT_SYMBOL_GPL(ipu_vdi_put);
++
++int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
++               unsigned long base, u32 module)
++{
++      struct ipu_vdi *vdi;
++
++      vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
++      if (!vdi)
++              return -ENOMEM;
++
++      ipu->vdi_priv = vdi;
++
++      spin_lock_init(&vdi->lock);
++      vdi->module = module;
++      vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
++      if (!vdi->base)
++              return -ENOMEM;
++
++      dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
++      vdi->ipu = ipu;
++
++      return 0;
++}
++
++void ipu_vdi_exit(struct ipu_soc *ipu)
++{
++}
+--- a/drivers/gpu/ipu-v3/Kconfig
++++ /dev/null
+@@ -1,11 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0-only
+-config IMX_IPUV3_CORE
+-      tristate "IPUv3 core support"
+-      depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM || COMPILE_TEST
+-      depends on DRM || !DRM # if DRM=m, this can't be 'y'
+-      select BITREVERSE
+-      select GENERIC_ALLOCATOR if DRM
+-      select GENERIC_IRQ_CHIP
+-      help
+-        Choose this if you have a i.MX5/6 system and want to use the Image
+-        Processing Unit. This option only enables IPU base support.
+--- a/drivers/gpu/ipu-v3/Makefile
++++ /dev/null
+@@ -1,10 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
+-
+-imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
+-              ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-ic-csc.o \
+-              ipu-image-convert.o ipu-smfc.o ipu-vdi.o
+-
+-ifdef CONFIG_DRM
+-      imx-ipu-v3-objs += ipu-pre.o ipu-prg.o
+-endif
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ /dev/null
+@@ -1,1565 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/module.h>
+-#include <linux/export.h>
+-#include <linux/types.h>
+-#include <linux/reset.h>
+-#include <linux/platform_device.h>
+-#include <linux/err.h>
+-#include <linux/spinlock.h>
+-#include <linux/delay.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-#include <linux/clk.h>
+-#include <linux/list.h>
+-#include <linux/irq.h>
+-#include <linux/irqchip/chained_irq.h>
+-#include <linux/irqdomain.h>
+-#include <linux/of_device.h>
+-#include <linux/of_graph.h>
+-
+-#include <drm/drm_fourcc.h>
+-
+-#include <video/imx-ipu-v3.h>
+-#include "ipu-prv.h"
+-
+-static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
+-{
+-      return readl(ipu->cm_reg + offset);
+-}
+-
+-static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
+-{
+-      writel(value, ipu->cm_reg + offset);
+-}
+-
+-int ipu_get_num(struct ipu_soc *ipu)
+-{
+-      return ipu->id;
+-}
+-EXPORT_SYMBOL_GPL(ipu_get_num);
+-
+-void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
+-{
+-      u32 val;
+-
+-      val = ipu_cm_read(ipu, IPU_SRM_PRI2);
+-      val &= ~DP_S_SRM_MODE_MASK;
+-      val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
+-                    DP_S_SRM_MODE_NOW;
+-      ipu_cm_write(ipu, val, IPU_SRM_PRI2);
+-}
+-EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
+-
+-enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
+-{
+-      switch (drm_fourcc) {
+-      case DRM_FORMAT_ARGB1555:
+-      case DRM_FORMAT_ABGR1555:
+-      case DRM_FORMAT_RGBA5551:
+-      case DRM_FORMAT_BGRA5551:
+-      case DRM_FORMAT_RGB565:
+-      case DRM_FORMAT_BGR565:
+-      case DRM_FORMAT_RGB888:
+-      case DRM_FORMAT_BGR888:
+-      case DRM_FORMAT_ARGB4444:
+-      case DRM_FORMAT_XRGB8888:
+-      case DRM_FORMAT_XBGR8888:
+-      case DRM_FORMAT_RGBX8888:
+-      case DRM_FORMAT_BGRX8888:
+-      case DRM_FORMAT_ARGB8888:
+-      case DRM_FORMAT_ABGR8888:
+-      case DRM_FORMAT_RGBA8888:
+-      case DRM_FORMAT_BGRA8888:
+-      case DRM_FORMAT_RGB565_A8:
+-      case DRM_FORMAT_BGR565_A8:
+-      case DRM_FORMAT_RGB888_A8:
+-      case DRM_FORMAT_BGR888_A8:
+-      case DRM_FORMAT_RGBX8888_A8:
+-      case DRM_FORMAT_BGRX8888_A8:
+-              return IPUV3_COLORSPACE_RGB;
+-      case DRM_FORMAT_YUYV:
+-      case DRM_FORMAT_UYVY:
+-      case DRM_FORMAT_YUV420:
+-      case DRM_FORMAT_YVU420:
+-      case DRM_FORMAT_YUV422:
+-      case DRM_FORMAT_YVU422:
+-      case DRM_FORMAT_YUV444:
+-      case DRM_FORMAT_YVU444:
+-      case DRM_FORMAT_NV12:
+-      case DRM_FORMAT_NV21:
+-      case DRM_FORMAT_NV16:
+-      case DRM_FORMAT_NV61:
+-              return IPUV3_COLORSPACE_YUV;
+-      default:
+-              return IPUV3_COLORSPACE_UNKNOWN;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
+-
+-enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
+-{
+-      switch (pixelformat) {
+-      case V4L2_PIX_FMT_YUV420:
+-      case V4L2_PIX_FMT_YVU420:
+-      case V4L2_PIX_FMT_YUV422P:
+-      case V4L2_PIX_FMT_UYVY:
+-      case V4L2_PIX_FMT_YUYV:
+-      case V4L2_PIX_FMT_NV12:
+-      case V4L2_PIX_FMT_NV21:
+-      case V4L2_PIX_FMT_NV16:
+-      case V4L2_PIX_FMT_NV61:
+-              return IPUV3_COLORSPACE_YUV;
+-      case V4L2_PIX_FMT_RGB565:
+-      case V4L2_PIX_FMT_BGR24:
+-      case V4L2_PIX_FMT_RGB24:
+-      case V4L2_PIX_FMT_ABGR32:
+-      case V4L2_PIX_FMT_XBGR32:
+-      case V4L2_PIX_FMT_BGRA32:
+-      case V4L2_PIX_FMT_BGRX32:
+-      case V4L2_PIX_FMT_RGBA32:
+-      case V4L2_PIX_FMT_RGBX32:
+-      case V4L2_PIX_FMT_ARGB32:
+-      case V4L2_PIX_FMT_XRGB32:
+-              return IPUV3_COLORSPACE_RGB;
+-      default:
+-              return IPUV3_COLORSPACE_UNKNOWN;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
+-
+-bool ipu_pixelformat_is_planar(u32 pixelformat)
+-{
+-      switch (pixelformat) {
+-      case V4L2_PIX_FMT_YUV420:
+-      case V4L2_PIX_FMT_YVU420:
+-      case V4L2_PIX_FMT_YUV422P:
+-      case V4L2_PIX_FMT_NV12:
+-      case V4L2_PIX_FMT_NV21:
+-      case V4L2_PIX_FMT_NV16:
+-      case V4L2_PIX_FMT_NV61:
+-              return true;
+-      }
+-
+-      return false;
+-}
+-EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
+-
+-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
+-{
+-      switch (mbus_code & 0xf000) {
+-      case 0x1000:
+-              return IPUV3_COLORSPACE_RGB;
+-      case 0x2000:
+-              return IPUV3_COLORSPACE_YUV;
+-      default:
+-              return IPUV3_COLORSPACE_UNKNOWN;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
+-
+-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
+-{
+-      switch (pixelformat) {
+-      case V4L2_PIX_FMT_YUV420:
+-      case V4L2_PIX_FMT_YVU420:
+-      case V4L2_PIX_FMT_YUV422P:
+-      case V4L2_PIX_FMT_NV12:
+-      case V4L2_PIX_FMT_NV21:
+-      case V4L2_PIX_FMT_NV16:
+-      case V4L2_PIX_FMT_NV61:
+-              /*
+-               * for the planar YUV formats, the stride passed to
+-               * cpmem must be the stride in bytes of the Y plane.
+-               * And all the planar YUV formats have an 8-bit
+-               * Y component.
+-               */
+-              return (8 * pixel_stride) >> 3;
+-      case V4L2_PIX_FMT_RGB565:
+-      case V4L2_PIX_FMT_YUYV:
+-      case V4L2_PIX_FMT_UYVY:
+-              return (16 * pixel_stride) >> 3;
+-      case V4L2_PIX_FMT_BGR24:
+-      case V4L2_PIX_FMT_RGB24:
+-              return (24 * pixel_stride) >> 3;
+-      case V4L2_PIX_FMT_BGR32:
+-      case V4L2_PIX_FMT_RGB32:
+-      case V4L2_PIX_FMT_XBGR32:
+-      case V4L2_PIX_FMT_XRGB32:
+-              return (32 * pixel_stride) >> 3;
+-      default:
+-              break;
+-      }
+-
+-      return -EINVAL;
+-}
+-EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
+-
+-int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
+-                          bool hflip, bool vflip)
+-{
+-      u32 r90, vf, hf;
+-
+-      switch (degrees) {
+-      case 0:
+-              vf = hf = r90 = 0;
+-              break;
+-      case 90:
+-              vf = hf = 0;
+-              r90 = 1;
+-              break;
+-      case 180:
+-              vf = hf = 1;
+-              r90 = 0;
+-              break;
+-      case 270:
+-              vf = hf = r90 = 1;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      hf ^= (u32)hflip;
+-      vf ^= (u32)vflip;
+-
+-      *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
+-
+-int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
+-                          bool hflip, bool vflip)
+-{
+-      u32 r90, vf, hf;
+-
+-      r90 = ((u32)mode >> 2) & 0x1;
+-      hf = ((u32)mode >> 1) & 0x1;
+-      vf = ((u32)mode >> 0) & 0x1;
+-      hf ^= (u32)hflip;
+-      vf ^= (u32)vflip;
+-
+-      switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
+-      case IPU_ROTATE_NONE:
+-              *degrees = 0;
+-              break;
+-      case IPU_ROTATE_90_RIGHT:
+-              *degrees = 90;
+-              break;
+-      case IPU_ROTATE_180:
+-              *degrees = 180;
+-              break;
+-      case IPU_ROTATE_90_LEFT:
+-              *degrees = 270;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
+-
+-struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
+-{
+-      struct ipuv3_channel *channel;
+-
+-      dev_dbg(ipu->dev, "%s %d\n", __func__, num);
+-
+-      if (num > 63)
+-              return ERR_PTR(-ENODEV);
+-
+-      mutex_lock(&ipu->channel_lock);
+-
+-      list_for_each_entry(channel, &ipu->channels, list) {
+-              if (channel->num == num) {
+-                      channel = ERR_PTR(-EBUSY);
+-                      goto out;
+-              }
+-      }
+-
+-      channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+-      if (!channel) {
+-              channel = ERR_PTR(-ENOMEM);
+-              goto out;
+-      }
+-
+-      channel->num = num;
+-      channel->ipu = ipu;
+-      list_add(&channel->list, &ipu->channels);
+-
+-out:
+-      mutex_unlock(&ipu->channel_lock);
+-
+-      return channel;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_get);
+-
+-void ipu_idmac_put(struct ipuv3_channel *channel)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-
+-      dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
+-
+-      mutex_lock(&ipu->channel_lock);
+-
+-      list_del(&channel->list);
+-      kfree(channel);
+-
+-      mutex_unlock(&ipu->channel_lock);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_put);
+-
+-#define idma_mask(ch)                 (1 << ((ch) & 0x1f))
+-
+-/*
+- * This is an undocumented feature, a write one to a channel bit in
+- * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
+- * internal current buffer pointer so that transfers start from buffer
+- * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
+- * only says these are read-only registers). This operation is required
+- * for channel linking to work correctly, for instance video capture
+- * pipelines that carry out image rotations will fail after the first
+- * streaming unless this function is called for each channel before
+- * re-enabling the channels.
+- */
+-static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned int chno = channel->num;
+-
+-      ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
+-}
+-
+-void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
+-              bool doublebuffer)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned long flags;
+-      u32 reg;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+-      if (doublebuffer)
+-              reg |= idma_mask(channel->num);
+-      else
+-              reg &= ~idma_mask(channel->num);
+-      ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
+-
+-      __ipu_idmac_reset_current_buffer(channel);
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
+-
+-static const struct {
+-      int chnum;
+-      u32 reg;
+-      int shift;
+-} idmac_lock_en_info[] = {
+-      { .chnum =  5, .reg = IDMAC_CH_LOCK_EN_1, .shift =  0, },
+-      { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift =  2, },
+-      { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift =  4, },
+-      { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift =  6, },
+-      { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift =  8, },
+-      { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
+-      { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
+-      { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
+-      { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
+-      { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
+-      { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
+-      { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift =  0, },
+-      { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift =  2, },
+-      { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift =  4, },
+-      { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift =  6, },
+-      { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift =  8, },
+-      { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
+-};
+-
+-int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned long flags;
+-      u32 bursts, regval;
+-      int i;
+-
+-      switch (num_bursts) {
+-      case 0:
+-      case 1:
+-              bursts = 0x00; /* locking disabled */
+-              break;
+-      case 2:
+-              bursts = 0x01;
+-              break;
+-      case 4:
+-              bursts = 0x02;
+-              break;
+-      case 8:
+-              bursts = 0x03;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      /*
+-       * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+-       * i.MX53 channel arbitration locking doesn't seem to work properly.
+-       * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+-       */
+-      if (bursts && ipu->ipu_type != IPUV3H)
+-              return -EINVAL;
+-
+-      for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
+-              if (channel->num == idmac_lock_en_info[i].chnum)
+-                      break;
+-      }
+-      if (i >= ARRAY_SIZE(idmac_lock_en_info))
+-              return -EINVAL;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
+-      regval &= ~(0x03 << idmac_lock_en_info[i].shift);
+-      regval |= (bursts << idmac_lock_en_info[i].shift);
+-      ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
+-
+-int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
+-{
+-      unsigned long lock_flags;
+-      u32 val;
+-
+-      spin_lock_irqsave(&ipu->lock, lock_flags);
+-
+-      val = ipu_cm_read(ipu, IPU_DISP_GEN);
+-
+-      if (mask & IPU_CONF_DI0_EN)
+-              val |= IPU_DI0_COUNTER_RELEASE;
+-      if (mask & IPU_CONF_DI1_EN)
+-              val |= IPU_DI1_COUNTER_RELEASE;
+-
+-      ipu_cm_write(ipu, val, IPU_DISP_GEN);
+-
+-      val = ipu_cm_read(ipu, IPU_CONF);
+-      val |= mask;
+-      ipu_cm_write(ipu, val, IPU_CONF);
+-
+-      spin_unlock_irqrestore(&ipu->lock, lock_flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_module_enable);
+-
+-int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
+-{
+-      unsigned long lock_flags;
+-      u32 val;
+-
+-      spin_lock_irqsave(&ipu->lock, lock_flags);
+-
+-      val = ipu_cm_read(ipu, IPU_CONF);
+-      val &= ~mask;
+-      ipu_cm_write(ipu, val, IPU_CONF);
+-
+-      val = ipu_cm_read(ipu, IPU_DISP_GEN);
+-
+-      if (mask & IPU_CONF_DI0_EN)
+-              val &= ~IPU_DI0_COUNTER_RELEASE;
+-      if (mask & IPU_CONF_DI1_EN)
+-              val &= ~IPU_DI1_COUNTER_RELEASE;
+-
+-      ipu_cm_write(ipu, val, IPU_DISP_GEN);
+-
+-      spin_unlock_irqrestore(&ipu->lock, lock_flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_module_disable);
+-
+-int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned int chno = channel->num;
+-
+-      return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
+-
+-bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned long flags;
+-      u32 reg = 0;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-      switch (buf_num) {
+-      case 0:
+-              reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
+-              break;
+-      case 1:
+-              reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
+-              break;
+-      case 2:
+-              reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
+-              break;
+-      }
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-
+-      return ((reg & idma_mask(channel->num)) != 0);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
+-
+-void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned int chno = channel->num;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      /* Mark buffer as ready. */
+-      if (buf_num == 0)
+-              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+-      else
+-              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
+-
+-void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned int chno = channel->num;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
+-      switch (buf_num) {
+-      case 0:
+-              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+-              break;
+-      case 1:
+-              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+-              break;
+-      case 2:
+-              ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
+-              break;
+-      default:
+-              break;
+-      }
+-      ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
+-
+-int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      u32 val;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+-      val |= idma_mask(channel->num);
+-      ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
+-
+-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
+-{
+-      return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
+-
+-int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned long timeout;
+-
+-      timeout = jiffies + msecs_to_jiffies(ms);
+-      while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
+-                      idma_mask(channel->num)) {
+-              if (time_after(jiffies, timeout))
+-                      return -ETIMEDOUT;
+-              cpu_relax();
+-      }
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
+-
+-int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      u32 val;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      /* Disable DMA channel(s) */
+-      val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+-      val &= ~idma_mask(channel->num);
+-      ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+-
+-      __ipu_idmac_reset_current_buffer(channel);
+-
+-      /* Set channel buffers NOT to be ready */
+-      ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
+-
+-      if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
+-                      idma_mask(channel->num)) {
+-              ipu_cm_write(ipu, idma_mask(channel->num),
+-                           IPU_CHA_BUF0_RDY(channel->num));
+-      }
+-
+-      if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
+-                      idma_mask(channel->num)) {
+-              ipu_cm_write(ipu, idma_mask(channel->num),
+-                           IPU_CHA_BUF1_RDY(channel->num));
+-      }
+-
+-      ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+-
+-      /* Reset the double buffer */
+-      val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+-      val &= ~idma_mask(channel->num);
+-      ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
+-
+-/*
+- * The imx6 rev. D TRM says that enabling the WM feature will increase
+- * a channel's priority. Refer to Table 36-8 Calculated priority value.
+- * The sub-module that is the sink or source for the channel must enable
+- * watermark signal for this to take effect (SMFC_WM for instance).
+- */
+-void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
+-{
+-      struct ipu_soc *ipu = channel->ipu;
+-      unsigned long flags;
+-      u32 val;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
+-      if (enable)
+-              val |= 1 << (channel->num % 32);
+-      else
+-              val &= ~(1 << (channel->num % 32));
+-      ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
+-
+-static int ipu_memory_reset(struct ipu_soc *ipu)
+-{
+-      unsigned long timeout;
+-
+-      ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
+-
+-      timeout = jiffies + msecs_to_jiffies(1000);
+-      while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
+-              if (time_after(jiffies, timeout))
+-                      return -ETIME;
+-              cpu_relax();
+-      }
+-
+-      return 0;
+-}
+-
+-/*
+- * Set the source mux for the given CSI. Selects either parallel or
+- * MIPI CSI2 sources.
+- */
+-void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
+-{
+-      unsigned long flags;
+-      u32 val, mask;
+-
+-      mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
+-              IPU_CONF_CSI0_DATA_SOURCE;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      val = ipu_cm_read(ipu, IPU_CONF);
+-      if (mipi_csi2)
+-              val |= mask;
+-      else
+-              val &= ~mask;
+-      ipu_cm_write(ipu, val, IPU_CONF);
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
+-
+-/*
+- * Set the source mux for the IC. Selects either CSI[01] or the VDI.
+- */
+-void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
+-{
+-      unsigned long flags;
+-      u32 val;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      val = ipu_cm_read(ipu, IPU_CONF);
+-      if (vdi)
+-              val |= IPU_CONF_IC_INPUT;
+-      else
+-              val &= ~IPU_CONF_IC_INPUT;
+-
+-      if (csi_id == 1)
+-              val |= IPU_CONF_CSI_SEL;
+-      else
+-              val &= ~IPU_CONF_CSI_SEL;
+-
+-      ipu_cm_write(ipu, val, IPU_CONF);
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
+-
+-
+-/* Frame Synchronization Unit Channel Linking */
+-
+-struct fsu_link_reg_info {
+-      int chno;
+-      u32 reg;
+-      u32 mask;
+-      u32 val;
+-};
+-
+-struct fsu_link_info {
+-      struct fsu_link_reg_info src;
+-      struct fsu_link_reg_info sink;
+-};
+-
+-static const struct fsu_link_info fsu_link_info[] = {
+-      {
+-              .src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+-                        FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+-              .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+-                        FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+-      }, {
+-              .src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+-                        FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+-              .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+-                        FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+-      }, {
+-              .src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+-                        FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+-              .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+-                        FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+-      }, {
+-              .src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+-              .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+-                        FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+-      },
+-};
+-
+-static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+-{
+-      int i;
+-
+-      for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+-              if (src == fsu_link_info[i].src.chno &&
+-                  sink == fsu_link_info[i].sink.chno)
+-                      return &fsu_link_info[i];
+-      }
+-
+-      return NULL;
+-}
+-
+-/*
+- * Links a source channel to a sink channel in the FSU.
+- */
+-int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+-{
+-      const struct fsu_link_info *link;
+-      u32 src_reg, sink_reg;
+-      unsigned long flags;
+-
+-      link = find_fsu_link_info(src_ch, sink_ch);
+-      if (!link)
+-              return -EINVAL;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      if (link->src.mask) {
+-              src_reg = ipu_cm_read(ipu, link->src.reg);
+-              src_reg &= ~link->src.mask;
+-              src_reg |= link->src.val;
+-              ipu_cm_write(ipu, src_reg, link->src.reg);
+-      }
+-
+-      if (link->sink.mask) {
+-              sink_reg = ipu_cm_read(ipu, link->sink.reg);
+-              sink_reg &= ~link->sink.mask;
+-              sink_reg |= link->sink.val;
+-              ipu_cm_write(ipu, sink_reg, link->sink.reg);
+-      }
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_fsu_link);
+-
+-/*
+- * Unlinks source and sink channels in the FSU.
+- */
+-int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+-{
+-      const struct fsu_link_info *link;
+-      u32 src_reg, sink_reg;
+-      unsigned long flags;
+-
+-      link = find_fsu_link_info(src_ch, sink_ch);
+-      if (!link)
+-              return -EINVAL;
+-
+-      spin_lock_irqsave(&ipu->lock, flags);
+-
+-      if (link->src.mask) {
+-              src_reg = ipu_cm_read(ipu, link->src.reg);
+-              src_reg &= ~link->src.mask;
+-              ipu_cm_write(ipu, src_reg, link->src.reg);
+-      }
+-
+-      if (link->sink.mask) {
+-              sink_reg = ipu_cm_read(ipu, link->sink.reg);
+-              sink_reg &= ~link->sink.mask;
+-              ipu_cm_write(ipu, sink_reg, link->sink.reg);
+-      }
+-
+-      spin_unlock_irqrestore(&ipu->lock, flags);
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+-
+-/* Link IDMAC channels in the FSU */
+-int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+-{
+-      return ipu_fsu_link(src->ipu, src->num, sink->num);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_link);
+-
+-/* Unlink IDMAC channels in the FSU */
+-int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+-{
+-      return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
+-
+-struct ipu_devtype {
+-      const char *name;
+-      unsigned long cm_ofs;
+-      unsigned long cpmem_ofs;
+-      unsigned long srm_ofs;
+-      unsigned long tpm_ofs;
+-      unsigned long csi0_ofs;
+-      unsigned long csi1_ofs;
+-      unsigned long ic_ofs;
+-      unsigned long disp0_ofs;
+-      unsigned long disp1_ofs;
+-      unsigned long dc_tmpl_ofs;
+-      unsigned long vdi_ofs;
+-      enum ipuv3_type type;
+-};
+-
+-static struct ipu_devtype ipu_type_imx51 = {
+-      .name = "IPUv3EX",
+-      .cm_ofs = 0x1e000000,
+-      .cpmem_ofs = 0x1f000000,
+-      .srm_ofs = 0x1f040000,
+-      .tpm_ofs = 0x1f060000,
+-      .csi0_ofs = 0x1e030000,
+-      .csi1_ofs = 0x1e038000,
+-      .ic_ofs = 0x1e020000,
+-      .disp0_ofs = 0x1e040000,
+-      .disp1_ofs = 0x1e048000,
+-      .dc_tmpl_ofs = 0x1f080000,
+-      .vdi_ofs = 0x1e068000,
+-      .type = IPUV3EX,
+-};
+-
+-static struct ipu_devtype ipu_type_imx53 = {
+-      .name = "IPUv3M",
+-      .cm_ofs = 0x06000000,
+-      .cpmem_ofs = 0x07000000,
+-      .srm_ofs = 0x07040000,
+-      .tpm_ofs = 0x07060000,
+-      .csi0_ofs = 0x06030000,
+-      .csi1_ofs = 0x06038000,
+-      .ic_ofs = 0x06020000,
+-      .disp0_ofs = 0x06040000,
+-      .disp1_ofs = 0x06048000,
+-      .dc_tmpl_ofs = 0x07080000,
+-      .vdi_ofs = 0x06068000,
+-      .type = IPUV3M,
+-};
+-
+-static struct ipu_devtype ipu_type_imx6q = {
+-      .name = "IPUv3H",
+-      .cm_ofs = 0x00200000,
+-      .cpmem_ofs = 0x00300000,
+-      .srm_ofs = 0x00340000,
+-      .tpm_ofs = 0x00360000,
+-      .csi0_ofs = 0x00230000,
+-      .csi1_ofs = 0x00238000,
+-      .ic_ofs = 0x00220000,
+-      .disp0_ofs = 0x00240000,
+-      .disp1_ofs = 0x00248000,
+-      .dc_tmpl_ofs = 0x00380000,
+-      .vdi_ofs = 0x00268000,
+-      .type = IPUV3H,
+-};
+-
+-static const struct of_device_id imx_ipu_dt_ids[] = {
+-      { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
+-      { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
+-      { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
+-      { .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
+-      { /* sentinel */ }
+-};
+-MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
+-
+-static int ipu_submodules_init(struct ipu_soc *ipu,
+-              struct platform_device *pdev, unsigned long ipu_base,
+-              struct clk *ipu_clk)
+-{
+-      char *unit;
+-      int ret;
+-      struct device *dev = &pdev->dev;
+-      const struct ipu_devtype *devtype = ipu->devtype;
+-
+-      ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
+-      if (ret) {
+-              unit = "cpmem";
+-              goto err_cpmem;
+-      }
+-
+-      ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
+-                         IPU_CONF_CSI0_EN, ipu_clk);
+-      if (ret) {
+-              unit = "csi0";
+-              goto err_csi_0;
+-      }
+-
+-      ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
+-                         IPU_CONF_CSI1_EN, ipu_clk);
+-      if (ret) {
+-              unit = "csi1";
+-              goto err_csi_1;
+-      }
+-
+-      ret = ipu_ic_init(ipu, dev,
+-                        ipu_base + devtype->ic_ofs,
+-                        ipu_base + devtype->tpm_ofs);
+-      if (ret) {
+-              unit = "ic";
+-              goto err_ic;
+-      }
+-
+-      ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+-                         IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+-                         IPU_CONF_IC_INPUT);
+-      if (ret) {
+-              unit = "vdi";
+-              goto err_vdi;
+-      }
+-
+-      ret = ipu_image_convert_init(ipu, dev);
+-      if (ret) {
+-              unit = "image_convert";
+-              goto err_image_convert;
+-      }
+-
+-      ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
+-                        IPU_CONF_DI0_EN, ipu_clk);
+-      if (ret) {
+-              unit = "di0";
+-              goto err_di_0;
+-      }
+-
+-      ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
+-                      IPU_CONF_DI1_EN, ipu_clk);
+-      if (ret) {
+-              unit = "di1";
+-              goto err_di_1;
+-      }
+-
+-      ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
+-                      IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
+-      if (ret) {
+-              unit = "dc_template";
+-              goto err_dc;
+-      }
+-
+-      ret = ipu_dmfc_init(ipu, dev, ipu_base +
+-                      devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
+-      if (ret) {
+-              unit = "dmfc";
+-              goto err_dmfc;
+-      }
+-
+-      ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
+-      if (ret) {
+-              unit = "dp";
+-              goto err_dp;
+-      }
+-
+-      ret = ipu_smfc_init(ipu, dev, ipu_base +
+-                      devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
+-      if (ret) {
+-              unit = "smfc";
+-              goto err_smfc;
+-      }
+-
+-      return 0;
+-
+-err_smfc:
+-      ipu_dp_exit(ipu);
+-err_dp:
+-      ipu_dmfc_exit(ipu);
+-err_dmfc:
+-      ipu_dc_exit(ipu);
+-err_dc:
+-      ipu_di_exit(ipu, 1);
+-err_di_1:
+-      ipu_di_exit(ipu, 0);
+-err_di_0:
+-      ipu_image_convert_exit(ipu);
+-err_image_convert:
+-      ipu_vdi_exit(ipu);
+-err_vdi:
+-      ipu_ic_exit(ipu);
+-err_ic:
+-      ipu_csi_exit(ipu, 1);
+-err_csi_1:
+-      ipu_csi_exit(ipu, 0);
+-err_csi_0:
+-      ipu_cpmem_exit(ipu);
+-err_cpmem:
+-      dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
+-      return ret;
+-}
+-
+-static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
+-{
+-      unsigned long status;
+-      int i, bit, irq;
+-
+-      for (i = 0; i < num_regs; i++) {
+-
+-              status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
+-              status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
+-
+-              for_each_set_bit(bit, &status, 32) {
+-                      irq = irq_linear_revmap(ipu->domain,
+-                                              regs[i] * 32 + bit);
+-                      if (irq)
+-                              generic_handle_irq(irq);
+-              }
+-      }
+-}
+-
+-static void ipu_irq_handler(struct irq_desc *desc)
+-{
+-      struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+-      struct irq_chip *chip = irq_desc_get_chip(desc);
+-      static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+-
+-      chained_irq_enter(chip, desc);
+-
+-      ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+-
+-      chained_irq_exit(chip, desc);
+-}
+-
+-static void ipu_err_irq_handler(struct irq_desc *desc)
+-{
+-      struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+-      struct irq_chip *chip = irq_desc_get_chip(desc);
+-      static const int int_reg[] = { 4, 5, 8, 9};
+-
+-      chained_irq_enter(chip, desc);
+-
+-      ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+-
+-      chained_irq_exit(chip, desc);
+-}
+-
+-int ipu_map_irq(struct ipu_soc *ipu, int irq)
+-{
+-      int virq;
+-
+-      virq = irq_linear_revmap(ipu->domain, irq);
+-      if (!virq)
+-              virq = irq_create_mapping(ipu->domain, irq);
+-
+-      return virq;
+-}
+-EXPORT_SYMBOL_GPL(ipu_map_irq);
+-
+-int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
+-              enum ipu_channel_irq irq_type)
+-{
+-      return ipu_map_irq(ipu, irq_type + channel->num);
+-}
+-EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
+-
+-static void ipu_submodules_exit(struct ipu_soc *ipu)
+-{
+-      ipu_smfc_exit(ipu);
+-      ipu_dp_exit(ipu);
+-      ipu_dmfc_exit(ipu);
+-      ipu_dc_exit(ipu);
+-      ipu_di_exit(ipu, 1);
+-      ipu_di_exit(ipu, 0);
+-      ipu_image_convert_exit(ipu);
+-      ipu_vdi_exit(ipu);
+-      ipu_ic_exit(ipu);
+-      ipu_csi_exit(ipu, 1);
+-      ipu_csi_exit(ipu, 0);
+-      ipu_cpmem_exit(ipu);
+-}
+-
+-static int platform_remove_devices_fn(struct device *dev, void *unused)
+-{
+-      struct platform_device *pdev = to_platform_device(dev);
+-
+-      platform_device_unregister(pdev);
+-
+-      return 0;
+-}
+-
+-static void platform_device_unregister_children(struct platform_device *pdev)
+-{
+-      device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
+-}
+-
+-struct ipu_platform_reg {
+-      struct ipu_client_platformdata pdata;
+-      const char *name;
+-};
+-
+-/* These must be in the order of the corresponding device tree port nodes */
+-static struct ipu_platform_reg client_reg[] = {
+-      {
+-              .pdata = {
+-                      .csi = 0,
+-                      .dma[0] = IPUV3_CHANNEL_CSI0,
+-                      .dma[1] = -EINVAL,
+-              },
+-              .name = "imx-ipuv3-csi",
+-      }, {
+-              .pdata = {
+-                      .csi = 1,
+-                      .dma[0] = IPUV3_CHANNEL_CSI1,
+-                      .dma[1] = -EINVAL,
+-              },
+-              .name = "imx-ipuv3-csi",
+-      }, {
+-              .pdata = {
+-                      .di = 0,
+-                      .dc = 5,
+-                      .dp = IPU_DP_FLOW_SYNC_BG,
+-                      .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
+-                      .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
+-              },
+-              .name = "imx-ipuv3-crtc",
+-      }, {
+-              .pdata = {
+-                      .di = 1,
+-                      .dc = 1,
+-                      .dp = -EINVAL,
+-                      .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
+-                      .dma[1] = -EINVAL,
+-              },
+-              .name = "imx-ipuv3-crtc",
+-      },
+-};
+-
+-static DEFINE_MUTEX(ipu_client_id_mutex);
+-static int ipu_client_id;
+-
+-static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+-{
+-      struct device *dev = ipu->dev;
+-      unsigned i;
+-      int id, ret;
+-
+-      mutex_lock(&ipu_client_id_mutex);
+-      id = ipu_client_id;
+-      ipu_client_id += ARRAY_SIZE(client_reg);
+-      mutex_unlock(&ipu_client_id_mutex);
+-
+-      for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
+-              struct ipu_platform_reg *reg = &client_reg[i];
+-              struct platform_device *pdev;
+-              struct device_node *of_node;
+-
+-              /* Associate subdevice with the corresponding port node */
+-              of_node = of_graph_get_port_by_id(dev->of_node, i);
+-              if (!of_node) {
+-                      dev_info(dev,
+-                               "no port@%d node in %pOF, not using %s%d\n",
+-                               i, dev->of_node,
+-                               (i / 2) ? "DI" : "CSI", i % 2);
+-                      continue;
+-              }
+-
+-              pdev = platform_device_alloc(reg->name, id++);
+-              if (!pdev) {
+-                      ret = -ENOMEM;
+-                      goto err_register;
+-              }
+-
+-              pdev->dev.parent = dev;
+-
+-              reg->pdata.of_node = of_node;
+-              ret = platform_device_add_data(pdev, &reg->pdata,
+-                                             sizeof(reg->pdata));
+-              if (!ret)
+-                      ret = platform_device_add(pdev);
+-              if (ret) {
+-                      platform_device_put(pdev);
+-                      goto err_register;
+-              }
+-      }
+-
+-      return 0;
+-
+-err_register:
+-      platform_device_unregister_children(to_platform_device(dev));
+-
+-      return ret;
+-}
+-
+-
+-static int ipu_irq_init(struct ipu_soc *ipu)
+-{
+-      struct irq_chip_generic *gc;
+-      struct irq_chip_type *ct;
+-      unsigned long unused[IPU_NUM_IRQS / 32] = {
+-              0x400100d0, 0xffe000fd,
+-              0x400100d0, 0xffe000fd,
+-              0x400100d0, 0xffe000fd,
+-              0x4077ffff, 0xffe7e1fd,
+-              0x23fffffe, 0x8880fff0,
+-              0xf98fe7d0, 0xfff81fff,
+-              0x400100d0, 0xffe000fd,
+-              0x00000000,
+-      };
+-      int ret, i;
+-
+-      ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
+-                                          &irq_generic_chip_ops, ipu);
+-      if (!ipu->domain) {
+-              dev_err(ipu->dev, "failed to add irq domain\n");
+-              return -ENODEV;
+-      }
+-
+-      ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
+-                                           handle_level_irq, 0, 0, 0);
+-      if (ret < 0) {
+-              dev_err(ipu->dev, "failed to alloc generic irq chips\n");
+-              irq_domain_remove(ipu->domain);
+-              return ret;
+-      }
+-
+-      /* Mask and clear all interrupts */
+-      for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+-              ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+-              ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
+-      }
+-
+-      for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+-              gc = irq_get_domain_generic_chip(ipu->domain, i);
+-              gc->reg_base = ipu->cm_reg;
+-              gc->unused = unused[i / 32];
+-              ct = gc->chip_types;
+-              ct->chip.irq_ack = irq_gc_ack_set_bit;
+-              ct->chip.irq_mask = irq_gc_mask_clr_bit;
+-              ct->chip.irq_unmask = irq_gc_mask_set_bit;
+-              ct->regs.ack = IPU_INT_STAT(i / 32);
+-              ct->regs.mask = IPU_INT_CTRL(i / 32);
+-      }
+-
+-      irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
+-      irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
+-                                       ipu);
+-
+-      return 0;
+-}
+-
+-static void ipu_irq_exit(struct ipu_soc *ipu)
+-{
+-      int i, irq;
+-
+-      irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
+-      irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
+-
+-      /* TODO: remove irq_domain_generic_chips */
+-
+-      for (i = 0; i < IPU_NUM_IRQS; i++) {
+-              irq = irq_linear_revmap(ipu->domain, i);
+-              if (irq)
+-                      irq_dispose_mapping(irq);
+-      }
+-
+-      irq_domain_remove(ipu->domain);
+-}
+-
+-void ipu_dump(struct ipu_soc *ipu)
+-{
+-      int i;
+-
+-      dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_CONF));
+-      dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_CONF));
+-      dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
+-      dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
+-      dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
+-      dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
+-      dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
+-      dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
+-              ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
+-      dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
+-      dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
+-      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
+-      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
+-      dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
+-      dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
+-              ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
+-      for (i = 0; i < 15; i++)
+-              dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
+-                      ipu_cm_read(ipu, IPU_INT_CTRL(i)));
+-}
+-EXPORT_SYMBOL_GPL(ipu_dump);
+-
+-static int ipu_probe(struct platform_device *pdev)
+-{
+-      struct device_node *np = pdev->dev.of_node;
+-      struct ipu_soc *ipu;
+-      struct resource *res;
+-      unsigned long ipu_base;
+-      int ret, irq_sync, irq_err;
+-      const struct ipu_devtype *devtype;
+-
+-      devtype = of_device_get_match_data(&pdev->dev);
+-      if (!devtype)
+-              return -EINVAL;
+-
+-      irq_sync = platform_get_irq(pdev, 0);
+-      irq_err = platform_get_irq(pdev, 1);
+-      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-
+-      dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
+-                      irq_sync, irq_err);
+-
+-      if (!res || irq_sync < 0 || irq_err < 0)
+-              return -ENODEV;
+-
+-      ipu_base = res->start;
+-
+-      ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
+-      if (!ipu)
+-              return -ENODEV;
+-
+-      ipu->id = of_alias_get_id(np, "ipu");
+-      if (ipu->id < 0)
+-              ipu->id = 0;
+-
+-      if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
+-          IS_ENABLED(CONFIG_DRM)) {
+-              ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
+-                                                        "fsl,prg", ipu->id);
+-              if (!ipu->prg_priv)
+-                      return -EPROBE_DEFER;
+-      }
+-
+-      ipu->devtype = devtype;
+-      ipu->ipu_type = devtype->type;
+-
+-      spin_lock_init(&ipu->lock);
+-      mutex_init(&ipu->channel_lock);
+-      INIT_LIST_HEAD(&ipu->channels);
+-
+-      dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
+-                      ipu_base + devtype->cm_ofs);
+-      dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
+-                      ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
+-      dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
+-                      ipu_base + devtype->cpmem_ofs);
+-      dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
+-                      ipu_base + devtype->csi0_ofs);
+-      dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
+-                      ipu_base + devtype->csi1_ofs);
+-      dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
+-                      ipu_base + devtype->ic_ofs);
+-      dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
+-                      ipu_base + devtype->disp0_ofs);
+-      dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
+-                      ipu_base + devtype->disp1_ofs);
+-      dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
+-                      ipu_base + devtype->srm_ofs);
+-      dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
+-                      ipu_base + devtype->tpm_ofs);
+-      dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
+-                      ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
+-      dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
+-                      ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
+-      dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
+-                      ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
+-      dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
+-                      ipu_base + devtype->vdi_ofs);
+-
+-      ipu->cm_reg = devm_ioremap(&pdev->dev,
+-                      ipu_base + devtype->cm_ofs, PAGE_SIZE);
+-      ipu->idmac_reg = devm_ioremap(&pdev->dev,
+-                      ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
+-                      PAGE_SIZE);
+-
+-      if (!ipu->cm_reg || !ipu->idmac_reg)
+-              return -ENOMEM;
+-
+-      ipu->clk = devm_clk_get(&pdev->dev, "bus");
+-      if (IS_ERR(ipu->clk)) {
+-              ret = PTR_ERR(ipu->clk);
+-              dev_err(&pdev->dev, "clk_get failed with %d", ret);
+-              return ret;
+-      }
+-
+-      platform_set_drvdata(pdev, ipu);
+-
+-      ret = clk_prepare_enable(ipu->clk);
+-      if (ret) {
+-              dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+-              return ret;
+-      }
+-
+-      ipu->dev = &pdev->dev;
+-      ipu->irq_sync = irq_sync;
+-      ipu->irq_err = irq_err;
+-
+-      ret = device_reset(&pdev->dev);
+-      if (ret) {
+-              dev_err(&pdev->dev, "failed to reset: %d\n", ret);
+-              goto out_failed_reset;
+-      }
+-      ret = ipu_memory_reset(ipu);
+-      if (ret)
+-              goto out_failed_reset;
+-
+-      ret = ipu_irq_init(ipu);
+-      if (ret)
+-              goto out_failed_irq;
+-
+-      /* Set MCU_T to divide MCU access window into 2 */
+-      ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
+-                      IPU_DISP_GEN);
+-
+-      ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
+-      if (ret)
+-              goto failed_submodules_init;
+-
+-      ret = ipu_add_client_devices(ipu, ipu_base);
+-      if (ret) {
+-              dev_err(&pdev->dev, "adding client devices failed with %d\n",
+-                              ret);
+-              goto failed_add_clients;
+-      }
+-
+-      dev_info(&pdev->dev, "%s probed\n", devtype->name);
+-
+-      return 0;
+-
+-failed_add_clients:
+-      ipu_submodules_exit(ipu);
+-failed_submodules_init:
+-      ipu_irq_exit(ipu);
+-out_failed_irq:
+-out_failed_reset:
+-      clk_disable_unprepare(ipu->clk);
+-      return ret;
+-}
+-
+-static int ipu_remove(struct platform_device *pdev)
+-{
+-      struct ipu_soc *ipu = platform_get_drvdata(pdev);
+-
+-      platform_device_unregister_children(pdev);
+-      ipu_submodules_exit(ipu);
+-      ipu_irq_exit(ipu);
+-
+-      clk_disable_unprepare(ipu->clk);
+-
+-      return 0;
+-}
+-
+-static struct platform_driver imx_ipu_driver = {
+-      .driver = {
+-              .name = "imx-ipuv3",
+-              .of_match_table = imx_ipu_dt_ids,
+-      },
+-      .probe = ipu_probe,
+-      .remove = ipu_remove,
+-};
+-
+-static struct platform_driver * const drivers[] = {
+-#if IS_ENABLED(CONFIG_DRM)
+-      &ipu_pre_drv,
+-      &ipu_prg_drv,
+-#endif
+-      &imx_ipu_driver,
+-};
+-
+-static int __init imx_ipu_init(void)
+-{
+-      return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+-}
+-module_init(imx_ipu_init);
+-
+-static void __exit imx_ipu_exit(void)
+-{
+-      platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+-}
+-module_exit(imx_ipu_exit);
+-
+-MODULE_ALIAS("platform:imx-ipuv3");
+-MODULE_DESCRIPTION("i.MX IPU v3 driver");
+-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+-MODULE_LICENSE("GPL");
+--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
++++ /dev/null
+@@ -1,976 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2012 Mentor Graphics Inc.
+- * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+- */
+-#include <linux/types.h>
+-#include <linux/bitrev.h>
+-#include <linux/io.h>
+-#include <linux/sizes.h>
+-#include <drm/drm_fourcc.h>
+-#include "ipu-prv.h"
+-
+-struct ipu_cpmem_word {
+-      u32 data[5];
+-      u32 res[3];
+-};
+-
+-struct ipu_ch_param {
+-      struct ipu_cpmem_word word[2];
+-};
+-
+-struct ipu_cpmem {
+-      struct ipu_ch_param __iomem *base;
+-      u32 module;
+-      spinlock_t lock;
+-      int use_count;
+-      struct ipu_soc *ipu;
+-};
+-
+-#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size))
+-
+-#define IPU_FIELD_UBO         IPU_CPMEM_WORD(0, 46, 22)
+-#define IPU_FIELD_VBO         IPU_CPMEM_WORD(0, 68, 22)
+-#define IPU_FIELD_IOX         IPU_CPMEM_WORD(0, 90, 4)
+-#define IPU_FIELD_RDRW                IPU_CPMEM_WORD(0, 94, 1)
+-#define IPU_FIELD_SO          IPU_CPMEM_WORD(0, 113, 1)
+-#define IPU_FIELD_SLY         IPU_CPMEM_WORD(1, 102, 14)
+-#define IPU_FIELD_SLUV                IPU_CPMEM_WORD(1, 128, 14)
+-
+-#define IPU_FIELD_XV          IPU_CPMEM_WORD(0, 0, 10)
+-#define IPU_FIELD_YV          IPU_CPMEM_WORD(0, 10, 9)
+-#define IPU_FIELD_XB          IPU_CPMEM_WORD(0, 19, 13)
+-#define IPU_FIELD_YB          IPU_CPMEM_WORD(0, 32, 12)
+-#define IPU_FIELD_NSB_B               IPU_CPMEM_WORD(0, 44, 1)
+-#define IPU_FIELD_CF          IPU_CPMEM_WORD(0, 45, 1)
+-#define IPU_FIELD_SX          IPU_CPMEM_WORD(0, 46, 12)
+-#define IPU_FIELD_SY          IPU_CPMEM_WORD(0, 58, 11)
+-#define IPU_FIELD_NS          IPU_CPMEM_WORD(0, 69, 10)
+-#define IPU_FIELD_SDX         IPU_CPMEM_WORD(0, 79, 7)
+-#define IPU_FIELD_SM          IPU_CPMEM_WORD(0, 86, 10)
+-#define IPU_FIELD_SCC         IPU_CPMEM_WORD(0, 96, 1)
+-#define IPU_FIELD_SCE         IPU_CPMEM_WORD(0, 97, 1)
+-#define IPU_FIELD_SDY         IPU_CPMEM_WORD(0, 98, 7)
+-#define IPU_FIELD_SDRX                IPU_CPMEM_WORD(0, 105, 1)
+-#define IPU_FIELD_SDRY                IPU_CPMEM_WORD(0, 106, 1)
+-#define IPU_FIELD_BPP         IPU_CPMEM_WORD(0, 107, 3)
+-#define IPU_FIELD_DEC_SEL     IPU_CPMEM_WORD(0, 110, 2)
+-#define IPU_FIELD_DIM         IPU_CPMEM_WORD(0, 112, 1)
+-#define IPU_FIELD_BNDM                IPU_CPMEM_WORD(0, 114, 3)
+-#define IPU_FIELD_BM          IPU_CPMEM_WORD(0, 117, 2)
+-#define IPU_FIELD_ROT         IPU_CPMEM_WORD(0, 119, 1)
+-#define IPU_FIELD_ROT_HF_VF   IPU_CPMEM_WORD(0, 119, 3)
+-#define IPU_FIELD_HF          IPU_CPMEM_WORD(0, 120, 1)
+-#define IPU_FIELD_VF          IPU_CPMEM_WORD(0, 121, 1)
+-#define IPU_FIELD_THE         IPU_CPMEM_WORD(0, 122, 1)
+-#define IPU_FIELD_CAP         IPU_CPMEM_WORD(0, 123, 1)
+-#define IPU_FIELD_CAE         IPU_CPMEM_WORD(0, 124, 1)
+-#define IPU_FIELD_FW          IPU_CPMEM_WORD(0, 125, 13)
+-#define IPU_FIELD_FH          IPU_CPMEM_WORD(0, 138, 12)
+-#define IPU_FIELD_EBA0                IPU_CPMEM_WORD(1, 0, 29)
+-#define IPU_FIELD_EBA1                IPU_CPMEM_WORD(1, 29, 29)
+-#define IPU_FIELD_ILO         IPU_CPMEM_WORD(1, 58, 20)
+-#define IPU_FIELD_NPB         IPU_CPMEM_WORD(1, 78, 7)
+-#define IPU_FIELD_PFS         IPU_CPMEM_WORD(1, 85, 4)
+-#define IPU_FIELD_ALU         IPU_CPMEM_WORD(1, 89, 1)
+-#define IPU_FIELD_ALBM                IPU_CPMEM_WORD(1, 90, 3)
+-#define IPU_FIELD_ID          IPU_CPMEM_WORD(1, 93, 2)
+-#define IPU_FIELD_TH          IPU_CPMEM_WORD(1, 95, 7)
+-#define IPU_FIELD_SL          IPU_CPMEM_WORD(1, 102, 14)
+-#define IPU_FIELD_WID0                IPU_CPMEM_WORD(1, 116, 3)
+-#define IPU_FIELD_WID1                IPU_CPMEM_WORD(1, 119, 3)
+-#define IPU_FIELD_WID2                IPU_CPMEM_WORD(1, 122, 3)
+-#define IPU_FIELD_WID3                IPU_CPMEM_WORD(1, 125, 3)
+-#define IPU_FIELD_OFS0                IPU_CPMEM_WORD(1, 128, 5)
+-#define IPU_FIELD_OFS1                IPU_CPMEM_WORD(1, 133, 5)
+-#define IPU_FIELD_OFS2                IPU_CPMEM_WORD(1, 138, 5)
+-#define IPU_FIELD_OFS3                IPU_CPMEM_WORD(1, 143, 5)
+-#define IPU_FIELD_SXYS                IPU_CPMEM_WORD(1, 148, 1)
+-#define IPU_FIELD_CRE         IPU_CPMEM_WORD(1, 149, 1)
+-#define IPU_FIELD_DEC_SEL2    IPU_CPMEM_WORD(1, 150, 1)
+-
+-static inline struct ipu_ch_param __iomem *
+-ipu_get_cpmem(struct ipuv3_channel *ch)
+-{
+-      struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv;
+-
+-      return cpmem->base + ch->num;
+-}
+-
+-static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v)
+-{
+-      struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+-      u32 bit = (wbs >> 8) % 160;
+-      u32 size = wbs & 0xff;
+-      u32 word = (wbs >> 8) / 160;
+-      u32 i = bit / 32;
+-      u32 ofs = bit % 32;
+-      u32 mask = (1 << size) - 1;
+-      u32 val;
+-
+-      pr_debug("%s %d %d %d\n", __func__, word, bit , size);
+-
+-      val = readl(&base->word[word].data[i]);
+-      val &= ~(mask << ofs);
+-      val |= v << ofs;
+-      writel(val, &base->word[word].data[i]);
+-
+-      if ((bit + size - 1) / 32 > i) {
+-              val = readl(&base->word[word].data[i + 1]);
+-              val &= ~(mask >> (ofs ? (32 - ofs) : 0));
+-              val |= v >> (ofs ? (32 - ofs) : 0);
+-              writel(val, &base->word[word].data[i + 1]);
+-      }
+-}
+-
+-static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
+-{
+-      struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch);
+-      u32 bit = (wbs >> 8) % 160;
+-      u32 size = wbs & 0xff;
+-      u32 word = (wbs >> 8) / 160;
+-      u32 i = bit / 32;
+-      u32 ofs = bit % 32;
+-      u32 mask = (1 << size) - 1;
+-      u32 val = 0;
+-
+-      pr_debug("%s %d %d %d\n", __func__, word, bit , size);
+-
+-      val = (readl(&base->word[word].data[i]) >> ofs) & mask;
+-
+-      if ((bit + size - 1) / 32 > i) {
+-              u32 tmp;
+-
+-              tmp = readl(&base->word[word].data[i + 1]);
+-              tmp &= mask >> (ofs ? (32 - ofs) : 0);
+-              val |= tmp << (ofs ? (32 - ofs) : 0);
+-      }
+-
+-      return val;
+-}
+-
+-/*
+- * The V4L2 spec defines packed RGB formats in memory byte order, which from
+- * point of view of the IPU corresponds to little-endian words with the first
+- * component in the least significant bits.
+- * The DRM pixel formats and IPU internal representation are ordered the other
+- * way around, with the first named component ordered at the most significant
+- * bits. Further, V4L2 formats are not well defined:
+- *     https://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+- * We choose the interpretation which matches GStreamer behavior.
+- */
+-static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+-{
+-      switch (pixelformat) {
+-      case V4L2_PIX_FMT_RGB565:
+-              /*
+-               * Here we choose the 'corrected' interpretation of RGBP, a
+-               * little-endian 16-bit word with the red component at the most
+-               * significant bits:
+-               * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+-               */
+-              return DRM_FORMAT_RGB565;
+-      case V4L2_PIX_FMT_BGR24:
+-              /* B G R <=> [24:0] R:G:B */
+-              return DRM_FORMAT_RGB888;
+-      case V4L2_PIX_FMT_RGB24:
+-              /* R G B <=> [24:0] B:G:R */
+-              return DRM_FORMAT_BGR888;
+-      case V4L2_PIX_FMT_BGR32:
+-              /* B G R A <=> [32:0] A:B:G:R */
+-              return DRM_FORMAT_XRGB8888;
+-      case V4L2_PIX_FMT_RGB32:
+-              /* R G B A <=> [32:0] A:B:G:R */
+-              return DRM_FORMAT_XBGR8888;
+-      case V4L2_PIX_FMT_ABGR32:
+-              /* B G R A <=> [32:0] A:R:G:B */
+-              return DRM_FORMAT_ARGB8888;
+-      case V4L2_PIX_FMT_XBGR32:
+-              /* B G R X <=> [32:0] X:R:G:B */
+-              return DRM_FORMAT_XRGB8888;
+-      case V4L2_PIX_FMT_BGRA32:
+-              /* A B G R <=> [32:0] R:G:B:A */
+-              return DRM_FORMAT_RGBA8888;
+-      case V4L2_PIX_FMT_BGRX32:
+-              /* X B G R <=> [32:0] R:G:B:X */
+-              return DRM_FORMAT_RGBX8888;
+-      case V4L2_PIX_FMT_RGBA32:
+-              /* R G B A <=> [32:0] A:B:G:R */
+-              return DRM_FORMAT_ABGR8888;
+-      case V4L2_PIX_FMT_RGBX32:
+-              /* R G B X <=> [32:0] X:B:G:R */
+-              return DRM_FORMAT_XBGR8888;
+-      case V4L2_PIX_FMT_ARGB32:
+-              /* A R G B <=> [32:0] B:G:R:A */
+-              return DRM_FORMAT_BGRA8888;
+-      case V4L2_PIX_FMT_XRGB32:
+-              /* X R G B <=> [32:0] B:G:R:X */
+-              return DRM_FORMAT_BGRX8888;
+-      case V4L2_PIX_FMT_UYVY:
+-              return DRM_FORMAT_UYVY;
+-      case V4L2_PIX_FMT_YUYV:
+-              return DRM_FORMAT_YUYV;
+-      case V4L2_PIX_FMT_YUV420:
+-              return DRM_FORMAT_YUV420;
+-      case V4L2_PIX_FMT_YUV422P:
+-              return DRM_FORMAT_YUV422;
+-      case V4L2_PIX_FMT_YVU420:
+-              return DRM_FORMAT_YVU420;
+-      case V4L2_PIX_FMT_NV12:
+-              return DRM_FORMAT_NV12;
+-      case V4L2_PIX_FMT_NV16:
+-              return DRM_FORMAT_NV16;
+-      }
+-
+-      return -EINVAL;
+-}
+-
+-void ipu_cpmem_zero(struct ipuv3_channel *ch)
+-{
+-      struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+-      void __iomem *base = p;
+-      int i;
+-
+-      for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
+-              writel(0, base + i * sizeof(u32));
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_zero);
+-
+-void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
+-{
+-      ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
+-
+-void ipu_cpmem_skip_odd_chroma_rows(struct ipuv3_channel *ch)
+-{
+-      ipu_ch_param_write_field(ch, IPU_FIELD_RDRW, 1);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_skip_odd_chroma_rows);
+-
+-void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
+-{
+-      ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_stride);
+-
+-void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch)
+-{
+-      struct ipu_soc *ipu = ch->ipu;
+-      u32 val;
+-
+-      if (ipu->ipu_type == IPUV3EX)
+-              ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1);
+-
+-      val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num));
+-      val |= 1 << (ch->num % 32);
+-      ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num));
+-};
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
+-
+-void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
+-{
+-      WARN_ON_ONCE(buf & 0x7);
+-
+-      if (bufnum)
+-              ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
+-      else
+-              ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
+-
+-void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
+-{
+-      WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
+-
+-void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
+-                             u32 pixelformat)
+-{
+-      u32 ilo, sly, sluv;
+-
+-      if (stride < 0) {
+-              stride = -stride;
+-              ilo = 0x100000 - (stride / 8);
+-      } else {
+-              ilo = stride / 8;
+-      }
+-
+-      sly = (stride * 2) - 1;
+-
+-      switch (pixelformat) {
+-      case V4L2_PIX_FMT_YUV420:
+-      case V4L2_PIX_FMT_YVU420:
+-              sluv = stride / 2 - 1;
+-              break;
+-      case V4L2_PIX_FMT_NV12:
+-              sluv = stride - 1;
+-              break;
+-      case V4L2_PIX_FMT_YUV422P:
+-              sluv = stride - 1;
+-              break;
+-      case V4L2_PIX_FMT_NV16:
+-              sluv = stride * 2 - 1;
+-              break;
+-      default:
+-              sluv = 0;
+-              break;
+-      }
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
+-      if (sluv)
+-              ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv);
+-};
+-EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
+-
+-void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
+-{
+-      id &= 0x3;
+-      ipu_ch_param_write_field(ch, IPU_FIELD_ID, id);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
+-
+-int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
+-{
+-      return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
+-
+-void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
+-{
+-      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
+-};
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_burstsize);
+-
+-void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch)
+-{
+-      ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_block_mode);
+-
+-void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
+-                          enum ipu_rotate_mode rot)
+-{
+-      u32 temp_rot = bitrev8(rot) >> 5;
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_rotation);
+-
+-int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
+-                           const struct ipu_rgb *rgb)
+-{
+-      int bpp = 0, npb = 0, ro, go, bo, to;
+-
+-      ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
+-      go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
+-      bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
+-      to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo);
+-
+-      if (rgb->transp.length) {
+-              ipu_ch_param_write_field(ch, IPU_FIELD_WID3,
+-                              rgb->transp.length - 1);
+-              ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to);
+-      } else {
+-              ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
+-              ipu_ch_param_write_field(ch, IPU_FIELD_OFS3,
+-                              rgb->bits_per_pixel);
+-      }
+-
+-      switch (rgb->bits_per_pixel) {
+-      case 32:
+-              bpp = 0;
+-              npb = 15;
+-              break;
+-      case 24:
+-              bpp = 1;
+-              npb = 19;
+-              break;
+-      case 16:
+-              bpp = 3;
+-              npb = 31;
+-              break;
+-      case 8:
+-              bpp = 5;
+-              npb = 63;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-      ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
+-
+-int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width)
+-{
+-      int bpp = 0, npb = 0;
+-
+-      switch (width) {
+-      case 32:
+-              bpp = 0;
+-              npb = 15;
+-              break;
+-      case 24:
+-              bpp = 1;
+-              npb = 19;
+-              break;
+-      case 16:
+-              bpp = 3;
+-              npb = 31;
+-              break;
+-      case 8:
+-              bpp = 5;
+-              npb = 63;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
+-
+-void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
+-{
+-      switch (pixel_format) {
+-      case V4L2_PIX_FMT_UYVY:
+-              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+-              break;
+-      case V4L2_PIX_FMT_YUYV:
+-              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */
+-              break;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
+-
+-void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
+-                                 unsigned int uv_stride,
+-                                 unsigned int u_offset, unsigned int v_offset)
+-{
+-      WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
+-
+-static const struct ipu_rgb def_xrgb_32 = {
+-      .red    = { .offset = 16, .length = 8, },
+-      .green  = { .offset =  8, .length = 8, },
+-      .blue   = { .offset =  0, .length = 8, },
+-      .transp = { .offset = 24, .length = 8, },
+-      .bits_per_pixel = 32,
+-};
+-
+-static const struct ipu_rgb def_xbgr_32 = {
+-      .red    = { .offset =  0, .length = 8, },
+-      .green  = { .offset =  8, .length = 8, },
+-      .blue   = { .offset = 16, .length = 8, },
+-      .transp = { .offset = 24, .length = 8, },
+-      .bits_per_pixel = 32,
+-};
+-
+-static const struct ipu_rgb def_rgbx_32 = {
+-      .red    = { .offset = 24, .length = 8, },
+-      .green  = { .offset = 16, .length = 8, },
+-      .blue   = { .offset =  8, .length = 8, },
+-      .transp = { .offset =  0, .length = 8, },
+-      .bits_per_pixel = 32,
+-};
+-
+-static const struct ipu_rgb def_bgrx_32 = {
+-      .red    = { .offset =  8, .length = 8, },
+-      .green  = { .offset = 16, .length = 8, },
+-      .blue   = { .offset = 24, .length = 8, },
+-      .transp = { .offset =  0, .length = 8, },
+-      .bits_per_pixel = 32,
+-};
+-
+-static const struct ipu_rgb def_rgb_24 = {
+-      .red    = { .offset = 16, .length = 8, },
+-      .green  = { .offset =  8, .length = 8, },
+-      .blue   = { .offset =  0, .length = 8, },
+-      .transp = { .offset =  0, .length = 0, },
+-      .bits_per_pixel = 24,
+-};
+-
+-static const struct ipu_rgb def_bgr_24 = {
+-      .red    = { .offset =  0, .length = 8, },
+-      .green  = { .offset =  8, .length = 8, },
+-      .blue   = { .offset = 16, .length = 8, },
+-      .transp = { .offset =  0, .length = 0, },
+-      .bits_per_pixel = 24,
+-};
+-
+-static const struct ipu_rgb def_rgb_16 = {
+-      .red    = { .offset = 11, .length = 5, },
+-      .green  = { .offset =  5, .length = 6, },
+-      .blue   = { .offset =  0, .length = 5, },
+-      .transp = { .offset =  0, .length = 0, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_bgr_16 = {
+-      .red    = { .offset =  0, .length = 5, },
+-      .green  = { .offset =  5, .length = 6, },
+-      .blue   = { .offset = 11, .length = 5, },
+-      .transp = { .offset =  0, .length = 0, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_argb_16 = {
+-      .red    = { .offset = 10, .length = 5, },
+-      .green  = { .offset =  5, .length = 5, },
+-      .blue   = { .offset =  0, .length = 5, },
+-      .transp = { .offset = 15, .length = 1, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_argb_16_4444 = {
+-      .red    = { .offset =  8, .length = 4, },
+-      .green  = { .offset =  4, .length = 4, },
+-      .blue   = { .offset =  0, .length = 4, },
+-      .transp = { .offset = 12, .length = 4, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_abgr_16 = {
+-      .red    = { .offset =  0, .length = 5, },
+-      .green  = { .offset =  5, .length = 5, },
+-      .blue   = { .offset = 10, .length = 5, },
+-      .transp = { .offset = 15, .length = 1, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_rgba_16 = {
+-      .red    = { .offset = 11, .length = 5, },
+-      .green  = { .offset =  6, .length = 5, },
+-      .blue   = { .offset =  1, .length = 5, },
+-      .transp = { .offset =  0, .length = 1, },
+-      .bits_per_pixel = 16,
+-};
+-
+-static const struct ipu_rgb def_bgra_16 = {
+-      .red    = { .offset =  1, .length = 5, },
+-      .green  = { .offset =  6, .length = 5, },
+-      .blue   = { .offset = 11, .length = 5, },
+-      .transp = { .offset =  0, .length = 1, },
+-      .bits_per_pixel = 16,
+-};
+-
+-#define Y_OFFSET(pix, x, y)   ((x) + pix->width * (y))
+-#define U_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
+-                               (pix->width * ((y) / 2) / 2) + (x) / 2)
+-#define V_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
+-                               (pix->width * pix->height / 4) +       \
+-                               (pix->width * ((y) / 2) / 2) + (x) / 2)
+-#define U2_OFFSET(pix, x, y)  ((pix->width * pix->height) +           \
+-                               (pix->width * (y) / 2) + (x) / 2)
+-#define V2_OFFSET(pix, x, y)  ((pix->width * pix->height) +           \
+-                               (pix->width * pix->height / 2) +       \
+-                               (pix->width * (y) / 2) + (x) / 2)
+-#define UV_OFFSET(pix, x, y)  ((pix->width * pix->height) +   \
+-                               (pix->width * ((y) / 2)) + (x))
+-#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) +   \
+-                               (pix->width * y) + (x))
+-
+-#define NUM_ALPHA_CHANNELS    7
+-
+-/* See Table 37-12. Alpha channels mapping. */
+-static int ipu_channel_albm(int ch_num)
+-{
+-      switch (ch_num) {
+-      case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:     return 0;
+-      case IPUV3_CHANNEL_G_MEM_IC_PP:         return 1;
+-      case IPUV3_CHANNEL_MEM_FG_SYNC:         return 2;
+-      case IPUV3_CHANNEL_MEM_FG_ASYNC:        return 3;
+-      case IPUV3_CHANNEL_MEM_BG_SYNC:         return 4;
+-      case IPUV3_CHANNEL_MEM_BG_ASYNC:        return 5;
+-      case IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB: return 6;
+-      default:
+-              return -EINVAL;
+-      }
+-}
+-
+-static void ipu_cpmem_set_separate_alpha(struct ipuv3_channel *ch)
+-{
+-      struct ipu_soc *ipu = ch->ipu;
+-      int albm;
+-      u32 val;
+-
+-      albm = ipu_channel_albm(ch->num);
+-      if (albm < 0)
+-              return;
+-
+-      ipu_ch_param_write_field(ch, IPU_FIELD_ALU, 1);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_ALBM, albm);
+-      ipu_ch_param_write_field(ch, IPU_FIELD_CRE, 1);
+-
+-      val = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+-      val |= BIT(ch->num);
+-      ipu_idmac_write(ipu, val, IDMAC_SEP_ALPHA);
+-}
+-
+-int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
+-{
+-      switch (drm_fourcc) {
+-      case DRM_FORMAT_YUV420:
+-      case DRM_FORMAT_YVU420:
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_YUV422:
+-      case DRM_FORMAT_YVU422:
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_YUV444:
+-      case DRM_FORMAT_YVU444:
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_NV12:
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_NV16:
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_UYVY:
+-              /* bits/pixel */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_YUYV:
+-              /* bits/pixel */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3);
+-              /* pix format */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);
+-              /* burst size */
+-              ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);
+-              break;
+-      case DRM_FORMAT_ABGR8888:
+-      case DRM_FORMAT_XBGR8888:
+-              ipu_cpmem_set_format_rgb(ch, &def_xbgr_32);
+-              break;
+-      case DRM_FORMAT_ARGB8888:
+-      case DRM_FORMAT_XRGB8888:
+-              ipu_cpmem_set_format_rgb(ch, &def_xrgb_32);
+-              break;
+-      case DRM_FORMAT_RGBA8888:
+-      case DRM_FORMAT_RGBX8888:
+-      case DRM_FORMAT_RGBX8888_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_rgbx_32);
+-              break;
+-      case DRM_FORMAT_BGRA8888:
+-      case DRM_FORMAT_BGRX8888:
+-      case DRM_FORMAT_BGRX8888_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_bgrx_32);
+-              break;
+-      case DRM_FORMAT_BGR888:
+-      case DRM_FORMAT_BGR888_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
+-              break;
+-      case DRM_FORMAT_RGB888:
+-      case DRM_FORMAT_RGB888_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
+-              break;
+-      case DRM_FORMAT_RGB565:
+-      case DRM_FORMAT_RGB565_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
+-              break;
+-      case DRM_FORMAT_BGR565:
+-      case DRM_FORMAT_BGR565_A8:
+-              ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
+-              break;
+-      case DRM_FORMAT_ARGB1555:
+-              ipu_cpmem_set_format_rgb(ch, &def_argb_16);
+-              break;
+-      case DRM_FORMAT_ABGR1555:
+-              ipu_cpmem_set_format_rgb(ch, &def_abgr_16);
+-              break;
+-      case DRM_FORMAT_RGBA5551:
+-              ipu_cpmem_set_format_rgb(ch, &def_rgba_16);
+-              break;
+-      case DRM_FORMAT_BGRA5551:
+-              ipu_cpmem_set_format_rgb(ch, &def_bgra_16);
+-              break;
+-      case DRM_FORMAT_ARGB4444:
+-              ipu_cpmem_set_format_rgb(ch, &def_argb_16_4444);
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      switch (drm_fourcc) {
+-      case DRM_FORMAT_RGB565_A8:
+-      case DRM_FORMAT_BGR565_A8:
+-      case DRM_FORMAT_RGB888_A8:
+-      case DRM_FORMAT_BGR888_A8:
+-      case DRM_FORMAT_RGBX8888_A8:
+-      case DRM_FORMAT_BGRX8888_A8:
+-              ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
+-              ipu_cpmem_set_separate_alpha(ch);
+-              break;
+-      default:
+-              break;
+-      }
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+-
+-int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
+-{
+-      struct v4l2_pix_format *pix = &image->pix;
+-      int offset, u_offset, v_offset;
+-      int ret = 0;
+-
+-      pr_debug("%s: resolution: %dx%d stride: %d\n",
+-               __func__, pix->width, pix->height,
+-               pix->bytesperline);
+-
+-      ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height);
+-      ipu_cpmem_set_stride(ch, pix->bytesperline);
+-
+-      ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
+-
+-      switch (pix->pixelformat) {
+-      case V4L2_PIX_FMT_YUV420:
+-              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+-              u_offset = image->u_offset ?
+-                      image->u_offset : U_OFFSET(pix, image->rect.left,
+-                                                 image->rect.top) - offset;
+-              v_offset = image->v_offset ?
+-                      image->v_offset : V_OFFSET(pix, image->rect.left,
+-                                                 image->rect.top) - offset;
+-
+-              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+-                                            u_offset, v_offset);
+-              break;
+-      case V4L2_PIX_FMT_YVU420:
+-              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+-              u_offset = image->u_offset ?
+-                      image->u_offset : V_OFFSET(pix, image->rect.left,
+-                                                 image->rect.top) - offset;
+-              v_offset = image->v_offset ?
+-                      image->v_offset : U_OFFSET(pix, image->rect.left,
+-                                                 image->rect.top) - offset;
+-
+-              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+-                                            u_offset, v_offset);
+-              break;
+-      case V4L2_PIX_FMT_YUV422P:
+-              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+-              u_offset = image->u_offset ?
+-                      image->u_offset : U2_OFFSET(pix, image->rect.left,
+-                                                  image->rect.top) - offset;
+-              v_offset = image->v_offset ?
+-                      image->v_offset : V2_OFFSET(pix, image->rect.left,
+-                                                  image->rect.top) - offset;
+-
+-              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+-                                            u_offset, v_offset);
+-              break;
+-      case V4L2_PIX_FMT_NV12:
+-              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+-              u_offset = image->u_offset ?
+-                      image->u_offset : UV_OFFSET(pix, image->rect.left,
+-                                                  image->rect.top) - offset;
+-              v_offset = image->v_offset ? image->v_offset : 0;
+-
+-              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
+-                                            u_offset, v_offset);
+-              break;
+-      case V4L2_PIX_FMT_NV16:
+-              offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+-              u_offset = image->u_offset ?
+-                      image->u_offset : UV2_OFFSET(pix, image->rect.left,
+-                                                   image->rect.top) - offset;
+-              v_offset = image->v_offset ? image->v_offset : 0;
+-
+-              ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
+-                                            u_offset, v_offset);
+-              break;
+-      case V4L2_PIX_FMT_UYVY:
+-      case V4L2_PIX_FMT_YUYV:
+-      case V4L2_PIX_FMT_RGB565:
+-              offset = image->rect.left * 2 +
+-                      image->rect.top * pix->bytesperline;
+-              break;
+-      case V4L2_PIX_FMT_RGB32:
+-      case V4L2_PIX_FMT_BGR32:
+-      case V4L2_PIX_FMT_ABGR32:
+-      case V4L2_PIX_FMT_XBGR32:
+-      case V4L2_PIX_FMT_BGRA32:
+-      case V4L2_PIX_FMT_BGRX32:
+-      case V4L2_PIX_FMT_RGBA32:
+-      case V4L2_PIX_FMT_RGBX32:
+-      case V4L2_PIX_FMT_ARGB32:
+-      case V4L2_PIX_FMT_XRGB32:
+-              offset = image->rect.left * 4 +
+-                      image->rect.top * pix->bytesperline;
+-              break;
+-      case V4L2_PIX_FMT_RGB24:
+-      case V4L2_PIX_FMT_BGR24:
+-              offset = image->rect.left * 3 +
+-                      image->rect.top * pix->bytesperline;
+-              break;
+-      case V4L2_PIX_FMT_SBGGR8:
+-      case V4L2_PIX_FMT_SGBRG8:
+-      case V4L2_PIX_FMT_SGRBG8:
+-      case V4L2_PIX_FMT_SRGGB8:
+-      case V4L2_PIX_FMT_GREY:
+-              offset = image->rect.left + image->rect.top * pix->bytesperline;
+-              break;
+-      case V4L2_PIX_FMT_SBGGR16:
+-      case V4L2_PIX_FMT_SGBRG16:
+-      case V4L2_PIX_FMT_SGRBG16:
+-      case V4L2_PIX_FMT_SRGGB16:
+-      case V4L2_PIX_FMT_Y16:
+-              offset = image->rect.left * 2 +
+-                       image->rect.top * pix->bytesperline;
+-              break;
+-      default:
+-              /* This should not happen */
+-              WARN_ON(1);
+-              offset = 0;
+-              ret = -EINVAL;
+-      }
+-
+-      ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
+-      ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
+-
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
+-
+-void ipu_cpmem_dump(struct ipuv3_channel *ch)
+-{
+-      struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
+-      struct ipu_soc *ipu = ch->ipu;
+-      int chno = ch->num;
+-
+-      dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno,
+-              readl(&p->word[0].data[0]),
+-              readl(&p->word[0].data[1]),
+-              readl(&p->word[0].data[2]),
+-              readl(&p->word[0].data[3]),
+-              readl(&p->word[0].data[4]));
+-      dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno,
+-              readl(&p->word[1].data[0]),
+-              readl(&p->word[1].data[1]),
+-              readl(&p->word[1].data[2]),
+-              readl(&p->word[1].data[3]),
+-              readl(&p->word[1].data[4]));
+-      dev_dbg(ipu->dev, "PFS 0x%x, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_PFS));
+-      dev_dbg(ipu->dev, "BPP 0x%x, ",
+-              ipu_ch_param_read_field(ch, IPU_FIELD_BPP));
+-      dev_dbg(ipu->dev, "NPB 0x%x\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_NPB));
+-
+-      dev_dbg(ipu->dev, "FW %d, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_FW));
+-      dev_dbg(ipu->dev, "FH %d, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_FH));
+-      dev_dbg(ipu->dev, "EBA0 0x%x\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3);
+-      dev_dbg(ipu->dev, "EBA1 0x%x\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3);
+-      dev_dbg(ipu->dev, "Stride %d\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_SL));
+-      dev_dbg(ipu->dev, "scan_order %d\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_SO));
+-      dev_dbg(ipu->dev, "uv_stride %d\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_SLUV));
+-      dev_dbg(ipu->dev, "u_offset 0x%x\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3);
+-      dev_dbg(ipu->dev, "v_offset 0x%x\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3);
+-
+-      dev_dbg(ipu->dev, "Width0 %d+1, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_WID0));
+-      dev_dbg(ipu->dev, "Width1 %d+1, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_WID1));
+-      dev_dbg(ipu->dev, "Width2 %d+1, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_WID2));
+-      dev_dbg(ipu->dev, "Width3 %d+1, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_WID3));
+-      dev_dbg(ipu->dev, "Offset0 %d, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_OFS0));
+-      dev_dbg(ipu->dev, "Offset1 %d, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_OFS1));
+-      dev_dbg(ipu->dev, "Offset2 %d, ",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_OFS2));
+-      dev_dbg(ipu->dev, "Offset3 %d\n",
+-               ipu_ch_param_read_field(ch, IPU_FIELD_OFS3));
+-}
+-EXPORT_SYMBOL_GPL(ipu_cpmem_dump);
+-
+-int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+-{
+-      struct ipu_cpmem *cpmem;
+-
+-      cpmem = devm_kzalloc(dev, sizeof(*cpmem), GFP_KERNEL);
+-      if (!cpmem)
+-              return -ENOMEM;
+-
+-      ipu->cpmem_priv = cpmem;
+-
+-      spin_lock_init(&cpmem->lock);
+-      cpmem->base = devm_ioremap(dev, base, SZ_128K);
+-      if (!cpmem->base)
+-              return -ENOMEM;
+-
+-      dev_dbg(dev, "CPMEM base: 0x%08lx remapped to %p\n",
+-              base, cpmem->base);
+-      cpmem->ipu = ipu;
+-
+-      return 0;
+-}
+-
+-void ipu_cpmem_exit(struct ipu_soc *ipu)
+-{
+-}
+--- a/drivers/gpu/ipu-v3/ipu-csi.c
++++ /dev/null
+@@ -1,821 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2012-2014 Mentor Graphics Inc.
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/export.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/delay.h>
+-#include <linux/io.h>
+-#include <linux/err.h>
+-#include <linux/platform_device.h>
+-#include <linux/videodev2.h>
+-#include <uapi/linux/v4l2-mediabus.h>
+-#include <linux/clk.h>
+-#include <linux/clk-provider.h>
+-#include <linux/clkdev.h>
+-
+-#include "ipu-prv.h"
+-
+-struct ipu_csi {
+-      void __iomem *base;
+-      int id;
+-      u32 module;
+-      struct clk *clk_ipu;    /* IPU bus clock */
+-      spinlock_t lock;
+-      bool inuse;
+-      struct ipu_soc *ipu;
+-};
+-
+-/* CSI Register Offsets */
+-#define CSI_SENS_CONF         0x0000
+-#define CSI_SENS_FRM_SIZE     0x0004
+-#define CSI_ACT_FRM_SIZE      0x0008
+-#define CSI_OUT_FRM_CTRL      0x000c
+-#define CSI_TST_CTRL          0x0010
+-#define CSI_CCIR_CODE_1               0x0014
+-#define CSI_CCIR_CODE_2               0x0018
+-#define CSI_CCIR_CODE_3               0x001c
+-#define CSI_MIPI_DI           0x0020
+-#define CSI_SKIP              0x0024
+-#define CSI_CPD_CTRL          0x0028
+-#define CSI_CPD_RC(n)         (0x002c + ((n)*4))
+-#define CSI_CPD_RS(n)         (0x004c + ((n)*4))
+-#define CSI_CPD_GRC(n)                (0x005c + ((n)*4))
+-#define CSI_CPD_GRS(n)                (0x007c + ((n)*4))
+-#define CSI_CPD_GBC(n)                (0x008c + ((n)*4))
+-#define CSI_CPD_GBS(n)                (0x00Ac + ((n)*4))
+-#define CSI_CPD_BC(n)         (0x00Bc + ((n)*4))
+-#define CSI_CPD_BS(n)         (0x00Dc + ((n)*4))
+-#define CSI_CPD_OFFSET1               0x00ec
+-#define CSI_CPD_OFFSET2               0x00f0
+-
+-/* CSI Register Fields */
+-#define CSI_SENS_CONF_DATA_FMT_SHIFT          8
+-#define CSI_SENS_CONF_DATA_FMT_MASK           0x00000700
+-#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444     0L
+-#define CSI_SENS_CONF_DATA_FMT_YUV422_YUYV    1L
+-#define CSI_SENS_CONF_DATA_FMT_YUV422_UYVY    2L
+-#define CSI_SENS_CONF_DATA_FMT_BAYER          3L
+-#define CSI_SENS_CONF_DATA_FMT_RGB565         4L
+-#define CSI_SENS_CONF_DATA_FMT_RGB555         5L
+-#define CSI_SENS_CONF_DATA_FMT_RGB444         6L
+-#define CSI_SENS_CONF_DATA_FMT_JPEG           7L
+-
+-#define CSI_SENS_CONF_VSYNC_POL_SHIFT         0
+-#define CSI_SENS_CONF_HSYNC_POL_SHIFT         1
+-#define CSI_SENS_CONF_DATA_POL_SHIFT          2
+-#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT               3
+-#define CSI_SENS_CONF_SENS_PRTCL_MASK         0x00000070
+-#define CSI_SENS_CONF_SENS_PRTCL_SHIFT                4
+-#define CSI_SENS_CONF_PACK_TIGHT_SHIFT                7
+-#define CSI_SENS_CONF_DATA_WIDTH_SHIFT                11
+-#define CSI_SENS_CONF_EXT_VSYNC_SHIFT         15
+-#define CSI_SENS_CONF_DIVRATIO_SHIFT          16
+-
+-#define CSI_SENS_CONF_DIVRATIO_MASK           0x00ff0000
+-#define CSI_SENS_CONF_DATA_DEST_SHIFT         24
+-#define CSI_SENS_CONF_DATA_DEST_MASK          0x07000000
+-#define CSI_SENS_CONF_JPEG8_EN_SHIFT          27
+-#define CSI_SENS_CONF_JPEG_EN_SHIFT           28
+-#define CSI_SENS_CONF_FORCE_EOF_SHIFT         29
+-#define CSI_SENS_CONF_DATA_EN_POL_SHIFT               31
+-
+-#define CSI_DATA_DEST_IC                      2
+-#define CSI_DATA_DEST_IDMAC                   4
+-
+-#define CSI_CCIR_ERR_DET_EN                   0x01000000
+-#define CSI_HORI_DOWNSIZE_EN                  0x80000000
+-#define CSI_VERT_DOWNSIZE_EN                  0x40000000
+-#define CSI_TEST_GEN_MODE_EN                  0x01000000
+-
+-#define CSI_HSC_MASK                          0x1fff0000
+-#define CSI_HSC_SHIFT                         16
+-#define CSI_VSC_MASK                          0x00000fff
+-#define CSI_VSC_SHIFT                         0
+-
+-#define CSI_TEST_GEN_R_MASK                   0x000000ff
+-#define CSI_TEST_GEN_R_SHIFT                  0
+-#define CSI_TEST_GEN_G_MASK                   0x0000ff00
+-#define CSI_TEST_GEN_G_SHIFT                  8
+-#define CSI_TEST_GEN_B_MASK                   0x00ff0000
+-#define CSI_TEST_GEN_B_SHIFT                  16
+-
+-#define CSI_MAX_RATIO_SKIP_SMFC_MASK          0x00000007
+-#define CSI_MAX_RATIO_SKIP_SMFC_SHIFT         0
+-#define CSI_SKIP_SMFC_MASK                    0x000000f8
+-#define CSI_SKIP_SMFC_SHIFT                   3
+-#define CSI_ID_2_SKIP_MASK                    0x00000300
+-#define CSI_ID_2_SKIP_SHIFT                   8
+-
+-#define CSI_COLOR_FIRST_ROW_MASK              0x00000002
+-#define CSI_COLOR_FIRST_COMP_MASK             0x00000001
+-
+-/* MIPI CSI-2 data types */
+-#define MIPI_DT_YUV420                0x18 /* YYY.../UYVY.... */
+-#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY...   */
+-#define MIPI_DT_YUV422                0x1e /* UYVY...         */
+-#define MIPI_DT_RGB444                0x20
+-#define MIPI_DT_RGB555                0x21
+-#define MIPI_DT_RGB565                0x22
+-#define MIPI_DT_RGB666                0x23
+-#define MIPI_DT_RGB888                0x24
+-#define MIPI_DT_RAW6          0x28
+-#define MIPI_DT_RAW7          0x29
+-#define MIPI_DT_RAW8          0x2a
+-#define MIPI_DT_RAW10         0x2b
+-#define MIPI_DT_RAW12         0x2c
+-#define MIPI_DT_RAW14         0x2d
+-
+-/*
+- * Bitfield of CSI bus signal polarities and modes.
+- */
+-struct ipu_csi_bus_config {
+-      unsigned data_width:4;
+-      unsigned clk_mode:3;
+-      unsigned ext_vsync:1;
+-      unsigned vsync_pol:1;
+-      unsigned hsync_pol:1;
+-      unsigned pixclk_pol:1;
+-      unsigned data_pol:1;
+-      unsigned sens_clksrc:1;
+-      unsigned pack_tight:1;
+-      unsigned force_eof:1;
+-      unsigned data_en_pol:1;
+-
+-      unsigned data_fmt;
+-      unsigned mipi_dt;
+-};
+-
+-/*
+- * Enumeration of CSI data bus widths.
+- */
+-enum ipu_csi_data_width {
+-      IPU_CSI_DATA_WIDTH_4   = 0,
+-      IPU_CSI_DATA_WIDTH_8   = 1,
+-      IPU_CSI_DATA_WIDTH_10  = 3,
+-      IPU_CSI_DATA_WIDTH_12  = 5,
+-      IPU_CSI_DATA_WIDTH_16  = 9,
+-};
+-
+-/*
+- * Enumeration of CSI clock modes.
+- */
+-enum ipu_csi_clk_mode {
+-      IPU_CSI_CLK_MODE_GATED_CLK,
+-      IPU_CSI_CLK_MODE_NONGATED_CLK,
+-      IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
+-      IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
+-      IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
+-      IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
+-      IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
+-      IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
+-};
+-
+-static inline u32 ipu_csi_read(struct ipu_csi *csi, unsigned offset)
+-{
+-      return readl(csi->base + offset);
+-}
+-
+-static inline void ipu_csi_write(struct ipu_csi *csi, u32 value,
+-                               unsigned offset)
+-{
+-      writel(value, csi->base + offset);
+-}
+-
+-/*
+- * Set mclk division ratio for generating test mode mclk. Only used
+- * for test generator.
+- */
+-static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
+-                                      u32 ipu_clk)
+-{
+-      u32 temp;
+-      int div_ratio;
+-
+-      div_ratio = (ipu_clk / pixel_clk) - 1;
+-
+-      if (div_ratio > 0xFF || div_ratio < 0) {
+-              dev_err(csi->ipu->dev,
+-                      "value of pixel_clk extends normal range\n");
+-              return -EINVAL;
+-      }
+-
+-      temp = ipu_csi_read(csi, CSI_SENS_CONF);
+-      temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
+-      ipu_csi_write(csi, temp | (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
+-                        CSI_SENS_CONF);
+-
+-      return 0;
+-}
+-
+-/*
+- * Find the CSI data format and data width for the given V4L2 media
+- * bus pixel format code.
+- */
+-static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
+-                              enum v4l2_mbus_type mbus_type)
+-{
+-      switch (mbus_code) {
+-      case MEDIA_BUS_FMT_BGR565_2X8_BE:
+-      case MEDIA_BUS_FMT_BGR565_2X8_LE:
+-      case MEDIA_BUS_FMT_RGB565_2X8_BE:
+-      case MEDIA_BUS_FMT_RGB565_2X8_LE:
+-              if (mbus_type == V4L2_MBUS_CSI2_DPHY)
+-                      cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+-              else
+-                      cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_RGB565;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+-      case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
+-              cfg->mipi_dt = MIPI_DT_RGB444;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+-      case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
+-              cfg->mipi_dt = MIPI_DT_RGB555;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_RGB888_1X24:
+-      case MEDIA_BUS_FMT_BGR888_1X24:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+-              cfg->mipi_dt = MIPI_DT_RGB888;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_UYVY8_2X8:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+-              cfg->mipi_dt = MIPI_DT_YUV422;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_YUYV8_2X8:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+-              cfg->mipi_dt = MIPI_DT_YUV422;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_UYVY8_1X16:
+-      case MEDIA_BUS_FMT_YUYV8_1X16:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_YUV422;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+-              break;
+-      case MEDIA_BUS_FMT_SBGGR8_1X8:
+-      case MEDIA_BUS_FMT_SGBRG8_1X8:
+-      case MEDIA_BUS_FMT_SGRBG8_1X8:
+-      case MEDIA_BUS_FMT_SRGGB8_1X8:
+-      case MEDIA_BUS_FMT_Y8_1X8:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_RAW8;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+-      case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+-      case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+-      case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+-      case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
+-      case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
+-      case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
+-      case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_RAW10;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      case MEDIA_BUS_FMT_SBGGR10_1X10:
+-      case MEDIA_BUS_FMT_SGBRG10_1X10:
+-      case MEDIA_BUS_FMT_SGRBG10_1X10:
+-      case MEDIA_BUS_FMT_SRGGB10_1X10:
+-      case MEDIA_BUS_FMT_Y10_1X10:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_RAW10;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_10;
+-              break;
+-      case MEDIA_BUS_FMT_SBGGR12_1X12:
+-      case MEDIA_BUS_FMT_SGBRG12_1X12:
+-      case MEDIA_BUS_FMT_SGRBG12_1X12:
+-      case MEDIA_BUS_FMT_SRGGB12_1X12:
+-      case MEDIA_BUS_FMT_Y12_1X12:
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+-              cfg->mipi_dt = MIPI_DT_RAW12;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_12;
+-              break;
+-      case MEDIA_BUS_FMT_JPEG_1X8:
+-              /* TODO */
+-              cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
+-              cfg->mipi_dt = MIPI_DT_RAW8;
+-              cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-
+-      return 0;
+-}
+-
+-/* translate alternate field mode based on given standard */
+-static inline enum v4l2_field
+-ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std)
+-{
+-      return (field != V4L2_FIELD_ALTERNATE) ? field :
+-              ((std & V4L2_STD_525_60) ?
+-               V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB);
+-}
+-
+-/*
+- * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
+- */
+-static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+-                          const struct v4l2_mbus_config *mbus_cfg,
+-                          const struct v4l2_mbus_framefmt *mbus_fmt)
+-{
+-      int ret;
+-
+-      memset(csicfg, 0, sizeof(*csicfg));
+-
+-      ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
+-      if (ret < 0)
+-              return ret;
+-
+-      switch (mbus_cfg->type) {
+-      case V4L2_MBUS_PARALLEL:
+-              csicfg->ext_vsync = 1;
+-              csicfg->vsync_pol = (mbus_cfg->flags &
+-                                   V4L2_MBUS_VSYNC_ACTIVE_LOW) ? 1 : 0;
+-              csicfg->hsync_pol = (mbus_cfg->flags &
+-                                   V4L2_MBUS_HSYNC_ACTIVE_LOW) ? 1 : 0;
+-              csicfg->pixclk_pol = (mbus_cfg->flags &
+-                                    V4L2_MBUS_PCLK_SAMPLE_FALLING) ? 1 : 0;
+-              csicfg->clk_mode = IPU_CSI_CLK_MODE_GATED_CLK;
+-              break;
+-      case V4L2_MBUS_BT656:
+-              csicfg->ext_vsync = 0;
+-              if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+-                  mbus_fmt->field == V4L2_FIELD_ALTERNATE)
+-                      csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+-              else
+-                      csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+-              break;
+-      case V4L2_MBUS_CSI2_DPHY:
+-              /*
+-               * MIPI CSI-2 requires non gated clock mode, all other
+-               * parameters are not applicable for MIPI CSI-2 bus.
+-               */
+-              csicfg->clk_mode = IPU_CSI_CLK_MODE_NONGATED_CLK;
+-              break;
+-      default:
+-              /* will never get here, keep compiler quiet */
+-              break;
+-      }
+-
+-      return 0;
+-}
+-
+-static int
+-ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi,
+-                              const struct v4l2_mbus_framefmt *infmt,
+-                              const struct v4l2_mbus_framefmt *outfmt,
+-                              v4l2_std_id std)
+-{
+-      enum v4l2_field infield, outfield;
+-      bool swap_fields;
+-
+-      /* get translated field type of input and output */
+-      infield = ipu_csi_translate_field(infmt->field, std);
+-      outfield = ipu_csi_translate_field(outfmt->field, std);
+-
+-      /*
+-       * Write the H-V-F codes the CSI will match against the
+-       * incoming data for start/end of active and blanking
+-       * field intervals. If input and output field types are
+-       * sequential but not the same (one is SEQ_BT and the other
+-       * is SEQ_TB), swap the F-bit so that the CSI will capture
+-       * field 1 lines before field 0 lines.
+-       */
+-      swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) &&
+-                     V4L2_FIELD_IS_SEQUENTIAL(outfield) &&
+-                     infield != outfield);
+-
+-      if (!swap_fields) {
+-              /*
+-               * Field0BlankEnd  = 110, Field0BlankStart  = 010
+-               * Field0ActiveEnd = 100, Field0ActiveStart = 000
+-               * Field1BlankEnd  = 111, Field1BlankStart  = 011
+-               * Field1ActiveEnd = 101, Field1ActiveStart = 001
+-               */
+-              ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
+-                            CSI_CCIR_CODE_1);
+-              ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
+-      } else {
+-              dev_dbg(csi->ipu->dev, "capture field swap\n");
+-
+-              /* same as above but with F-bit inverted */
+-              ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
+-                            CSI_CCIR_CODE_1);
+-              ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
+-      }
+-
+-      ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+-
+-      return 0;
+-}
+-
+-
+-int ipu_csi_init_interface(struct ipu_csi *csi,
+-                         const struct v4l2_mbus_config *mbus_cfg,
+-                         const struct v4l2_mbus_framefmt *infmt,
+-                         const struct v4l2_mbus_framefmt *outfmt)
+-{
+-      struct ipu_csi_bus_config cfg;
+-      unsigned long flags;
+-      u32 width, height, data = 0;
+-      v4l2_std_id std;
+-      int ret;
+-
+-      ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt);
+-      if (ret < 0)
+-              return ret;
+-
+-      /* set default sensor frame width and height */
+-      width = infmt->width;
+-      height = infmt->height;
+-      if (infmt->field == V4L2_FIELD_ALTERNATE)
+-              height *= 2;
+-
+-      /* Set the CSI_SENS_CONF register remaining fields */
+-      data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
+-              cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
+-              cfg.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
+-              cfg.vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
+-              cfg.hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
+-              cfg.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
+-              cfg.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
+-              cfg.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
+-              cfg.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
+-              cfg.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
+-              cfg.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      ipu_csi_write(csi, data, CSI_SENS_CONF);
+-
+-      /* Set CCIR registers */
+-
+-      switch (cfg.clk_mode) {
+-      case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+-              ipu_csi_write(csi, 0x40030, CSI_CCIR_CODE_1);
+-              ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+-              break;
+-      case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+-              if (width == 720 && height == 480) {
+-                      std = V4L2_STD_NTSC;
+-                      height = 525;
+-              } else if (width == 720 && height == 576) {
+-                      std = V4L2_STD_PAL;
+-                      height = 625;
+-              } else {
+-                      dev_err(csi->ipu->dev,
+-                              "Unsupported interlaced video mode\n");
+-                      ret = -EINVAL;
+-                      goto out_unlock;
+-              }
+-
+-              ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std);
+-              if (ret)
+-                      goto out_unlock;
+-              break;
+-      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+-      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+-      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+-      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+-              ipu_csi_write(csi, 0x40030 | CSI_CCIR_ERR_DET_EN,
+-                                 CSI_CCIR_CODE_1);
+-              ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+-              break;
+-      case IPU_CSI_CLK_MODE_GATED_CLK:
+-      case IPU_CSI_CLK_MODE_NONGATED_CLK:
+-              ipu_csi_write(csi, 0, CSI_CCIR_CODE_1);
+-              break;
+-      }
+-
+-      /* Setup sensor frame size */
+-      ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
+-                    CSI_SENS_FRM_SIZE);
+-
+-      dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
+-              ipu_csi_read(csi, CSI_SENS_CONF));
+-      dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
+-              ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+-
+-out_unlock:
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
+-
+-bool ipu_csi_is_interlaced(struct ipu_csi *csi)
+-{
+-      unsigned long flags;
+-      u32 sensor_protocol;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-      sensor_protocol =
+-              (ipu_csi_read(csi, CSI_SENS_CONF) &
+-               CSI_SENS_CONF_SENS_PRTCL_MASK) >>
+-              CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-
+-      switch (sensor_protocol) {
+-      case IPU_CSI_CLK_MODE_GATED_CLK:
+-      case IPU_CSI_CLK_MODE_NONGATED_CLK:
+-      case IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE:
+-      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
+-      case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
+-              return false;
+-      case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
+-      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR:
+-      case IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR:
+-              return true;
+-      default:
+-              dev_err(csi->ipu->dev,
+-                      "CSI %d sensor protocol unsupported\n", csi->id);
+-              return false;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_is_interlaced);
+-
+-void ipu_csi_get_window(struct ipu_csi *csi, struct v4l2_rect *w)
+-{
+-      unsigned long flags;
+-      u32 reg;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      reg = ipu_csi_read(csi, CSI_ACT_FRM_SIZE);
+-      w->width = (reg & 0xFFFF) + 1;
+-      w->height = (reg >> 16 & 0xFFFF) + 1;
+-
+-      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+-      w->left = (reg & CSI_HSC_MASK) >> CSI_HSC_SHIFT;
+-      w->top = (reg & CSI_VSC_MASK) >> CSI_VSC_SHIFT;
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_get_window);
+-
+-void ipu_csi_set_window(struct ipu_csi *csi, struct v4l2_rect *w)
+-{
+-      unsigned long flags;
+-      u32 reg;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      ipu_csi_write(csi, (w->width - 1) | ((w->height - 1) << 16),
+-                        CSI_ACT_FRM_SIZE);
+-
+-      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+-      reg &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
+-      reg |= ((w->top << CSI_VSC_SHIFT) | (w->left << CSI_HSC_SHIFT));
+-      ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_window);
+-
+-void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
+-{
+-      unsigned long flags;
+-      u32 reg;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      reg = ipu_csi_read(csi, CSI_OUT_FRM_CTRL);
+-      reg &= ~(CSI_HORI_DOWNSIZE_EN | CSI_VERT_DOWNSIZE_EN);
+-      reg |= (horiz ? CSI_HORI_DOWNSIZE_EN : 0) |
+-             (vert ? CSI_VERT_DOWNSIZE_EN : 0);
+-      ipu_csi_write(csi, reg, CSI_OUT_FRM_CTRL);
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
+-
+-void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
+-                              u32 r_value, u32 g_value, u32 b_value,
+-                              u32 pix_clk)
+-{
+-      unsigned long flags;
+-      u32 ipu_clk = clk_get_rate(csi->clk_ipu);
+-      u32 temp;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      temp = ipu_csi_read(csi, CSI_TST_CTRL);
+-
+-      if (!active) {
+-              temp &= ~CSI_TEST_GEN_MODE_EN;
+-              ipu_csi_write(csi, temp, CSI_TST_CTRL);
+-      } else {
+-              /* Set sensb_mclk div_ratio */
+-              ipu_csi_set_testgen_mclk(csi, pix_clk, ipu_clk);
+-
+-              temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
+-                        CSI_TEST_GEN_B_MASK);
+-              temp |= CSI_TEST_GEN_MODE_EN;
+-              temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
+-                      (g_value << CSI_TEST_GEN_G_SHIFT) |
+-                      (b_value << CSI_TEST_GEN_B_SHIFT);
+-              ipu_csi_write(csi, temp, CSI_TST_CTRL);
+-      }
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_test_generator);
+-
+-int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
+-                            struct v4l2_mbus_framefmt *mbus_fmt)
+-{
+-      struct ipu_csi_bus_config cfg;
+-      unsigned long flags;
+-      u32 temp;
+-      int ret;
+-
+-      if (vc > 3)
+-              return -EINVAL;
+-
+-      ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2_DPHY);
+-      if (ret < 0)
+-              return ret;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      temp = ipu_csi_read(csi, CSI_MIPI_DI);
+-      temp &= ~(0xff << (vc * 8));
+-      temp |= (cfg.mipi_dt << (vc * 8));
+-      ipu_csi_write(csi, temp, CSI_MIPI_DI);
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_mipi_datatype);
+-
+-int ipu_csi_set_skip_smfc(struct ipu_csi *csi, u32 skip,
+-                        u32 max_ratio, u32 id)
+-{
+-      unsigned long flags;
+-      u32 temp;
+-
+-      if (max_ratio > 5 || id > 3)
+-              return -EINVAL;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      temp = ipu_csi_read(csi, CSI_SKIP);
+-      temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
+-                CSI_SKIP_SMFC_MASK);
+-      temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
+-              (id << CSI_ID_2_SKIP_SHIFT) |
+-              (skip << CSI_SKIP_SMFC_SHIFT);
+-      ipu_csi_write(csi, temp, CSI_SKIP);
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_skip_smfc);
+-
+-int ipu_csi_set_dest(struct ipu_csi *csi, enum ipu_csi_dest csi_dest)
+-{
+-      unsigned long flags;
+-      u32 csi_sens_conf, dest;
+-
+-      if (csi_dest == IPU_CSI_DEST_IDMAC)
+-              dest = CSI_DATA_DEST_IDMAC;
+-      else
+-              dest = CSI_DATA_DEST_IC; /* IC or VDIC */
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      csi_sens_conf = ipu_csi_read(csi, CSI_SENS_CONF);
+-      csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
+-      csi_sens_conf |= (dest << CSI_SENS_CONF_DATA_DEST_SHIFT);
+-      ipu_csi_write(csi, csi_sens_conf, CSI_SENS_CONF);
+-
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_set_dest);
+-
+-int ipu_csi_enable(struct ipu_csi *csi)
+-{
+-      ipu_module_enable(csi->ipu, csi->module);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_enable);
+-
+-int ipu_csi_disable(struct ipu_csi *csi)
+-{
+-      ipu_module_disable(csi->ipu, csi->module);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_disable);
+-
+-struct ipu_csi *ipu_csi_get(struct ipu_soc *ipu, int id)
+-{
+-      unsigned long flags;
+-      struct ipu_csi *csi, *ret;
+-
+-      if (id > 1)
+-              return ERR_PTR(-EINVAL);
+-
+-      csi = ipu->csi_priv[id];
+-      ret = csi;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-
+-      if (csi->inuse) {
+-              ret = ERR_PTR(-EBUSY);
+-              goto unlock;
+-      }
+-
+-      csi->inuse = true;
+-unlock:
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_get);
+-
+-void ipu_csi_put(struct ipu_csi *csi)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&csi->lock, flags);
+-      csi->inuse = false;
+-      spin_unlock_irqrestore(&csi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_put);
+-
+-int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+-               unsigned long base, u32 module, struct clk *clk_ipu)
+-{
+-      struct ipu_csi *csi;
+-
+-      if (id > 1)
+-              return -ENODEV;
+-
+-      csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
+-      if (!csi)
+-              return -ENOMEM;
+-
+-      ipu->csi_priv[id] = csi;
+-
+-      spin_lock_init(&csi->lock);
+-      csi->module = module;
+-      csi->id = id;
+-      csi->clk_ipu = clk_ipu;
+-      csi->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!csi->base)
+-              return -ENOMEM;
+-
+-      dev_dbg(dev, "CSI%d base: 0x%08lx remapped to %p\n",
+-              id, base, csi->base);
+-      csi->ipu = ipu;
+-
+-      return 0;
+-}
+-
+-void ipu_csi_exit(struct ipu_soc *ipu, int id)
+-{
+-}
+-
+-void ipu_csi_dump(struct ipu_csi *csi)
+-{
+-      dev_dbg(csi->ipu->dev, "CSI_SENS_CONF:     %08x\n",
+-              ipu_csi_read(csi, CSI_SENS_CONF));
+-      dev_dbg(csi->ipu->dev, "CSI_SENS_FRM_SIZE: %08x\n",
+-              ipu_csi_read(csi, CSI_SENS_FRM_SIZE));
+-      dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE:  %08x\n",
+-              ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+-      dev_dbg(csi->ipu->dev, "CSI_OUT_FRM_CTRL:  %08x\n",
+-              ipu_csi_read(csi, CSI_OUT_FRM_CTRL));
+-      dev_dbg(csi->ipu->dev, "CSI_TST_CTRL:      %08x\n",
+-              ipu_csi_read(csi, CSI_TST_CTRL));
+-      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_1:   %08x\n",
+-              ipu_csi_read(csi, CSI_CCIR_CODE_1));
+-      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_2:   %08x\n",
+-              ipu_csi_read(csi, CSI_CCIR_CODE_2));
+-      dev_dbg(csi->ipu->dev, "CSI_CCIR_CODE_3:   %08x\n",
+-              ipu_csi_read(csi, CSI_CCIR_CODE_3));
+-      dev_dbg(csi->ipu->dev, "CSI_MIPI_DI:       %08x\n",
+-              ipu_csi_read(csi, CSI_MIPI_DI));
+-      dev_dbg(csi->ipu->dev, "CSI_SKIP:          %08x\n",
+-              ipu_csi_read(csi, CSI_SKIP));
+-}
+-EXPORT_SYMBOL_GPL(ipu_csi_dump);
+--- a/drivers/gpu/ipu-v3/ipu-dc.c
++++ /dev/null
+@@ -1,420 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-
+-#include <linux/export.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/delay.h>
+-#include <linux/interrupt.h>
+-#include <linux/io.h>
+-
+-#include <video/imx-ipu-v3.h>
+-#include "ipu-prv.h"
+-
+-#define DC_MAP_CONF_PTR(n)    (0x108 + ((n) & ~0x1) * 2)
+-#define DC_MAP_CONF_VAL(n)    (0x144 + ((n) & ~0x1) * 2)
+-
+-#define DC_EVT_NF             0
+-#define DC_EVT_NL             1
+-#define DC_EVT_EOF            2
+-#define DC_EVT_NFIELD         3
+-#define DC_EVT_EOL            4
+-#define DC_EVT_EOFIELD                5
+-#define DC_EVT_NEW_ADDR               6
+-#define DC_EVT_NEW_CHAN               7
+-#define DC_EVT_NEW_DATA               8
+-
+-#define DC_EVT_NEW_ADDR_W_0   0
+-#define DC_EVT_NEW_ADDR_W_1   1
+-#define DC_EVT_NEW_CHAN_W_0   2
+-#define DC_EVT_NEW_CHAN_W_1   3
+-#define DC_EVT_NEW_DATA_W_0   4
+-#define DC_EVT_NEW_DATA_W_1   5
+-#define DC_EVT_NEW_ADDR_R_0   6
+-#define DC_EVT_NEW_ADDR_R_1   7
+-#define DC_EVT_NEW_CHAN_R_0   8
+-#define DC_EVT_NEW_CHAN_R_1   9
+-#define DC_EVT_NEW_DATA_R_0   10
+-#define DC_EVT_NEW_DATA_R_1   11
+-
+-#define DC_WR_CH_CONF         0x0
+-#define DC_WR_CH_ADDR         0x4
+-#define DC_RL_CH(evt)         (8 + ((evt) & ~0x1) * 2)
+-
+-#define DC_GEN                        0xd4
+-#define DC_DISP_CONF1(disp)   (0xd8 + (disp) * 4)
+-#define DC_DISP_CONF2(disp)   (0xe8 + (disp) * 4)
+-#define DC_STAT                       0x1c8
+-
+-#define WROD(lf)              (0x18 | ((lf) << 1))
+-#define WRG                   0x01
+-#define WCLK                  0xc9
+-
+-#define SYNC_WAVE 0
+-#define NULL_WAVE (-1)
+-
+-#define DC_GEN_SYNC_1_6_SYNC  (2 << 1)
+-#define DC_GEN_SYNC_PRIORITY_1        (1 << 7)
+-
+-#define DC_WR_CH_CONF_WORD_SIZE_8             (0 << 0)
+-#define DC_WR_CH_CONF_WORD_SIZE_16            (1 << 0)
+-#define DC_WR_CH_CONF_WORD_SIZE_24            (2 << 0)
+-#define DC_WR_CH_CONF_WORD_SIZE_32            (3 << 0)
+-#define DC_WR_CH_CONF_DISP_ID_PARALLEL(i)     (((i) & 0x1) << 3)
+-#define DC_WR_CH_CONF_DISP_ID_SERIAL          (2 << 3)
+-#define DC_WR_CH_CONF_DISP_ID_ASYNC           (3 << 4)
+-#define DC_WR_CH_CONF_FIELD_MODE              (1 << 9)
+-#define DC_WR_CH_CONF_PROG_TYPE_NORMAL                (4 << 5)
+-#define DC_WR_CH_CONF_PROG_TYPE_MASK          (7 << 5)
+-#define DC_WR_CH_CONF_PROG_DI_ID              (1 << 2)
+-#define DC_WR_CH_CONF_PROG_DISP_ID(i)         (((i) & 0x1) << 3)
+-
+-#define IPU_DC_NUM_CHANNELS   10
+-
+-struct ipu_dc_priv;
+-
+-enum ipu_dc_map {
+-      IPU_DC_MAP_RGB24,
+-      IPU_DC_MAP_RGB565,
+-      IPU_DC_MAP_GBR24, /* TVEv2 */
+-      IPU_DC_MAP_BGR666,
+-      IPU_DC_MAP_LVDS666,
+-      IPU_DC_MAP_BGR24,
+-};
+-
+-struct ipu_dc {
+-      /* The display interface number assigned to this dc channel */
+-      unsigned int            di;
+-      void __iomem            *base;
+-      struct ipu_dc_priv      *priv;
+-      int                     chno;
+-      bool                    in_use;
+-};
+-
+-struct ipu_dc_priv {
+-      void __iomem            *dc_reg;
+-      void __iomem            *dc_tmpl_reg;
+-      struct ipu_soc          *ipu;
+-      struct device           *dev;
+-      struct ipu_dc           channels[IPU_DC_NUM_CHANNELS];
+-      struct mutex            mutex;
+-      struct completion       comp;
+-      int                     use_count;
+-};
+-
+-static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
+-{
+-      u32 reg;
+-
+-      reg = readl(dc->base + DC_RL_CH(event));
+-      reg &= ~(0xffff << (16 * (event & 0x1)));
+-      reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
+-      writel(reg, dc->base + DC_RL_CH(event));
+-}
+-
+-static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
+-              int map, int wave, int glue, int sync, int stop)
+-{
+-      struct ipu_dc_priv *priv = dc->priv;
+-      u32 reg1, reg2;
+-
+-      if (opcode == WCLK) {
+-              reg1 = (operand << 20) & 0xfff00000;
+-              reg2 = operand >> 12 | opcode << 1 | stop << 9;
+-      } else if (opcode == WRG) {
+-              reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000);
+-              reg2 = operand >> 17 | opcode << 7 | stop << 9;
+-      } else {
+-              reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
+-              reg2 = operand >> 12 | opcode << 4 | stop << 9;
+-      }
+-      writel(reg1, priv->dc_tmpl_reg + word * 8);
+-      writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
+-}
+-
+-static int ipu_bus_format_to_map(u32 fmt)
+-{
+-      switch (fmt) {
+-      default:
+-              WARN_ON(1);
+-              /* fall-through */
+-      case MEDIA_BUS_FMT_RGB888_1X24:
+-              return IPU_DC_MAP_RGB24;
+-      case MEDIA_BUS_FMT_RGB565_1X16:
+-              return IPU_DC_MAP_RGB565;
+-      case MEDIA_BUS_FMT_GBR888_1X24:
+-              return IPU_DC_MAP_GBR24;
+-      case MEDIA_BUS_FMT_RGB666_1X18:
+-              return IPU_DC_MAP_BGR666;
+-      case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+-              return IPU_DC_MAP_LVDS666;
+-      case MEDIA_BUS_FMT_BGR888_1X24:
+-              return IPU_DC_MAP_BGR24;
+-      }
+-}
+-
+-int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
+-              u32 bus_format, u32 width)
+-{
+-      struct ipu_dc_priv *priv = dc->priv;
+-      int addr, sync;
+-      u32 reg = 0;
+-      int map;
+-
+-      dc->di = ipu_di_get_num(di);
+-
+-      map = ipu_bus_format_to_map(bus_format);
+-
+-      /*
+-       * In interlaced mode we need more counters to create the asymmetric
+-       * per-field VSYNC signals. The pixel active signal synchronising DC
+-       * to DI moves to signal generator #6 (see ipu-di.c). In progressive
+-       * mode counter #5 is used.
+-       */
+-      sync = interlaced ? 6 : 5;
+-
+-      /* Reserve 5 microcode template words for each DI */
+-      if (dc->di)
+-              addr = 5;
+-      else
+-              addr = 0;
+-
+-      if (interlaced) {
+-              dc_link_event(dc, DC_EVT_NL, addr, 3);
+-              dc_link_event(dc, DC_EVT_EOL, addr, 2);
+-              dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
+-
+-              /* Init template microcode */
+-              dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
+-      } else {
+-              dc_link_event(dc, DC_EVT_NL, addr + 2, 3);
+-              dc_link_event(dc, DC_EVT_EOL, addr + 3, 2);
+-              dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1);
+-
+-              /* Init template microcode */
+-              dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
+-              dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
+-              dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+-              dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
+-      }
+-
+-      dc_link_event(dc, DC_EVT_NF, 0, 0);
+-      dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
+-      dc_link_event(dc, DC_EVT_EOF, 0, 0);
+-      dc_link_event(dc, DC_EVT_EOFIELD, 0, 0);
+-      dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0);
+-      dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0);
+-
+-      reg = readl(dc->base + DC_WR_CH_CONF);
+-      if (interlaced)
+-              reg |= DC_WR_CH_CONF_FIELD_MODE;
+-      else
+-              reg &= ~DC_WR_CH_CONF_FIELD_MODE;
+-      writel(reg, dc->base + DC_WR_CH_CONF);
+-
+-      writel(0x0, dc->base + DC_WR_CH_ADDR);
+-      writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
+-
+-void ipu_dc_enable(struct ipu_soc *ipu)
+-{
+-      struct ipu_dc_priv *priv = ipu->dc_priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      if (!priv->use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
+-
+-      priv->use_count++;
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_enable);
+-
+-void ipu_dc_enable_channel(struct ipu_dc *dc)
+-{
+-      u32 reg;
+-
+-      reg = readl(dc->base + DC_WR_CH_CONF);
+-      reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
+-      writel(reg, dc->base + DC_WR_CH_CONF);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
+-
+-void ipu_dc_disable_channel(struct ipu_dc *dc)
+-{
+-      u32 val;
+-
+-      val = readl(dc->base + DC_WR_CH_CONF);
+-      val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+-      writel(val, dc->base + DC_WR_CH_CONF);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
+-
+-void ipu_dc_disable(struct ipu_soc *ipu)
+-{
+-      struct ipu_dc_priv *priv = ipu->dc_priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      priv->use_count--;
+-      if (!priv->use_count)
+-              ipu_module_disable(priv->ipu, IPU_CONF_DC_EN);
+-
+-      if (priv->use_count < 0)
+-              priv->use_count = 0;
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_disable);
+-
+-static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
+-              int byte_num, int offset, int mask)
+-{
+-      int ptr = map * 3 + byte_num;
+-      u32 reg;
+-
+-      reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+-      reg &= ~(0xffff << (16 * (ptr & 0x1)));
+-      reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
+-      writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+-
+-      reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+-      reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
+-      reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
+-      writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
+-}
+-
+-static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
+-{
+-      u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+-
+-      writel(reg & ~(0xffff << (16 * (map & 0x1))),
+-                   priv->dc_reg + DC_MAP_CONF_PTR(map));
+-}
+-
+-struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
+-{
+-      struct ipu_dc_priv *priv = ipu->dc_priv;
+-      struct ipu_dc *dc;
+-
+-      if (channel >= IPU_DC_NUM_CHANNELS)
+-              return ERR_PTR(-ENODEV);
+-
+-      dc = &priv->channels[channel];
+-
+-      mutex_lock(&priv->mutex);
+-
+-      if (dc->in_use) {
+-              mutex_unlock(&priv->mutex);
+-              return ERR_PTR(-EBUSY);
+-      }
+-
+-      dc->in_use = true;
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return dc;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_get);
+-
+-void ipu_dc_put(struct ipu_dc *dc)
+-{
+-      struct ipu_dc_priv *priv = dc->priv;
+-
+-      mutex_lock(&priv->mutex);
+-      dc->in_use = false;
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dc_put);
+-
+-int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
+-              unsigned long base, unsigned long template_base)
+-{
+-      struct ipu_dc_priv *priv;
+-      static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
+-              0x78, 0, 0x94, 0xb4};
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-
+-      mutex_init(&priv->mutex);
+-
+-      priv->dev = dev;
+-      priv->ipu = ipu;
+-      priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
+-      priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
+-      if (!priv->dc_reg || !priv->dc_tmpl_reg)
+-              return -ENOMEM;
+-
+-      for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) {
+-              priv->channels[i].chno = i;
+-              priv->channels[i].priv = priv;
+-              priv->channels[i].base = priv->dc_reg + channel_offsets[i];
+-      }
+-
+-      writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
+-                      DC_WR_CH_CONF_PROG_DI_ID,
+-                      priv->channels[1].base + DC_WR_CH_CONF);
+-      writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
+-                      priv->channels[5].base + DC_WR_CH_CONF);
+-
+-      writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
+-              priv->dc_reg + DC_GEN);
+-
+-      ipu->dc_priv = priv;
+-
+-      dev_dbg(dev, "DC base: 0x%08lx template base: 0x%08lx\n",
+-                      base, template_base);
+-
+-      /* rgb24 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */
+-
+-      /* rgb565 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */
+-
+-      /* gbr24 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */
+-
+-      /* bgr666 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
+-
+-      /* lvds666 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
+-
+-      /* bgr24 */
+-      ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
+-      ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
+-
+-      return 0;
+-}
+-
+-void ipu_dc_exit(struct ipu_soc *ipu)
+-{
+-}
+--- a/drivers/gpu/ipu-v3/ipu-di.c
++++ /dev/null
+@@ -1,745 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/export.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/io.h>
+-#include <linux/err.h>
+-#include <linux/platform_device.h>
+-
+-#include <video/imx-ipu-v3.h>
+-#include "ipu-prv.h"
+-
+-struct ipu_di {
+-      void __iomem *base;
+-      int id;
+-      u32 module;
+-      struct clk *clk_di;     /* display input clock */
+-      struct clk *clk_ipu;    /* IPU bus clock */
+-      struct clk *clk_di_pixel; /* resulting pixel clock */
+-      bool inuse;
+-      struct ipu_soc *ipu;
+-};
+-
+-static DEFINE_MUTEX(di_mutex);
+-
+-struct di_sync_config {
+-      int run_count;
+-      int run_src;
+-      int offset_count;
+-      int offset_src;
+-      int repeat_count;
+-      int cnt_clr_src;
+-      int cnt_polarity_gen_en;
+-      int cnt_polarity_clr_src;
+-      int cnt_polarity_trigger_src;
+-      int cnt_up;
+-      int cnt_down;
+-};
+-
+-enum di_pins {
+-      DI_PIN11 = 0,
+-      DI_PIN12 = 1,
+-      DI_PIN13 = 2,
+-      DI_PIN14 = 3,
+-      DI_PIN15 = 4,
+-      DI_PIN16 = 5,
+-      DI_PIN17 = 6,
+-      DI_PIN_CS = 7,
+-
+-      DI_PIN_SER_CLK = 0,
+-      DI_PIN_SER_RS = 1,
+-};
+-
+-enum di_sync_wave {
+-      DI_SYNC_NONE = 0,
+-      DI_SYNC_CLK = 1,
+-      DI_SYNC_INT_HSYNC = 2,
+-      DI_SYNC_HSYNC = 3,
+-      DI_SYNC_VSYNC = 4,
+-      DI_SYNC_DE = 6,
+-
+-      DI_SYNC_CNT1 = 2,       /* counter >= 2 only */
+-      DI_SYNC_CNT4 = 5,       /* counter >= 5 only */
+-      DI_SYNC_CNT5 = 6,       /* counter >= 6 only */
+-};
+-
+-#define SYNC_WAVE 0
+-
+-#define DI_GENERAL            0x0000
+-#define DI_BS_CLKGEN0         0x0004
+-#define DI_BS_CLKGEN1         0x0008
+-#define DI_SW_GEN0(gen)               (0x000c + 4 * ((gen) - 1))
+-#define DI_SW_GEN1(gen)               (0x0030 + 4 * ((gen) - 1))
+-#define DI_STP_REP(gen)               (0x0148 + 4 * (((gen) - 1)/2))
+-#define DI_SYNC_AS_GEN                0x0054
+-#define DI_DW_GEN(gen)                (0x0058 + 4 * (gen))
+-#define DI_DW_SET(gen, set)   (0x0088 + 4 * ((gen) + 0xc * (set)))
+-#define DI_SER_CONF           0x015c
+-#define DI_SSC                        0x0160
+-#define DI_POL                        0x0164
+-#define DI_AW0                        0x0168
+-#define DI_AW1                        0x016c
+-#define DI_SCR_CONF           0x0170
+-#define DI_STAT                       0x0174
+-
+-#define DI_SW_GEN0_RUN_COUNT(x)                       ((x) << 19)
+-#define DI_SW_GEN0_RUN_SRC(x)                 ((x) << 16)
+-#define DI_SW_GEN0_OFFSET_COUNT(x)            ((x) << 3)
+-#define DI_SW_GEN0_OFFSET_SRC(x)              ((x) << 0)
+-
+-#define DI_SW_GEN1_CNT_POL_GEN_EN(x)          ((x) << 29)
+-#define DI_SW_GEN1_CNT_CLR_SRC(x)             ((x) << 25)
+-#define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x)     ((x) << 12)
+-#define DI_SW_GEN1_CNT_POL_CLR_SRC(x)         ((x) << 9)
+-#define DI_SW_GEN1_CNT_DOWN(x)                        ((x) << 16)
+-#define DI_SW_GEN1_CNT_UP(x)                  (x)
+-#define DI_SW_GEN1_AUTO_RELOAD                        (0x10000000)
+-
+-#define DI_DW_GEN_ACCESS_SIZE_OFFSET          24
+-#define DI_DW_GEN_COMPONENT_SIZE_OFFSET               16
+-
+-#define DI_GEN_POLARITY_1                     (1 << 0)
+-#define DI_GEN_POLARITY_2                     (1 << 1)
+-#define DI_GEN_POLARITY_3                     (1 << 2)
+-#define DI_GEN_POLARITY_4                     (1 << 3)
+-#define DI_GEN_POLARITY_5                     (1 << 4)
+-#define DI_GEN_POLARITY_6                     (1 << 5)
+-#define DI_GEN_POLARITY_7                     (1 << 6)
+-#define DI_GEN_POLARITY_8                     (1 << 7)
+-#define DI_GEN_POLARITY_DISP_CLK              (1 << 17)
+-#define DI_GEN_DI_CLK_EXT                     (1 << 20)
+-#define DI_GEN_DI_VSYNC_EXT                   (1 << 21)
+-
+-#define DI_POL_DRDY_DATA_POLARITY             (1 << 7)
+-#define DI_POL_DRDY_POLARITY_15                       (1 << 4)
+-
+-#define DI_VSYNC_SEL_OFFSET                   13
+-
+-static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset)
+-{
+-      return readl(di->base + offset);
+-}
+-
+-static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
+-{
+-      writel(value, di->base + offset);
+-}
+-
+-static void ipu_di_data_wave_config(struct ipu_di *di,
+-                                   int wave_gen,
+-                                   int access_size, int component_size)
+-{
+-      u32 reg;
+-      reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
+-          (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
+-      ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+-}
+-
+-static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin,
+-              int set, int up, int down)
+-{
+-      u32 reg;
+-
+-      reg = ipu_di_read(di, DI_DW_GEN(wave_gen));
+-      reg &= ~(0x3 << (di_pin * 2));
+-      reg |= set << (di_pin * 2);
+-      ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+-
+-      ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set));
+-}
+-
+-static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
+-              int start, int count)
+-{
+-      u32 reg;
+-      int i;
+-
+-      for (i = 0; i < count; i++) {
+-              struct di_sync_config *c = &config[i];
+-              int wave_gen = start + i + 1;
+-
+-              if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) ||
+-                              (c->repeat_count >= 0x1000) ||
+-                              (c->cnt_up >= 0x400) ||
+-                              (c->cnt_down >= 0x400)) {
+-                      dev_err(di->ipu->dev, "DI%d counters out of range.\n",
+-                                      di->id);
+-                      return;
+-              }
+-
+-              reg = DI_SW_GEN0_RUN_COUNT(c->run_count) |
+-                      DI_SW_GEN0_RUN_SRC(c->run_src) |
+-                      DI_SW_GEN0_OFFSET_COUNT(c->offset_count) |
+-                      DI_SW_GEN0_OFFSET_SRC(c->offset_src);
+-              ipu_di_write(di, reg, DI_SW_GEN0(wave_gen));
+-
+-              reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) |
+-                      DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) |
+-                      DI_SW_GEN1_CNT_POL_TRIGGER_SRC(
+-                                      c->cnt_polarity_trigger_src) |
+-                      DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) |
+-                      DI_SW_GEN1_CNT_DOWN(c->cnt_down) |
+-                      DI_SW_GEN1_CNT_UP(c->cnt_up);
+-
+-              /* Enable auto reload */
+-              if (c->repeat_count == 0)
+-                      reg |= DI_SW_GEN1_AUTO_RELOAD;
+-
+-              ipu_di_write(di, reg, DI_SW_GEN1(wave_gen));
+-
+-              reg = ipu_di_read(di, DI_STP_REP(wave_gen));
+-              reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1)));
+-              reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1));
+-              ipu_di_write(di, reg, DI_STP_REP(wave_gen));
+-      }
+-}
+-
+-static void ipu_di_sync_config_interlaced(struct ipu_di *di,
+-              struct ipu_di_signal_cfg *sig)
+-{
+-      u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+-              sig->mode.hback_porch + sig->mode.hfront_porch;
+-      u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+-              sig->mode.vback_porch + sig->mode.vfront_porch;
+-      struct di_sync_config cfg[] = {
+-              {
+-                      /* 1: internal VSYNC for each frame */
+-                      .run_count = v_total * 2 - 1,
+-                      .run_src = 3,                   /* == counter 7 */
+-              }, {
+-                      /* PIN2: HSYNC waveform */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
+-                      .cnt_down = sig->mode.hsync_len * 2,
+-              }, {
+-                      /* PIN3: VSYNC waveform */
+-                      .run_count = v_total - 1,
+-                      .run_src = 4,                   /* == counter 7 */
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = 4,  /* == counter 7 */
+-                      .cnt_down = sig->mode.vsync_len * 2,
+-                      .cnt_clr_src = DI_SYNC_CNT1,
+-              }, {
+-                      /* 4: Field */
+-                      .run_count = v_total / 2,
+-                      .run_src = DI_SYNC_HSYNC,
+-                      .offset_count = h_total / 2,
+-                      .offset_src = DI_SYNC_CLK,
+-                      .repeat_count = 2,
+-                      .cnt_clr_src = DI_SYNC_CNT1,
+-              }, {
+-                      /* 5: Active lines */
+-                      .run_src = DI_SYNC_HSYNC,
+-                      .offset_count = (sig->mode.vsync_len +
+-                                       sig->mode.vback_porch) / 2,
+-                      .offset_src = DI_SYNC_HSYNC,
+-                      .repeat_count = sig->mode.vactive / 2,
+-                      .cnt_clr_src = DI_SYNC_CNT4,
+-              }, {
+-                      /* 6: Active pixel, referenced by DC */
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = sig->mode.hsync_len +
+-                                      sig->mode.hback_porch,
+-                      .offset_src = DI_SYNC_CLK,
+-                      .repeat_count = sig->mode.hactive,
+-                      .cnt_clr_src = DI_SYNC_CNT5,
+-              }, {
+-                      /* 7: Half line HSYNC */
+-                      .run_count = h_total / 2 - 1,
+-                      .run_src = DI_SYNC_CLK,
+-              }
+-      };
+-
+-      ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+-
+-      ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
+-}
+-
+-static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
+-              struct ipu_di_signal_cfg *sig, int div)
+-{
+-      u32 h_total = sig->mode.hactive + sig->mode.hsync_len +
+-              sig->mode.hback_porch + sig->mode.hfront_porch;
+-      u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
+-              sig->mode.vback_porch + sig->mode.vfront_porch;
+-      struct di_sync_config cfg[] = {
+-              {
+-                      /* 1: INT_HSYNC */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-              } , {
+-                      /* PIN2: HSYNC */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = div * sig->v_to_h_sync,
+-                      .offset_src = DI_SYNC_CLK,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
+-                      .cnt_down = sig->mode.hsync_len * 2,
+-              } , {
+-                      /* PIN3: VSYNC */
+-                      .run_count = v_total - 1,
+-                      .run_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_down = sig->mode.vsync_len * 2,
+-              } , {
+-                      /* 4: Line Active */
+-                      .run_src = DI_SYNC_HSYNC,
+-                      .offset_count = sig->mode.vsync_len +
+-                                      sig->mode.vback_porch,
+-                      .offset_src = DI_SYNC_HSYNC,
+-                      .repeat_count = sig->mode.vactive,
+-                      .cnt_clr_src = DI_SYNC_VSYNC,
+-              } , {
+-                      /* 5: Pixel Active, referenced by DC */
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = sig->mode.hsync_len +
+-                                      sig->mode.hback_porch,
+-                      .offset_src = DI_SYNC_CLK,
+-                      .repeat_count = sig->mode.hactive,
+-                      .cnt_clr_src = 5, /* Line Active */
+-              } , {
+-                      /* unused */
+-              } , {
+-                      /* unused */
+-              } , {
+-                      /* unused */
+-              } , {
+-                      /* unused */
+-              },
+-      };
+-      /* can't use #7 and #8 for line active and pixel active counters */
+-      struct di_sync_config cfg_vga[] = {
+-              {
+-                      /* 1: INT_HSYNC */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-              } , {
+-                      /* 2: VSYNC */
+-                      .run_count = v_total - 1,
+-                      .run_src = DI_SYNC_INT_HSYNC,
+-              } , {
+-                      /* 3: Line Active */
+-                      .run_src = DI_SYNC_INT_HSYNC,
+-                      .offset_count = sig->mode.vsync_len +
+-                                      sig->mode.vback_porch,
+-                      .offset_src = DI_SYNC_INT_HSYNC,
+-                      .repeat_count = sig->mode.vactive,
+-                      .cnt_clr_src = 3 /* VSYNC */,
+-              } , {
+-                      /* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+-                      .offset_src = DI_SYNC_CLK,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
+-                      .cnt_down = sig->mode.hsync_len * 2,
+-              } , {
+-                      /* 5: Pixel Active signal to DC */
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = sig->mode.hsync_len +
+-                                      sig->mode.hback_porch,
+-                      .offset_src = DI_SYNC_CLK,
+-                      .repeat_count = sig->mode.hactive,
+-                      .cnt_clr_src = 4, /* Line Active */
+-              } , {
+-                      /* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
+-                      .run_count = v_total - 1,
+-                      .run_src = DI_SYNC_INT_HSYNC,
+-                      .offset_count = 1, /* magic value from Freescale TVE driver */
+-                      .offset_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_down = sig->mode.vsync_len * 2,
+-              } , {
+-                      /* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
+-                      .run_count = h_total - 1,
+-                      .run_src = DI_SYNC_CLK,
+-                      .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+-                      .offset_src = DI_SYNC_CLK,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_CLK,
+-                      .cnt_down = sig->mode.hsync_len * 2,
+-              } , {
+-                      /* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
+-                      .run_count = v_total - 1,
+-                      .run_src = DI_SYNC_INT_HSYNC,
+-                      .offset_count = 1, /* magic value from Freescale TVE driver */
+-                      .offset_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_polarity_gen_en = 1,
+-                      .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+-                      .cnt_down = sig->mode.vsync_len * 2,
+-              } , {
+-                      /* unused */
+-              },
+-      };
+-
+-      ipu_di_write(di, v_total - 1, DI_SCR_CONF);
+-      if (sig->hsync_pin == 2 && sig->vsync_pin == 3)
+-              ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+-      else
+-              ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
+-}
+-
+-static void ipu_di_config_clock(struct ipu_di *di,
+-      const struct ipu_di_signal_cfg *sig)
+-{
+-      struct clk *clk;
+-      unsigned clkgen0;
+-      uint32_t val;
+-
+-      if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
+-              /*
+-               * CLKMODE_EXT means we must use the DI clock: this is
+-               * needed for things like LVDS which needs to feed the
+-               * DI and LDB with the same pixel clock.
+-               */
+-              clk = di->clk_di;
+-
+-              if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
+-                      /*
+-                       * CLKMODE_SYNC means that we want the DI to be
+-                       * clocked at the same rate as the parent clock.
+-                       * This is needed (eg) for LDB which needs to be
+-                       * fed with the same pixel clock.  We assume that
+-                       * the LDB clock has already been set correctly.
+-                       */
+-                      clkgen0 = 1 << 4;
+-              } else {
+-                      /*
+-                       * We can use the divider.  We should really have
+-                       * a flag here indicating whether the bridge can
+-                       * cope with a fractional divider or not.  For the
+-                       * time being, let's go for simplicitly and
+-                       * reliability.
+-                       */
+-                      unsigned long in_rate;
+-                      unsigned div;
+-
+-                      clk_set_rate(clk, sig->mode.pixelclock);
+-
+-                      in_rate = clk_get_rate(clk);
+-                      div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
+-                      div = clamp(div, 1U, 255U);
+-
+-                      clkgen0 = div << 4;
+-              }
+-      } else {
+-              /*
+-               * For other interfaces, we can arbitarily select between
+-               * the DI specific clock and the internal IPU clock.  See
+-               * DI_GENERAL bit 20.  We select the IPU clock if it can
+-               * give us a clock rate within 1% of the requested frequency,
+-               * otherwise we use the DI clock.
+-               */
+-              unsigned long rate, clkrate;
+-              unsigned div, error;
+-
+-              clkrate = clk_get_rate(di->clk_ipu);
+-              div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+-              div = clamp(div, 1U, 255U);
+-              rate = clkrate / div;
+-
+-              error = rate / (sig->mode.pixelclock / 1000);
+-
+-              dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %d.%u%%\n",
+-                      rate, div, (signed)(error - 1000) / 10, error % 10);
+-
+-              /* Allow a 1% error */
+-              if (error < 1010 && error >= 990) {
+-                      clk = di->clk_ipu;
+-
+-                      clkgen0 = div << 4;
+-              } else {
+-                      unsigned long in_rate;
+-                      unsigned div;
+-
+-                      clk = di->clk_di;
+-
+-                      clk_set_rate(clk, sig->mode.pixelclock);
+-
+-                      in_rate = clk_get_rate(clk);
+-                      div = DIV_ROUND_CLOSEST(in_rate, sig->mode.pixelclock);
+-                      div = clamp(div, 1U, 255U);
+-
+-                      clkgen0 = div << 4;
+-              }
+-      }
+-
+-      di->clk_di_pixel = clk;
+-
+-      /* Set the divider */
+-      ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
+-
+-      /*
+-       * Set the high/low periods.  Bits 24:16 give us the falling edge,
+-       * and bits 8:0 give the rising edge.  LSB is fraction, and is
+-       * based on the divider above.  We want a 50% duty cycle, so set
+-       * the falling edge to be half the divider.
+-       */
+-      ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
+-
+-      /* Finally select the input clock */
+-      val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
+-      if (clk == di->clk_di)
+-              val |= DI_GEN_DI_CLK_EXT;
+-      ipu_di_write(di, val, DI_GENERAL);
+-
+-      dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
+-              sig->mode.pixelclock,
+-              clk_get_rate(di->clk_ipu),
+-              clk_get_rate(di->clk_di),
+-              clk == di->clk_di ? "DI" : "IPU",
+-              clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
+-}
+-
+-/*
+- * This function is called to adjust a video mode to IPU restrictions.
+- * It is meant to be called from drm crtc mode_fixup() methods.
+- */
+-int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
+-{
+-      u32 diff;
+-
+-      if (mode->vfront_porch >= 2)
+-              return 0;
+-
+-      diff = 2 - mode->vfront_porch;
+-
+-      if (mode->vback_porch >= diff) {
+-              mode->vfront_porch = 2;
+-              mode->vback_porch -= diff;
+-      } else if (mode->vsync_len > diff) {
+-              mode->vfront_porch = 2;
+-              mode->vsync_len = mode->vsync_len - diff;
+-      } else {
+-              dev_warn(di->ipu->dev, "failed to adjust videomode\n");
+-              return -EINVAL;
+-      }
+-
+-      dev_dbg(di->ipu->dev, "videomode adapted for IPU restrictions\n");
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
+-
+-static u32 ipu_di_gen_polarity(int pin)
+-{
+-      switch (pin) {
+-      case 1:
+-              return DI_GEN_POLARITY_1;
+-      case 2:
+-              return DI_GEN_POLARITY_2;
+-      case 3:
+-              return DI_GEN_POLARITY_3;
+-      case 4:
+-              return DI_GEN_POLARITY_4;
+-      case 5:
+-              return DI_GEN_POLARITY_5;
+-      case 6:
+-              return DI_GEN_POLARITY_6;
+-      case 7:
+-              return DI_GEN_POLARITY_7;
+-      case 8:
+-              return DI_GEN_POLARITY_8;
+-      }
+-      return 0;
+-}
+-
+-int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
+-{
+-      u32 reg;
+-      u32 di_gen, vsync_cnt;
+-      u32 div;
+-
+-      dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
+-              di->id, sig->mode.hactive, sig->mode.vactive);
+-
+-      dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
+-              clk_get_rate(di->clk_ipu),
+-              clk_get_rate(di->clk_di),
+-              sig->mode.pixelclock);
+-
+-      mutex_lock(&di_mutex);
+-
+-      ipu_di_config_clock(di, sig);
+-
+-      div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
+-      div = div / 16;         /* Now divider is integer portion */
+-
+-      /* Setup pixel clock timing */
+-      /* Down time is half of period */
+-      ipu_di_write(di, (div << 16), DI_BS_CLKGEN1);
+-
+-      ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1);
+-      ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
+-
+-      di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
+-      di_gen |= DI_GEN_DI_VSYNC_EXT;
+-
+-      if (sig->mode.flags & DISPLAY_FLAGS_INTERLACED) {
+-              ipu_di_sync_config_interlaced(di, sig);
+-
+-              /* set y_sel = 1 */
+-              di_gen |= 0x10000000;
+-
+-              vsync_cnt = 3;
+-      } else {
+-              ipu_di_sync_config_noninterlaced(di, sig, div);
+-
+-              vsync_cnt = 3;
+-              if (di->id == 1)
+-                      /*
+-                       * TODO: change only for TVEv2, parallel display
+-                       * uses pin 2 / 3
+-                       */
+-                      if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
+-                              vsync_cnt = 6;
+-      }
+-
+-      if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+-              di_gen |= ipu_di_gen_polarity(sig->hsync_pin);
+-      if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+-              di_gen |= ipu_di_gen_polarity(sig->vsync_pin);
+-
+-      if (sig->clk_pol)
+-              di_gen |= DI_GEN_POLARITY_DISP_CLK;
+-
+-      ipu_di_write(di, di_gen, DI_GENERAL);
+-
+-      ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002,
+-                   DI_SYNC_AS_GEN);
+-
+-      reg = ipu_di_read(di, DI_POL);
+-      reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
+-
+-      if (sig->enable_pol)
+-              reg |= DI_POL_DRDY_POLARITY_15;
+-      if (sig->data_pol)
+-              reg |= DI_POL_DRDY_DATA_POLARITY;
+-
+-      ipu_di_write(di, reg, DI_POL);
+-
+-      mutex_unlock(&di_mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
+-
+-int ipu_di_enable(struct ipu_di *di)
+-{
+-      int ret;
+-
+-      WARN_ON(IS_ERR(di->clk_di_pixel));
+-
+-      ret = clk_prepare_enable(di->clk_di_pixel);
+-      if (ret)
+-              return ret;
+-
+-      ipu_module_enable(di->ipu, di->module);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_enable);
+-
+-int ipu_di_disable(struct ipu_di *di)
+-{
+-      WARN_ON(IS_ERR(di->clk_di_pixel));
+-
+-      ipu_module_disable(di->ipu, di->module);
+-
+-      clk_disable_unprepare(di->clk_di_pixel);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_disable);
+-
+-int ipu_di_get_num(struct ipu_di *di)
+-{
+-      return di->id;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_get_num);
+-
+-static DEFINE_MUTEX(ipu_di_lock);
+-
+-struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp)
+-{
+-      struct ipu_di *di;
+-
+-      if (disp > 1)
+-              return ERR_PTR(-EINVAL);
+-
+-      di = ipu->di_priv[disp];
+-
+-      mutex_lock(&ipu_di_lock);
+-
+-      if (di->inuse) {
+-              di = ERR_PTR(-EBUSY);
+-              goto out;
+-      }
+-
+-      di->inuse = true;
+-out:
+-      mutex_unlock(&ipu_di_lock);
+-
+-      return di;
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_get);
+-
+-void ipu_di_put(struct ipu_di *di)
+-{
+-      mutex_lock(&ipu_di_lock);
+-
+-      di->inuse = false;
+-
+-      mutex_unlock(&ipu_di_lock);
+-}
+-EXPORT_SYMBOL_GPL(ipu_di_put);
+-
+-int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+-              unsigned long base,
+-              u32 module, struct clk *clk_ipu)
+-{
+-      struct ipu_di *di;
+-
+-      if (id > 1)
+-              return -ENODEV;
+-
+-      di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+-      if (!di)
+-              return -ENOMEM;
+-
+-      ipu->di_priv[id] = di;
+-
+-      di->clk_di = devm_clk_get(dev, id ? "di1" : "di0");
+-      if (IS_ERR(di->clk_di))
+-              return PTR_ERR(di->clk_di);
+-
+-      di->module = module;
+-      di->id = id;
+-      di->clk_ipu = clk_ipu;
+-      di->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!di->base)
+-              return -ENOMEM;
+-
+-      ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
+-
+-      dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
+-                      id, base, di->base);
+-      di->inuse = false;
+-      di->ipu = ipu;
+-
+-      return 0;
+-}
+-
+-void ipu_di_exit(struct ipu_soc *ipu, int id)
+-{
+-}
+--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
++++ /dev/null
+@@ -1,214 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/export.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/io.h>
+-
+-#include <video/imx-ipu-v3.h>
+-#include "ipu-prv.h"
+-
+-#define DMFC_RD_CHAN          0x0000
+-#define DMFC_WR_CHAN          0x0004
+-#define DMFC_WR_CHAN_DEF      0x0008
+-#define DMFC_DP_CHAN          0x000c
+-#define DMFC_DP_CHAN_DEF      0x0010
+-#define DMFC_GENERAL1         0x0014
+-#define DMFC_GENERAL2         0x0018
+-#define DMFC_IC_CTRL          0x001c
+-#define DMFC_WR_CHAN_ALT      0x0020
+-#define DMFC_WR_CHAN_DEF_ALT  0x0024
+-#define DMFC_DP_CHAN_ALT      0x0028
+-#define DMFC_DP_CHAN_DEF_ALT  0x002c
+-#define DMFC_GENERAL1_ALT     0x0030
+-#define DMFC_STAT             0x0034
+-
+-#define DMFC_WR_CHAN_1_28             0
+-#define DMFC_WR_CHAN_2_41             8
+-#define DMFC_WR_CHAN_1C_42            16
+-#define DMFC_WR_CHAN_2C_43            24
+-
+-#define DMFC_DP_CHAN_5B_23            0
+-#define DMFC_DP_CHAN_5F_27            8
+-#define DMFC_DP_CHAN_6B_24            16
+-#define DMFC_DP_CHAN_6F_29            24
+-
+-struct dmfc_channel_data {
+-      int             ipu_channel;
+-      unsigned long   channel_reg;
+-      unsigned long   shift;
+-      unsigned        eot_shift;
+-      unsigned        max_fifo_lines;
+-};
+-
+-static const struct dmfc_channel_data dmfcdata[] = {
+-      {
+-              .ipu_channel    = IPUV3_CHANNEL_MEM_BG_SYNC,
+-              .channel_reg    = DMFC_DP_CHAN,
+-              .shift          = DMFC_DP_CHAN_5B_23,
+-              .eot_shift      = 20,
+-              .max_fifo_lines = 3,
+-      }, {
+-              .ipu_channel    = 24,
+-              .channel_reg    = DMFC_DP_CHAN,
+-              .shift          = DMFC_DP_CHAN_6B_24,
+-              .eot_shift      = 22,
+-              .max_fifo_lines = 1,
+-      }, {
+-              .ipu_channel    = IPUV3_CHANNEL_MEM_FG_SYNC,
+-              .channel_reg    = DMFC_DP_CHAN,
+-              .shift          = DMFC_DP_CHAN_5F_27,
+-              .eot_shift      = 21,
+-              .max_fifo_lines = 2,
+-      }, {
+-              .ipu_channel    = IPUV3_CHANNEL_MEM_DC_SYNC,
+-              .channel_reg    = DMFC_WR_CHAN,
+-              .shift          = DMFC_WR_CHAN_1_28,
+-              .eot_shift      = 16,
+-              .max_fifo_lines = 2,
+-      }, {
+-              .ipu_channel    = 29,
+-              .channel_reg    = DMFC_DP_CHAN,
+-              .shift          = DMFC_DP_CHAN_6F_29,
+-              .eot_shift      = 23,
+-              .max_fifo_lines = 1,
+-      },
+-};
+-
+-#define DMFC_NUM_CHANNELS     ARRAY_SIZE(dmfcdata)
+-
+-struct ipu_dmfc_priv;
+-
+-struct dmfc_channel {
+-      unsigned                        slots;
+-      struct ipu_soc                  *ipu;
+-      struct ipu_dmfc_priv            *priv;
+-      const struct dmfc_channel_data  *data;
+-};
+-
+-struct ipu_dmfc_priv {
+-      struct ipu_soc *ipu;
+-      struct device *dev;
+-      struct dmfc_channel channels[DMFC_NUM_CHANNELS];
+-      struct mutex mutex;
+-      void __iomem *base;
+-      int use_count;
+-};
+-
+-int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
+-{
+-      struct ipu_dmfc_priv *priv = dmfc->priv;
+-      mutex_lock(&priv->mutex);
+-
+-      if (!priv->use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN);
+-
+-      priv->use_count++;
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
+-
+-void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
+-{
+-      struct ipu_dmfc_priv *priv = dmfc->priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      priv->use_count--;
+-
+-      if (!priv->use_count)
+-              ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
+-
+-      if (priv->use_count < 0)
+-              priv->use_count = 0;
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
+-
+-void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
+-{
+-      struct ipu_dmfc_priv *priv = dmfc->priv;
+-      u32 dmfc_gen1;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
+-
+-      if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
+-              dmfc_gen1 |= 1 << dmfc->data->eot_shift;
+-      else
+-              dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);
+-
+-      writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
+-
+-struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
+-{
+-      struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
+-      int i;
+-
+-      for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+-              if (dmfcdata[i].ipu_channel == ipu_channel)
+-                      return &priv->channels[i];
+-      return ERR_PTR(-ENODEV);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dmfc_get);
+-
+-void ipu_dmfc_put(struct dmfc_channel *dmfc)
+-{
+-}
+-EXPORT_SYMBOL_GPL(ipu_dmfc_put);
+-
+-int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+-              struct clk *ipu_clk)
+-{
+-      struct ipu_dmfc_priv *priv;
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-
+-      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!priv->base)
+-              return -ENOMEM;
+-
+-      priv->dev = dev;
+-      priv->ipu = ipu;
+-      mutex_init(&priv->mutex);
+-
+-      ipu->dmfc_priv = priv;
+-
+-      for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+-              priv->channels[i].priv = priv;
+-              priv->channels[i].ipu = ipu;
+-              priv->channels[i].data = &dmfcdata[i];
+-
+-              if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+-                  dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+-                  dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+-                      priv->channels[i].slots = 2;
+-      }
+-
+-      writel(0x00000050, priv->base + DMFC_WR_CHAN);
+-      writel(0x00005654, priv->base + DMFC_DP_CHAN);
+-      writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
+-      writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
+-      writel(0x00000003, priv->base + DMFC_GENERAL1);
+-
+-      return 0;
+-}
+-
+-void ipu_dmfc_exit(struct ipu_soc *ipu)
+-{
+-}
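For reference, the wait4eot decision in the ipu-dmfc.c code removed above reduces to one integer comparison: a channel owns "slots" FIFO segments of 64 words x 4 bytes each, and the EOT/watermark bit is set only when that space covers more than max_fifo_lines lines of the current width. The following minimal userspace sketch (hypothetical helper name, slot/line values taken from the dmfcdata table above; not kernel code) shows how the bit falls out of the numbers.

/* sketch of the ipu_dmfc_config_wait4eot() arithmetic, compiled standalone */
#include <stdbool.h>
#include <stdio.h>

static bool dmfc_wants_wait4eot(unsigned int slots, unsigned int width,
                                unsigned int max_fifo_lines)
{
        /* slots * 64 words * 4 bytes of FIFO, divided by the line width */
        return (slots * 64 * 4) / width > max_fifo_lines;
}

int main(void)
{
        /* IPUV3_CHANNEL_MEM_BG_SYNC: 2 slots, max_fifo_lines = 3 */
        printf("width 160 -> eot %d\n", dmfc_wants_wait4eot(2, 160, 3)); /* 512/160 = 3, not > 3 */
        printf("width 100 -> eot %d\n", dmfc_wants_wait4eot(2, 100, 3)); /* 512/100 = 5, > 3 */
        return 0;
}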
+--- a/drivers/gpu/ipu-v3/ipu-dp.c
++++ /dev/null
+@@ -1,357 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/export.h>
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/io.h>
+-#include <linux/err.h>
+-
+-#include <video/imx-ipu-v3.h>
+-#include "ipu-prv.h"
+-
+-#define DP_SYNC 0
+-#define DP_ASYNC0 0x60
+-#define DP_ASYNC1 0xBC
+-
+-#define DP_COM_CONF           0x0
+-#define DP_GRAPH_WIND_CTRL    0x0004
+-#define DP_FG_POS             0x0008
+-#define DP_CSC_A_0            0x0044
+-#define DP_CSC_A_1            0x0048
+-#define DP_CSC_A_2            0x004C
+-#define DP_CSC_A_3            0x0050
+-#define DP_CSC_0              0x0054
+-#define DP_CSC_1              0x0058
+-
+-#define DP_COM_CONF_FG_EN             (1 << 0)
+-#define DP_COM_CONF_GWSEL             (1 << 1)
+-#define DP_COM_CONF_GWAM              (1 << 2)
+-#define DP_COM_CONF_GWCKE             (1 << 3)
+-#define DP_COM_CONF_CSC_DEF_MASK      (3 << 8)
+-#define DP_COM_CONF_CSC_DEF_OFFSET    8
+-#define DP_COM_CONF_CSC_DEF_FG                (3 << 8)
+-#define DP_COM_CONF_CSC_DEF_BG                (2 << 8)
+-#define DP_COM_CONF_CSC_DEF_BOTH      (1 << 8)
+-
+-#define IPUV3_NUM_FLOWS               3
+-
+-struct ipu_dp_priv;
+-
+-struct ipu_dp {
+-      u32 flow;
+-      bool in_use;
+-      bool foreground;
+-      enum ipu_color_space in_cs;
+-};
+-
+-struct ipu_flow {
+-      struct ipu_dp foreground;
+-      struct ipu_dp background;
+-      enum ipu_color_space out_cs;
+-      void __iomem *base;
+-      struct ipu_dp_priv *priv;
+-};
+-
+-struct ipu_dp_priv {
+-      struct ipu_soc *ipu;
+-      struct device *dev;
+-      void __iomem *base;
+-      struct ipu_flow flow[IPUV3_NUM_FLOWS];
+-      struct mutex mutex;
+-      int use_count;
+-};
+-
+-static u32 ipu_dp_flow_base[] = {DP_SYNC, DP_ASYNC0, DP_ASYNC1};
+-
+-static inline struct ipu_flow *to_flow(struct ipu_dp *dp)
+-{
+-      if (dp->foreground)
+-              return container_of(dp, struct ipu_flow, foreground);
+-      else
+-              return container_of(dp, struct ipu_flow, background);
+-}
+-
+-int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
+-              u8 alpha, bool bg_chan)
+-{
+-      struct ipu_flow *flow = to_flow(dp);
+-      struct ipu_dp_priv *priv = flow->priv;
+-      u32 reg;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      reg = readl(flow->base + DP_COM_CONF);
+-      if (bg_chan)
+-              reg &= ~DP_COM_CONF_GWSEL;
+-      else
+-              reg |= DP_COM_CONF_GWSEL;
+-      writel(reg, flow->base + DP_COM_CONF);
+-
+-      if (enable) {
+-              reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;
+-              writel(reg | ((u32) alpha << 24),
+-                           flow->base + DP_GRAPH_WIND_CTRL);
+-
+-              reg = readl(flow->base + DP_COM_CONF);
+-              writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+-      } else {
+-              reg = readl(flow->base + DP_COM_CONF);
+-              writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+-      }
+-
+-      ipu_srm_dp_update(priv->ipu, true);
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_set_global_alpha);
+-
+-int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
+-{
+-      struct ipu_flow *flow = to_flow(dp);
+-      struct ipu_dp_priv *priv = flow->priv;
+-
+-      writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
+-
+-      ipu_srm_dp_update(priv->ipu, true);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
+-
+-static void ipu_dp_csc_init(struct ipu_flow *flow,
+-              enum ipu_color_space in,
+-              enum ipu_color_space out,
+-              u32 place)
+-{
+-      u32 reg;
+-
+-      reg = readl(flow->base + DP_COM_CONF);
+-      reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+-
+-      if (in == out) {
+-              writel(reg, flow->base + DP_COM_CONF);
+-              return;
+-      }
+-
+-      if (in == IPUV3_COLORSPACE_RGB && out == IPUV3_COLORSPACE_YUV) {
+-              writel(0x099 | (0x12d << 16), flow->base + DP_CSC_A_0);
+-              writel(0x03a | (0x3a9 << 16), flow->base + DP_CSC_A_1);
+-              writel(0x356 | (0x100 << 16), flow->base + DP_CSC_A_2);
+-              writel(0x100 | (0x329 << 16), flow->base + DP_CSC_A_3);
+-              writel(0x3d6 | (0x0000 << 16) | (2 << 30),
+-                              flow->base + DP_CSC_0);
+-              writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
+-                              flow->base + DP_CSC_1);
+-      } else {
+-              writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
+-              writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
+-              writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
+-              writel(0x095 | (0x0ff << 16), flow->base + DP_CSC_A_3);
+-              writel(0x000 | (0x3e42 << 16) | (1 << 30),
+-                              flow->base + DP_CSC_0);
+-              writel(0x10a | (1 << 14) | (0x3dd6 << 16) | (1 << 30),
+-                              flow->base + DP_CSC_1);
+-      }
+-
+-      reg |= place;
+-
+-      writel(reg, flow->base + DP_COM_CONF);
+-}
+-
+-int ipu_dp_setup_channel(struct ipu_dp *dp,
+-              enum ipu_color_space in,
+-              enum ipu_color_space out)
+-{
+-      struct ipu_flow *flow = to_flow(dp);
+-      struct ipu_dp_priv *priv = flow->priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      dp->in_cs = in;
+-
+-      if (!dp->foreground)
+-              flow->out_cs = out;
+-
+-      if (flow->foreground.in_cs == flow->background.in_cs) {
+-              /*
+-               * foreground and background are of same colorspace, put
+-               * colorspace converter after combining unit.
+-               */
+-              ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
+-                              DP_COM_CONF_CSC_DEF_BOTH);
+-      } else {
+-              if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+-                  flow->foreground.in_cs == flow->out_cs)
+-                      /*
+-                       * foreground identical to output, apply color
+-                       * conversion on background
+-                       */
+-                      ipu_dp_csc_init(flow, flow->background.in_cs,
+-                                      flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
+-              else
+-                      ipu_dp_csc_init(flow, flow->foreground.in_cs,
+-                                      flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
+-      }
+-
+-      ipu_srm_dp_update(priv->ipu, true);
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
+-
+-int ipu_dp_enable(struct ipu_soc *ipu)
+-{
+-      struct ipu_dp_priv *priv = ipu->dp_priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      if (!priv->use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_DP_EN);
+-
+-      priv->use_count++;
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_enable);
+-
+-int ipu_dp_enable_channel(struct ipu_dp *dp)
+-{
+-      struct ipu_flow *flow = to_flow(dp);
+-      struct ipu_dp_priv *priv = flow->priv;
+-      u32 reg;
+-
+-      if (!dp->foreground)
+-              return 0;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      reg = readl(flow->base + DP_COM_CONF);
+-      reg |= DP_COM_CONF_FG_EN;
+-      writel(reg, flow->base + DP_COM_CONF);
+-
+-      ipu_srm_dp_update(priv->ipu, true);
+-
+-      mutex_unlock(&priv->mutex);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
+-
+-void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
+-{
+-      struct ipu_flow *flow = to_flow(dp);
+-      struct ipu_dp_priv *priv = flow->priv;
+-      u32 reg, csc;
+-
+-      dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+-
+-      if (!dp->foreground)
+-              return;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      reg = readl(flow->base + DP_COM_CONF);
+-      csc = reg & DP_COM_CONF_CSC_DEF_MASK;
+-      reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+-      if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+-              reg |= DP_COM_CONF_CSC_DEF_BG;
+-
+-      reg &= ~DP_COM_CONF_FG_EN;
+-      writel(reg, flow->base + DP_COM_CONF);
+-
+-      writel(0, flow->base + DP_FG_POS);
+-      ipu_srm_dp_update(priv->ipu, sync);
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
+-
+-void ipu_dp_disable(struct ipu_soc *ipu)
+-{
+-      struct ipu_dp_priv *priv = ipu->dp_priv;
+-
+-      mutex_lock(&priv->mutex);
+-
+-      priv->use_count--;
+-
+-      if (!priv->use_count)
+-              ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
+-
+-      if (priv->use_count < 0)
+-              priv->use_count = 0;
+-
+-      mutex_unlock(&priv->mutex);
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_disable);
+-
+-struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
+-{
+-      struct ipu_dp_priv *priv = ipu->dp_priv;
+-      struct ipu_dp *dp;
+-
+-      if ((flow >> 1) >= IPUV3_NUM_FLOWS)
+-              return ERR_PTR(-EINVAL);
+-
+-      if (flow & 1)
+-              dp = &priv->flow[flow >> 1].foreground;
+-      else
+-              dp = &priv->flow[flow >> 1].background;
+-
+-      if (dp->in_use)
+-              return ERR_PTR(-EBUSY);
+-
+-      dp->in_use = true;
+-
+-      return dp;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_get);
+-
+-void ipu_dp_put(struct ipu_dp *dp)
+-{
+-      dp->in_use = false;
+-}
+-EXPORT_SYMBOL_GPL(ipu_dp_put);
+-
+-int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+-{
+-      struct ipu_dp_priv *priv;
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-      priv->dev = dev;
+-      priv->ipu = ipu;
+-
+-      ipu->dp_priv = priv;
+-
+-      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!priv->base)
+-              return -ENOMEM;
+-
+-      mutex_init(&priv->mutex);
+-
+-      for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+-              priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+-              priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+-              priv->flow[i].foreground.foreground = true;
+-              priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
+-              priv->flow[i].priv = priv;
+-      }
+-
+-      return 0;
+-}
+-
+-void ipu_dp_exit(struct ipu_soc *ipu)
+-{
+-}
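The flow numbering used by ipu_dp_get() in the ipu-dp.c code removed above packs two pieces of information into one integer: bit 0 selects the foreground (1) or background (0) plane, and the remaining bits index the three DP flows (SYNC, ASYNC0, ASYNC1). A minimal standalone sketch (not kernel code) of that decoding:

#include <stdio.h>

#define IPUV3_NUM_FLOWS 3

int main(void)
{
        static const char *const flow_name[IPUV3_NUM_FLOWS] =
                { "SYNC", "ASYNC0", "ASYNC1" };
        unsigned int flow;

        /* mirrors the (flow >> 1) / (flow & 1) split in ipu_dp_get() */
        for (flow = 0; flow < 2 * IPUV3_NUM_FLOWS; flow++)
                printf("flow %u -> %s %s plane\n", flow,
                       flow_name[flow >> 1],
                       (flow & 1) ? "foreground" : "background");
        return 0;
}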
+--- a/drivers/gpu/ipu-v3/ipu-ic.c
++++ /dev/null
+@@ -1,761 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2012-2014 Mentor Graphics Inc.
+- * Copyright 2005-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+- */
+-
+-#include <linux/types.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/spinlock.h>
+-#include <linux/bitrev.h>
+-#include <linux/io.h>
+-#include <linux/err.h>
+-#include <linux/sizes.h>
+-#include "ipu-prv.h"
+-
+-/* IC Register Offsets */
+-#define IC_CONF                 0x0000
+-#define IC_PRP_ENC_RSC          0x0004
+-#define IC_PRP_VF_RSC           0x0008
+-#define IC_PP_RSC               0x000C
+-#define IC_CMBP_1               0x0010
+-#define IC_CMBP_2               0x0014
+-#define IC_IDMAC_1              0x0018
+-#define IC_IDMAC_2              0x001C
+-#define IC_IDMAC_3              0x0020
+-#define IC_IDMAC_4              0x0024
+-
+-/* IC Register Fields */
+-#define IC_CONF_PRPENC_EN       (1 << 0)
+-#define IC_CONF_PRPENC_CSC1     (1 << 1)
+-#define IC_CONF_PRPENC_ROT_EN   (1 << 2)
+-#define IC_CONF_PRPVF_EN        (1 << 8)
+-#define IC_CONF_PRPVF_CSC1      (1 << 9)
+-#define IC_CONF_PRPVF_CSC2      (1 << 10)
+-#define IC_CONF_PRPVF_CMB       (1 << 11)
+-#define IC_CONF_PRPVF_ROT_EN    (1 << 12)
+-#define IC_CONF_PP_EN           (1 << 16)
+-#define IC_CONF_PP_CSC1         (1 << 17)
+-#define IC_CONF_PP_CSC2         (1 << 18)
+-#define IC_CONF_PP_CMB          (1 << 19)
+-#define IC_CONF_PP_ROT_EN       (1 << 20)
+-#define IC_CONF_IC_GLB_LOC_A    (1 << 28)
+-#define IC_CONF_KEY_COLOR_EN    (1 << 29)
+-#define IC_CONF_RWS_EN          (1 << 30)
+-#define IC_CONF_CSI_MEM_WR_EN   (1 << 31)
+-
+-#define IC_IDMAC_1_CB0_BURST_16         (1 << 0)
+-#define IC_IDMAC_1_CB1_BURST_16         (1 << 1)
+-#define IC_IDMAC_1_CB2_BURST_16         (1 << 2)
+-#define IC_IDMAC_1_CB3_BURST_16         (1 << 3)
+-#define IC_IDMAC_1_CB4_BURST_16         (1 << 4)
+-#define IC_IDMAC_1_CB5_BURST_16         (1 << 5)
+-#define IC_IDMAC_1_CB6_BURST_16         (1 << 6)
+-#define IC_IDMAC_1_CB7_BURST_16         (1 << 7)
+-#define IC_IDMAC_1_PRPENC_ROT_MASK      (0x7 << 11)
+-#define IC_IDMAC_1_PRPENC_ROT_OFFSET    11
+-#define IC_IDMAC_1_PRPVF_ROT_MASK       (0x7 << 14)
+-#define IC_IDMAC_1_PRPVF_ROT_OFFSET     14
+-#define IC_IDMAC_1_PP_ROT_MASK          (0x7 << 17)
+-#define IC_IDMAC_1_PP_ROT_OFFSET        17
+-#define IC_IDMAC_1_PP_FLIP_RS           (1 << 22)
+-#define IC_IDMAC_1_PRPVF_FLIP_RS        (1 << 21)
+-#define IC_IDMAC_1_PRPENC_FLIP_RS       (1 << 20)
+-
+-#define IC_IDMAC_2_PRPENC_HEIGHT_MASK   (0x3ff << 0)
+-#define IC_IDMAC_2_PRPENC_HEIGHT_OFFSET 0
+-#define IC_IDMAC_2_PRPVF_HEIGHT_MASK    (0x3ff << 10)
+-#define IC_IDMAC_2_PRPVF_HEIGHT_OFFSET  10
+-#define IC_IDMAC_2_PP_HEIGHT_MASK       (0x3ff << 20)
+-#define IC_IDMAC_2_PP_HEIGHT_OFFSET     20
+-
+-#define IC_IDMAC_3_PRPENC_WIDTH_MASK    (0x3ff << 0)
+-#define IC_IDMAC_3_PRPENC_WIDTH_OFFSET  0
+-#define IC_IDMAC_3_PRPVF_WIDTH_MASK     (0x3ff << 10)
+-#define IC_IDMAC_3_PRPVF_WIDTH_OFFSET   10
+-#define IC_IDMAC_3_PP_WIDTH_MASK        (0x3ff << 20)
+-#define IC_IDMAC_3_PP_WIDTH_OFFSET      20
+-
+-struct ic_task_regoffs {
+-      u32 rsc;
+-      u32 tpmem_csc[2];
+-};
+-
+-struct ic_task_bitfields {
+-      u32 ic_conf_en;
+-      u32 ic_conf_rot_en;
+-      u32 ic_conf_cmb_en;
+-      u32 ic_conf_csc1_en;
+-      u32 ic_conf_csc2_en;
+-      u32 ic_cmb_galpha_bit;
+-};
+-
+-static const struct ic_task_regoffs ic_task_reg[IC_NUM_TASKS] = {
+-      [IC_TASK_ENCODER] = {
+-              .rsc = IC_PRP_ENC_RSC,
+-              .tpmem_csc = {0x2008, 0},
+-      },
+-      [IC_TASK_VIEWFINDER] = {
+-              .rsc = IC_PRP_VF_RSC,
+-              .tpmem_csc = {0x4028, 0x4040},
+-      },
+-      [IC_TASK_POST_PROCESSOR] = {
+-              .rsc = IC_PP_RSC,
+-              .tpmem_csc = {0x6060, 0x6078},
+-      },
+-};
+-
+-static const struct ic_task_bitfields ic_task_bit[IC_NUM_TASKS] = {
+-      [IC_TASK_ENCODER] = {
+-              .ic_conf_en = IC_CONF_PRPENC_EN,
+-              .ic_conf_rot_en = IC_CONF_PRPENC_ROT_EN,
+-              .ic_conf_cmb_en = 0,    /* NA */
+-              .ic_conf_csc1_en = IC_CONF_PRPENC_CSC1,
+-              .ic_conf_csc2_en = 0,   /* NA */
+-              .ic_cmb_galpha_bit = 0, /* NA */
+-      },
+-      [IC_TASK_VIEWFINDER] = {
+-              .ic_conf_en = IC_CONF_PRPVF_EN,
+-              .ic_conf_rot_en = IC_CONF_PRPVF_ROT_EN,
+-              .ic_conf_cmb_en = IC_CONF_PRPVF_CMB,
+-              .ic_conf_csc1_en = IC_CONF_PRPVF_CSC1,
+-              .ic_conf_csc2_en = IC_CONF_PRPVF_CSC2,
+-              .ic_cmb_galpha_bit = 0,
+-      },
+-      [IC_TASK_POST_PROCESSOR] = {
+-              .ic_conf_en = IC_CONF_PP_EN,
+-              .ic_conf_rot_en = IC_CONF_PP_ROT_EN,
+-              .ic_conf_cmb_en = IC_CONF_PP_CMB,
+-              .ic_conf_csc1_en = IC_CONF_PP_CSC1,
+-              .ic_conf_csc2_en = IC_CONF_PP_CSC2,
+-              .ic_cmb_galpha_bit = 8,
+-      },
+-};
+-
+-struct ipu_ic_priv;
+-
+-struct ipu_ic {
+-      enum ipu_ic_task task;
+-      const struct ic_task_regoffs *reg;
+-      const struct ic_task_bitfields *bit;
+-
+-      struct ipu_ic_colorspace in_cs;
+-      struct ipu_ic_colorspace g_in_cs;
+-      struct ipu_ic_colorspace out_cs;
+-
+-      bool graphics;
+-      bool rotation;
+-      bool in_use;
+-
+-      struct ipu_ic_priv *priv;
+-};
+-
+-struct ipu_ic_priv {
+-      void __iomem *base;
+-      void __iomem *tpmem_base;
+-      spinlock_t lock;
+-      struct ipu_soc *ipu;
+-      int use_count;
+-      int irt_use_count;
+-      struct ipu_ic task[IC_NUM_TASKS];
+-};
+-
+-static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
+-{
+-      return readl(ic->priv->base + offset);
+-}
+-
+-static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
+-{
+-      writel(value, ic->priv->base + offset);
+-}
+-
+-static int init_csc(struct ipu_ic *ic,
+-                  const struct ipu_ic_csc *csc,
+-                  int csc_index)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      u32 __iomem *base;
+-      const u16 (*c)[3];
+-      const u16 *a;
+-      u32 param;
+-
+-      base = (u32 __iomem *)
+-              (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
+-
+-      /* Cast to unsigned */
+-      c = (const u16 (*)[3])csc->params.coeff;
+-      a = (const u16 *)csc->params.offset;
+-
+-      param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) |
+-              ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff);
+-      writel(param, base++);
+-
+-      param = ((a[0] & 0x1fe0) >> 5) | (csc->params.scale << 8) |
+-              (csc->params.sat << 10);
+-      writel(param, base++);
+-
+-      param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
+-              ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff);
+-      writel(param, base++);
+-
+-      param = ((a[1] & 0x1fe0) >> 5);
+-      writel(param, base++);
+-
+-      param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) |
+-              ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff);
+-      writel(param, base++);
+-
+-      param = ((a[2] & 0x1fe0) >> 5);
+-      writel(param, base++);
+-
+-      return 0;
+-}
+-
+-static int calc_resize_coeffs(struct ipu_ic *ic,
+-                            u32 in_size, u32 out_size,
+-                            u32 *resize_coeff,
+-                            u32 *downsize_coeff)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      struct ipu_soc *ipu = priv->ipu;
+-      u32 temp_size, temp_downsize;
+-
+-      /*
+-       * Input size cannot be more than 4096, and output size cannot
+-       * be more than 1024
+-       */
+-      if (in_size > 4096) {
+-              dev_err(ipu->dev, "Unsupported resize (in_size > 4096)\n");
+-              return -EINVAL;
+-      }
+-      if (out_size > 1024) {
+-              dev_err(ipu->dev, "Unsupported resize (out_size > 1024)\n");
+-              return -EINVAL;
+-      }
+-
+-      /* Cannot downsize more than 4:1 */
+-      if ((out_size << 2) < in_size) {
+-              dev_err(ipu->dev, "Unsupported downsize\n");
+-              return -EINVAL;
+-      }
+-
+-      /* Compute downsizing coefficient */
+-      temp_downsize = 0;
+-      temp_size = in_size;
+-      while (((temp_size > 1024) || (temp_size >= out_size * 2)) &&
+-             (temp_downsize < 2)) {
+-              temp_size >>= 1;
+-              temp_downsize++;
+-      }
+-      *downsize_coeff = temp_downsize;
+-
+-      /*
+-       * compute resizing coefficient using the following equation:
+-       * resize_coeff = M * (SI - 1) / (SO - 1)
+-       * where M = 2^13, SI = input size, SO = output size
+-       */
+-      *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
+-      if (*resize_coeff >= 16384L) {
+-              dev_err(ipu->dev, "Warning! Overflow on resize coeff.\n");
+-              *resize_coeff = 0x3FFF;
+-      }
+-
+-      return 0;
+-}
+-
+-void ipu_ic_task_enable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      unsigned long flags;
+-      u32 ic_conf;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      ic_conf = ipu_ic_read(ic, IC_CONF);
+-
+-      ic_conf |= ic->bit->ic_conf_en;
+-
+-      if (ic->rotation)
+-              ic_conf |= ic->bit->ic_conf_rot_en;
+-
+-      if (ic->in_cs.cs != ic->out_cs.cs)
+-              ic_conf |= ic->bit->ic_conf_csc1_en;
+-
+-      if (ic->graphics) {
+-              ic_conf |= ic->bit->ic_conf_cmb_en;
+-              ic_conf |= ic->bit->ic_conf_csc1_en;
+-
+-              if (ic->g_in_cs.cs != ic->out_cs.cs)
+-                      ic_conf |= ic->bit->ic_conf_csc2_en;
+-      }
+-
+-      ipu_ic_write(ic, ic_conf, IC_CONF);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_task_enable);
+-
+-void ipu_ic_task_disable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      unsigned long flags;
+-      u32 ic_conf;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      ic_conf = ipu_ic_read(ic, IC_CONF);
+-
+-      ic_conf &= ~(ic->bit->ic_conf_en |
+-                   ic->bit->ic_conf_csc1_en |
+-                   ic->bit->ic_conf_rot_en);
+-      if (ic->bit->ic_conf_csc2_en)
+-              ic_conf &= ~ic->bit->ic_conf_csc2_en;
+-      if (ic->bit->ic_conf_cmb_en)
+-              ic_conf &= ~ic->bit->ic_conf_cmb_en;
+-
+-      ipu_ic_write(ic, ic_conf, IC_CONF);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
+-
+-int ipu_ic_task_graphics_init(struct ipu_ic *ic,
+-                            const struct ipu_ic_colorspace *g_in_cs,
+-                            bool galpha_en, u32 galpha,
+-                            bool colorkey_en, u32 colorkey)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      struct ipu_ic_csc csc2;
+-      unsigned long flags;
+-      u32 reg, ic_conf;
+-      int ret = 0;
+-
+-      if (ic->task == IC_TASK_ENCODER)
+-              return -EINVAL;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      ic_conf = ipu_ic_read(ic, IC_CONF);
+-
+-      if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
+-              struct ipu_ic_csc csc1;
+-
+-              ret = ipu_ic_calc_csc(&csc1,
+-                                    V4L2_YCBCR_ENC_601,
+-                                    V4L2_QUANTIZATION_FULL_RANGE,
+-                                    IPUV3_COLORSPACE_RGB,
+-                                    V4L2_YCBCR_ENC_601,
+-                                    V4L2_QUANTIZATION_FULL_RANGE,
+-                                    IPUV3_COLORSPACE_RGB);
+-              if (ret)
+-                      goto unlock;
+-
+-              /* need transparent CSC1 conversion */
+-              ret = init_csc(ic, &csc1, 0);
+-              if (ret)
+-                      goto unlock;
+-      }
+-
+-      ic->g_in_cs = *g_in_cs;
+-      csc2.in_cs = ic->g_in_cs;
+-      csc2.out_cs = ic->out_cs;
+-
+-      ret = __ipu_ic_calc_csc(&csc2);
+-      if (ret)
+-              goto unlock;
+-
+-      ret = init_csc(ic, &csc2, 1);
+-      if (ret)
+-              goto unlock;
+-
+-      if (galpha_en) {
+-              ic_conf |= IC_CONF_IC_GLB_LOC_A;
+-              reg = ipu_ic_read(ic, IC_CMBP_1);
+-              reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
+-              reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
+-              ipu_ic_write(ic, reg, IC_CMBP_1);
+-      } else
+-              ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
+-
+-      if (colorkey_en) {
+-              ic_conf |= IC_CONF_KEY_COLOR_EN;
+-              ipu_ic_write(ic, colorkey, IC_CMBP_2);
+-      } else
+-              ic_conf &= ~IC_CONF_KEY_COLOR_EN;
+-
+-      ipu_ic_write(ic, ic_conf, IC_CONF);
+-
+-      ic->graphics = true;
+-unlock:
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
+-
+-int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+-                       const struct ipu_ic_csc *csc,
+-                       int in_width, int in_height,
+-                       int out_width, int out_height,
+-                       u32 rsc)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      u32 downsize_coeff, resize_coeff;
+-      unsigned long flags;
+-      int ret = 0;
+-
+-      if (!rsc) {
+-              /* Setup vertical resizing */
+-
+-              ret = calc_resize_coeffs(ic, in_height, out_height,
+-                                       &resize_coeff, &downsize_coeff);
+-              if (ret)
+-                      return ret;
+-
+-              rsc = (downsize_coeff << 30) | (resize_coeff << 16);
+-
+-              /* Setup horizontal resizing */
+-              ret = calc_resize_coeffs(ic, in_width, out_width,
+-                                       &resize_coeff, &downsize_coeff);
+-              if (ret)
+-                      return ret;
+-
+-              rsc |= (downsize_coeff << 14) | resize_coeff;
+-      }
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      ipu_ic_write(ic, rsc, ic->reg->rsc);
+-
+-      /* Setup color space conversion */
+-      ic->in_cs = csc->in_cs;
+-      ic->out_cs = csc->out_cs;
+-
+-      ret = init_csc(ic, csc, 0);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-      return ret;
+-}
+-
+-int ipu_ic_task_init(struct ipu_ic *ic,
+-                   const struct ipu_ic_csc *csc,
+-                   int in_width, int in_height,
+-                   int out_width, int out_height)
+-{
+-      return ipu_ic_task_init_rsc(ic, csc,
+-                                  in_width, in_height,
+-                                  out_width, out_height, 0);
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_task_init);
+-
+-int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
+-                        u32 width, u32 height, int burst_size,
+-                        enum ipu_rotate_mode rot)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      struct ipu_soc *ipu = priv->ipu;
+-      u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
+-      u32 temp_rot = bitrev8(rot) >> 5;
+-      bool need_hor_flip = false;
+-      unsigned long flags;
+-      int ret = 0;
+-
+-      if ((burst_size != 8) && (burst_size != 16)) {
+-              dev_err(ipu->dev, "Illegal burst length for IC\n");
+-              return -EINVAL;
+-      }
+-
+-      width--;
+-      height--;
+-
+-      if (temp_rot & 0x2)     /* Need horizontal flip */
+-              need_hor_flip = true;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
+-      ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
+-      ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
+-
+-      switch (channel->num) {
+-      case IPUV3_CHANNEL_IC_PP_MEM:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
+-
+-              if (need_hor_flip)
+-                      ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
+-
+-              ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
+-              ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
+-
+-              ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
+-              ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_MEM_IC_PP:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
+-              break;
+-      case IPUV3_CHANNEL_MEM_ROT_PP:
+-              ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
+-              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_MEM_IC_PRP_VF:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
+-              break;
+-      case IPUV3_CHANNEL_IC_PRP_ENC_MEM:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
+-
+-              if (need_hor_flip)
+-                      ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
+-
+-              ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
+-              ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
+-
+-              ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
+-              ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_MEM_ROT_ENC:
+-              ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
+-              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_IC_PRP_VF_MEM:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
+-
+-              if (need_hor_flip)
+-                      ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
+-
+-              ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
+-              ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
+-
+-              ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
+-              ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_MEM_ROT_VF:
+-              ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
+-              ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
+-              break;
+-      case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
+-              break;
+-      case IPUV3_CHANNEL_G_MEM_IC_PP:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
+-              break;
+-      case IPUV3_CHANNEL_VDI_MEM_IC_VF:
+-              if (burst_size == 16)
+-                      ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
+-              else
+-                      ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
+-              break;
+-      default:
+-              goto unlock;
+-      }
+-
+-      ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
+-      ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
+-      ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
+-
+-      if (ipu_rot_mode_is_irt(rot))
+-              ic->rotation = true;
+-
+-unlock:
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
+-
+-static void ipu_irt_enable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-
+-      if (!priv->irt_use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
+-
+-      priv->irt_use_count++;
+-}
+-
+-static void ipu_irt_disable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-
+-      if (priv->irt_use_count) {
+-              if (!--priv->irt_use_count)
+-                      ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
+-      }
+-}
+-
+-int ipu_ic_enable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      if (!priv->use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
+-
+-      priv->use_count++;
+-
+-      if (ic->rotation)
+-              ipu_irt_enable(ic);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_enable);
+-
+-int ipu_ic_disable(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      priv->use_count--;
+-
+-      if (!priv->use_count)
+-              ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
+-
+-      if (priv->use_count < 0)
+-              priv->use_count = 0;
+-
+-      if (ic->rotation)
+-              ipu_irt_disable(ic);
+-
+-      ic->rotation = ic->graphics = false;
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_disable);
+-
+-struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task)
+-{
+-      struct ipu_ic_priv *priv = ipu->ic_priv;
+-      unsigned long flags;
+-      struct ipu_ic *ic, *ret;
+-
+-      if (task >= IC_NUM_TASKS)
+-              return ERR_PTR(-EINVAL);
+-
+-      ic = &priv->task[task];
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      if (ic->in_use) {
+-              ret = ERR_PTR(-EBUSY);
+-              goto unlock;
+-      }
+-
+-      ic->in_use = true;
+-      ret = ic;
+-
+-unlock:
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_get);
+-
+-void ipu_ic_put(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-      ic->in_use = false;
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_put);
+-
+-int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+-              unsigned long base, unsigned long tpmem_base)
+-{
+-      struct ipu_ic_priv *priv;
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-
+-      ipu->ic_priv = priv;
+-
+-      spin_lock_init(&priv->lock);
+-      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!priv->base)
+-              return -ENOMEM;
+-      priv->tpmem_base = devm_ioremap(dev, tpmem_base, SZ_64K);
+-      if (!priv->tpmem_base)
+-              return -ENOMEM;
+-
+-      dev_dbg(dev, "IC base: 0x%08lx remapped to %p\n", base, priv->base);
+-
+-      priv->ipu = ipu;
+-
+-      for (i = 0; i < IC_NUM_TASKS; i++) {
+-              priv->task[i].task = i;
+-              priv->task[i].priv = priv;
+-              priv->task[i].reg = &ic_task_reg[i];
+-              priv->task[i].bit = &ic_task_bit[i];
+-      }
+-
+-      return 0;
+-}
+-
+-void ipu_ic_exit(struct ipu_soc *ipu)
+-{
+-}
+-
+-void ipu_ic_dump(struct ipu_ic *ic)
+-{
+-      struct ipu_ic_priv *priv = ic->priv;
+-      struct ipu_soc *ipu = priv->ipu;
+-
+-      dev_dbg(ipu->dev, "IC_CONF = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_CONF));
+-      dev_dbg(ipu->dev, "IC_PRP_ENC_RSC = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_PRP_ENC_RSC));
+-      dev_dbg(ipu->dev, "IC_PRP_VF_RSC = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_PRP_VF_RSC));
+-      dev_dbg(ipu->dev, "IC_PP_RSC = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_PP_RSC));
+-      dev_dbg(ipu->dev, "IC_CMBP_1 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_CMBP_1));
+-      dev_dbg(ipu->dev, "IC_CMBP_2 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_CMBP_2));
+-      dev_dbg(ipu->dev, "IC_IDMAC_1 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_IDMAC_1));
+-      dev_dbg(ipu->dev, "IC_IDMAC_2 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_IDMAC_2));
+-      dev_dbg(ipu->dev, "IC_IDMAC_3 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_IDMAC_3));
+-      dev_dbg(ipu->dev, "IC_IDMAC_4 = \t0x%08X\n",
+-              ipu_ic_read(ic, IC_IDMAC_4));
+-}
+-EXPORT_SYMBOL_GPL(ipu_ic_dump);
+--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
++++ /dev/null
+@@ -1,2475 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2012-2016 Mentor Graphics Inc.
+- *
+- * Queued image conversion support, with tiling and rotation.
+- */
+-
+-#include <linux/interrupt.h>
+-#include <linux/dma-mapping.h>
+-#include <video/imx-ipu-image-convert.h>
+-#include "ipu-prv.h"
+-
+-/*
+- * The IC Resizer has a restriction that the output frame from the
+- * resizer must be 1024 or less in both width (pixels) and height
+- * (lines).
+- *
+- * The image converter attempts to split up a conversion when
+- * the desired output (converted) frame resolution exceeds the
+- * IC resizer limit of 1024 in either dimension.
+- *
+- * If either dimension of the output frame exceeds the limit, the
+- * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+- * of 4*4 or 16 tiles. A conversion is then carried out for each
+- * tile (but taking care to pass the full frame stride length to
+- * the DMA channel's parameter memory!). IDMA double-buffering is used
+- * to convert each tile back-to-back when possible (see note below
+- * when double_buffering boolean is set).
+- *
+- * Note that the input frame must be split up into the same number
+- * of tiles as the output frame:
+- *
+- *                       +---------+-----+
+- *   +-----+---+         |  A      | B   |
+- *   | A   | B |         |         |     |
+- *   +-----+---+   -->   +---------+-----+
+- *   | C   | D |         |  C      | D   |
+- *   +-----+---+         |         |     |
+- *                       +---------+-----+
+- *
+- * Clockwise 90° rotations are handled by first rescaling into a
+- * reusable temporary tile buffer and then rotating with the 8x8
+- * block rotator, writing to the correct destination:
+- *
+- *                                         +-----+-----+
+- *                                         |     |     |
+- *   +-----+---+         +---------+       | C   | A   |
+- *   | A   | B |         | A,B, |  |       |     |     |
+- *   +-----+---+   -->   | C,D  |  |  -->  |     |     |
+- *   | C   | D |         +---------+       +-----+-----+
+- *   +-----+---+                           | D   | B   |
+- *                                         |     |     |
+- *                                         +-----+-----+
+- *
+- * If the 8x8 block rotator is used, horizontal or vertical flipping
+- * is done during the rotation step, otherwise flipping is done
+- * during the scaling step.
+- * With rotation or flipping, tile order changes between input and
+- * output image. Tiles are numbered row major from top left to bottom
+- * right for both input and output image.
+- */
+-
+-#define MAX_STRIPES_W    4
+-#define MAX_STRIPES_H    4
+-#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+-
+-#define MIN_W     16
+-#define MIN_H     8
+-#define MAX_W     4096
+-#define MAX_H     4096
+-
+-enum ipu_image_convert_type {
+-      IMAGE_CONVERT_IN = 0,
+-      IMAGE_CONVERT_OUT,
+-};
+-
+-struct ipu_image_convert_dma_buf {
+-      void          *virt;
+-      dma_addr_t    phys;
+-      unsigned long len;
+-};
+-
+-struct ipu_image_convert_dma_chan {
+-      int in;
+-      int out;
+-      int rot_in;
+-      int rot_out;
+-      int vdi_in_p;
+-      int vdi_in;
+-      int vdi_in_n;
+-};
+-
+-/* dimensions of one tile */
+-struct ipu_image_tile {
+-      u32 width;
+-      u32 height;
+-      u32 left;
+-      u32 top;
+-      /* size and strides are in bytes */
+-      u32 size;
+-      u32 stride;
+-      u32 rot_stride;
+-      /* start Y or packed offset of this tile */
+-      u32 offset;
+-      /* offset from start to tile in U plane, for planar formats */
+-      u32 u_off;
+-      /* offset from start to tile in V plane, for planar formats */
+-      u32 v_off;
+-};
+-
+-struct ipu_image_convert_image {
+-      struct ipu_image base;
+-      enum ipu_image_convert_type type;
+-
+-      const struct ipu_image_pixfmt *fmt;
+-      unsigned int stride;
+-
+-      /* # of rows (horizontal stripes) if dest height is > 1024 */
+-      unsigned int num_rows;
+-      /* # of columns (vertical stripes) if dest width is > 1024 */
+-      unsigned int num_cols;
+-
+-      struct ipu_image_tile tile[MAX_TILES];
+-};
+-
+-struct ipu_image_pixfmt {
+-      u32     fourcc;        /* V4L2 fourcc */
+-      int     bpp;           /* total bpp */
+-      int     uv_width_dec;  /* decimation in width for U/V planes */
+-      int     uv_height_dec; /* decimation in height for U/V planes */
+-      bool    planar;        /* planar format */
+-      bool    uv_swapped;    /* U and V planes are swapped */
+-      bool    uv_packed;     /* partial planar (U and V in same plane) */
+-};
+-
+-struct ipu_image_convert_ctx;
+-struct ipu_image_convert_chan;
+-struct ipu_image_convert_priv;
+-
+-struct ipu_image_convert_ctx {
+-      struct ipu_image_convert_chan *chan;
+-
+-      ipu_image_convert_cb_t complete;
+-      void *complete_context;
+-
+-      /* Source/destination image data and rotation mode */
+-      struct ipu_image_convert_image in;
+-      struct ipu_image_convert_image out;
+-      struct ipu_ic_csc csc;
+-      enum ipu_rotate_mode rot_mode;
+-      u32 downsize_coeff_h;
+-      u32 downsize_coeff_v;
+-      u32 image_resize_coeff_h;
+-      u32 image_resize_coeff_v;
+-      u32 resize_coeffs_h[MAX_STRIPES_W];
+-      u32 resize_coeffs_v[MAX_STRIPES_H];
+-
+-      /* intermediate buffer for rotation */
+-      struct ipu_image_convert_dma_buf rot_intermediate[2];
+-
+-      /* current buffer number for double buffering */
+-      int cur_buf_num;
+-
+-      bool aborting;
+-      struct completion aborted;
+-
+-      /* can we use double-buffering for this conversion operation? */
+-      bool double_buffering;
+-      /* num_rows * num_cols */
+-      unsigned int num_tiles;
+-      /* next tile to process */
+-      unsigned int next_tile;
+-      /* where to place converted tile in dest image */
+-      unsigned int out_tile_map[MAX_TILES];
+-
+-      struct list_head list;
+-};
+-
+-struct ipu_image_convert_chan {
+-      struct ipu_image_convert_priv *priv;
+-
+-      enum ipu_ic_task ic_task;
+-      const struct ipu_image_convert_dma_chan *dma_ch;
+-
+-      struct ipu_ic *ic;
+-      struct ipuv3_channel *in_chan;
+-      struct ipuv3_channel *out_chan;
+-      struct ipuv3_channel *rotation_in_chan;
+-      struct ipuv3_channel *rotation_out_chan;
+-
+-      /* the IPU end-of-frame irqs */
+-      int out_eof_irq;
+-      int rot_out_eof_irq;
+-
+-      spinlock_t irqlock;
+-
+-      /* list of convert contexts */
+-      struct list_head ctx_list;
+-      /* queue of conversion runs */
+-      struct list_head pending_q;
+-      /* queue of completed runs */
+-      struct list_head done_q;
+-
+-      /* the current conversion run */
+-      struct ipu_image_convert_run *current_run;
+-};
+-
+-struct ipu_image_convert_priv {
+-      struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+-      struct ipu_soc *ipu;
+-};
+-
+-static const struct ipu_image_convert_dma_chan
+-image_convert_dma_chan[IC_NUM_TASKS] = {
+-      [IC_TASK_VIEWFINDER] = {
+-              .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+-              .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+-              .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+-              .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+-              .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+-              .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+-              .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+-      },
+-      [IC_TASK_POST_PROCESSOR] = {
+-              .in = IPUV3_CHANNEL_MEM_IC_PP,
+-              .out = IPUV3_CHANNEL_IC_PP_MEM,
+-              .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+-              .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+-      },
+-};
+-
+-static const struct ipu_image_pixfmt image_convert_formats[] = {
+-      {
+-              .fourcc = V4L2_PIX_FMT_RGB565,
+-              .bpp    = 16,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_RGB24,
+-              .bpp    = 24,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_BGR24,
+-              .bpp    = 24,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_RGB32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_BGR32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_XRGB32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_XBGR32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_BGRX32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_RGBX32,
+-              .bpp    = 32,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_YUYV,
+-              .bpp    = 16,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 1,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_UYVY,
+-              .bpp    = 16,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 1,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_YUV420,
+-              .bpp    = 12,
+-              .planar = true,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 2,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_YVU420,
+-              .bpp    = 12,
+-              .planar = true,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 2,
+-              .uv_swapped = true,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_NV12,
+-              .bpp    = 12,
+-              .planar = true,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 2,
+-              .uv_packed = true,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_YUV422P,
+-              .bpp    = 16,
+-              .planar = true,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 1,
+-      }, {
+-              .fourcc = V4L2_PIX_FMT_NV16,
+-              .bpp    = 16,
+-              .planar = true,
+-              .uv_width_dec = 2,
+-              .uv_height_dec = 1,
+-              .uv_packed = true,
+-      },
+-};
+-
+-static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+-{
+-      const struct ipu_image_pixfmt *ret = NULL;
+-      unsigned int i;
+-
+-      for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+-              if (image_convert_formats[i].fourcc == fourcc) {
+-                      ret = &image_convert_formats[i];
+-                      break;
+-              }
+-      }
+-
+-      return ret;
+-}
+-
+-static void dump_format(struct ipu_image_convert_ctx *ctx,
+-                      struct ipu_image_convert_image *ic_image)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-
+-      dev_dbg(priv->ipu->dev,
+-              "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
+-              chan->ic_task, ctx,
+-              ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+-              ic_image->base.pix.width, ic_image->base.pix.height,
+-              ic_image->num_cols, ic_image->num_rows,
+-              ic_image->fmt->fourcc & 0xff,
+-              (ic_image->fmt->fourcc >> 8) & 0xff,
+-              (ic_image->fmt->fourcc >> 16) & 0xff,
+-              (ic_image->fmt->fourcc >> 24) & 0xff);
+-}
+-
+-int ipu_image_convert_enum_format(int index, u32 *fourcc)
+-{
+-      const struct ipu_image_pixfmt *fmt;
+-
+-      if (index >= (int)ARRAY_SIZE(image_convert_formats))
+-              return -EINVAL;
+-
+-      /* Format found */
+-      fmt = &image_convert_formats[index];
+-      *fourcc = fmt->fourcc;
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+-
+-static void free_dma_buf(struct ipu_image_convert_priv *priv,
+-                       struct ipu_image_convert_dma_buf *buf)
+-{
+-      if (buf->virt)
+-              dma_free_coherent(priv->ipu->dev,
+-                                buf->len, buf->virt, buf->phys);
+-      buf->virt = NULL;
+-      buf->phys = 0;
+-}
+-
+-static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+-                       struct ipu_image_convert_dma_buf *buf,
+-                       int size)
+-{
+-      buf->len = PAGE_ALIGN(size);
+-      buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+-                                     GFP_DMA | GFP_KERNEL);
+-      if (!buf->virt) {
+-              dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+-              return -ENOMEM;
+-      }
+-
+-      return 0;
+-}
+-
+-static inline int num_stripes(int dim)
+-{
+-      return (dim - 1) / 1024 + 1;
+-}
+-
+-/*
+- * Calculate downsizing coefficients, which are the same for all tiles,
+- * and initial bilinear resizing coefficients, which are used to find the
+- * best seam positions.
+- * Also determine the number of tiles necessary to guarantee that no tile
+- * is larger than 1024 pixels in either dimension at the output and between
+- * IC downsizing and main processing sections.
+- */
+-static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
+-                                        struct ipu_image *in,
+-                                        struct ipu_image *out)
+-{
+-      u32 downsized_width = in->rect.width;
+-      u32 downsized_height = in->rect.height;
+-      u32 downsize_coeff_v = 0;
+-      u32 downsize_coeff_h = 0;
+-      u32 resized_width = out->rect.width;
+-      u32 resized_height = out->rect.height;
+-      u32 resize_coeff_h;
+-      u32 resize_coeff_v;
+-      u32 cols;
+-      u32 rows;
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              resized_width = out->rect.height;
+-              resized_height = out->rect.width;
+-      }
+-
+-      /* Do not let invalid input lead to an endless loop below */
+-      if (WARN_ON(resized_width == 0 || resized_height == 0))
+-              return -EINVAL;
+-
+-      while (downsized_width >= resized_width * 2) {
+-              downsized_width >>= 1;
+-              downsize_coeff_h++;
+-      }
+-
+-      while (downsized_height >= resized_height * 2) {
+-              downsized_height >>= 1;
+-              downsize_coeff_v++;
+-      }
+-
+-      /*
+-       * Calculate the bilinear resizing coefficients that could be used if
+-       * we were converting with a single tile. The bottom right output pixel
+-       * should sample as close as possible to the bottom right input pixel
+-       * out of the decimator, but not overshoot it:
+-       */
+-      resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
+-      resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
+-
+-      /*
+-       * Both the output of the IC downsizing section before being passed to
+-       * the IC main processing section and the final output of the IC main
+-       * processing section must be <= 1024 pixels in both dimensions.
+-       */
+-      cols = num_stripes(max_t(u32, downsized_width, resized_width));
+-      rows = num_stripes(max_t(u32, downsized_height, resized_height));
+-
+-      dev_dbg(ctx->chan->priv->ipu->dev,
+-              "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
+-              __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
+-              resize_coeff_v, cols, rows);
+-
+-      if (downsize_coeff_h > 2 || downsize_coeff_v  > 2 ||
+-          resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
+-              return -EINVAL;
+-
+-      ctx->downsize_coeff_h = downsize_coeff_h;
+-      ctx->downsize_coeff_v = downsize_coeff_v;
+-      ctx->image_resize_coeff_h = resize_coeff_h;
+-      ctx->image_resize_coeff_v = resize_coeff_v;
+-      ctx->in.num_cols = cols;
+-      ctx->in.num_rows = rows;
+-
+-      return 0;
+-}
+-
+-#define round_closest(x, y) round_down((x) + (y)/2, (y))
+-
+-/*
+- * Find the best aligned seam position for the given column / row index.
+- * Rotation and image offsets are out of scope.
+- *
+- * @index: column / row index, used to calculate valid interval
+- * @in_edge: input right / bottom edge
+- * @out_edge: output right / bottom edge
+- * @in_align: input alignment, either horizontal 8-byte line start address
+- *            alignment, or pixel alignment due to image format
+- * @out_align: output alignment, either horizontal 8-byte line start address
+- *             alignment, or pixel alignment due to image format or rotator
+- *             block size
+- * @in_burst: horizontal input burst size in case of horizontal flip
+- * @out_burst: horizontal output burst size or rotator block size
+- * @downsize_coeff: downsizing section coefficient
+- * @resize_coeff: main processing section resizing coefficient
+- * @_in_seam: aligned input seam position return value
+- * @_out_seam: aligned output seam position return value
+- */
+-static void find_best_seam(struct ipu_image_convert_ctx *ctx,
+-                         unsigned int index,
+-                         unsigned int in_edge,
+-                         unsigned int out_edge,
+-                         unsigned int in_align,
+-                         unsigned int out_align,
+-                         unsigned int in_burst,
+-                         unsigned int out_burst,
+-                         unsigned int downsize_coeff,
+-                         unsigned int resize_coeff,
+-                         u32 *_in_seam,
+-                         u32 *_out_seam)
+-{
+-      struct device *dev = ctx->chan->priv->ipu->dev;
+-      unsigned int out_pos;
+-      /* Input / output seam position candidates */
+-      unsigned int out_seam = 0;
+-      unsigned int in_seam = 0;
+-      unsigned int min_diff = UINT_MAX;
+-      unsigned int out_start;
+-      unsigned int out_end;
+-      unsigned int in_start;
+-      unsigned int in_end;
+-
+-      /* Start within 1024 pixels of the right / bottom edge */
+-      out_start = max_t(int, index * out_align, out_edge - 1024);
+-      /* End before having to add more columns to the left / rows above */
+-      out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
+-
+-      /*
+-       * Limit input seam position to make sure that the downsized input tile
+-       * to the right or bottom does not exceed 1024 pixels.
+-       */
+-      in_start = max_t(int, index * in_align,
+-                       in_edge - (1024 << downsize_coeff));
+-      in_end = min_t(unsigned int, in_edge,
+-                     index * (1024 << downsize_coeff) + 1);
+-
+-      /*
+-       * Output tiles must start at a multiple of 8 bytes horizontally and
+-       * possibly at an even line horizontally depending on the pixel format.
+-       * Only consider output aligned positions for the seam.
+-       */
+-      out_start = round_up(out_start, out_align);
+-      for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
+-              unsigned int in_pos;
+-              unsigned int in_pos_aligned;
+-              unsigned int in_pos_rounded;
+-              unsigned int abs_diff;
+-
+-              /*
+-               * Tiles in the right row / bottom column may not be allowed to
+-               * overshoot horizontally / vertically. out_burst may be the
+-               * actual DMA burst size, or the rotator block size.
+-               */
+-              if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
+-                      continue;
+-
+-              /*
+-               * Input sample position, corresponding to out_pos, 19.13 fixed
+-               * point.
+-               */
+-              in_pos = (out_pos * resize_coeff) << downsize_coeff;
+-              /*
+-               * The closest input sample position that we could actually
+-               * start the input tile at, 19.13 fixed point.
+-               */
+-              in_pos_aligned = round_closest(in_pos, 8192U * in_align);
+-              /* Convert 19.13 fixed point to integer */
+-              in_pos_rounded = in_pos_aligned / 8192U;
+-
+-              if (in_pos_rounded < in_start)
+-                      continue;
+-              if (in_pos_rounded >= in_end)
+-                      break;
+-
+-              if ((in_burst > 1) &&
+-                  (in_edge - in_pos_rounded) % in_burst)
+-                      continue;
+-
+-              if (in_pos < in_pos_aligned)
+-                      abs_diff = in_pos_aligned - in_pos;
+-              else
+-                      abs_diff = in_pos - in_pos_aligned;
+-
+-              if (abs_diff < min_diff) {
+-                      in_seam = in_pos_rounded;
+-                      out_seam = out_pos;
+-                      min_diff = abs_diff;
+-              }
+-      }
+-
+-      *_out_seam = out_seam;
+-      *_in_seam = in_seam;
+-
+-      dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
+-              __func__, out_seam, out_align, out_start, out_end,
+-              in_seam, in_align, in_start, in_end, min_diff / 8192,
+-              DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
+-}
+-
+-/*
+- * Tile left edges are required to be aligned to multiples of 8 bytes
+- * by the IDMAC.
+- */
+-static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
+-{
+-      if (fmt->planar)
+-              return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
+-      else
+-              return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
+-}
+-
+-/*
+- * Tile top edge alignment is only limited by chroma subsampling.
+- */
+-static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
+-{
+-      return fmt->uv_height_dec > 1 ? 2 : 1;
+-}
+-
+-static inline u32 tile_width_align(enum ipu_image_convert_type type,
+-                                 const struct ipu_image_pixfmt *fmt,
+-                                 enum ipu_rotate_mode rot_mode)
+-{
+-      if (type == IMAGE_CONVERT_IN) {
+-              /*
+-               * The IC burst reads 8 pixels at a time. Reading beyond the
+-               * end of the line is usually acceptable. Those pixels are
+-               * ignored, unless the IC has to write the scaled line in
+-               * reverse.
+-               */
+-              return (!ipu_rot_mode_is_irt(rot_mode) &&
+-                      (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
+-      }
+-
+-      /*
+-       * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+-       * formats to guarantee 8-byte aligned line start addresses in the
+-       * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+-       * for all other formats.
+-       */
+-      return (ipu_rot_mode_is_irt(rot_mode) &&
+-              fmt->planar && !fmt->uv_packed) ?
+-              8 * fmt->uv_width_dec : 8;
+-}
+-
+-static inline u32 tile_height_align(enum ipu_image_convert_type type,
+-                                  const struct ipu_image_pixfmt *fmt,
+-                                  enum ipu_rotate_mode rot_mode)
+-{
+-      if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
+-              return 2;
+-
+-      /*
+-       * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+-       * formats to guarantee 8-byte aligned line start addresses in the
+-       * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+-       * for all other formats.
+-       */
+-      return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
+-}
+-
+-/*
+- * Fill in left position and width for all tiles in an input column, and
+- * for all corresponding output tiles. If the 90° rotator is used, the output
+- * tiles are in a row, and output tile top position and height are set.
+- */
+-static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
+-                           unsigned int col,
+-                           struct ipu_image_convert_image *in,
+-                           unsigned int in_left, unsigned int in_width,
+-                           struct ipu_image_convert_image *out,
+-                           unsigned int out_left, unsigned int out_width)
+-{
+-      unsigned int row, tile_idx;
+-      struct ipu_image_tile *in_tile, *out_tile;
+-
+-      for (row = 0; row < in->num_rows; row++) {
+-              tile_idx = in->num_cols * row + col;
+-              in_tile = &in->tile[tile_idx];
+-              out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+-
+-              in_tile->left = in_left;
+-              in_tile->width = in_width;
+-
+-              if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-                      out_tile->top = out_left;
+-                      out_tile->height = out_width;
+-              } else {
+-                      out_tile->left = out_left;
+-                      out_tile->width = out_width;
+-              }
+-      }
+-}
+-
+-/*
+- * Fill in top position and height for all tiles in an input row, and
+- * for all corresponding output tiles. If the 90° rotator is used, the output
+- * tiles are in a column, and output tile left position and width are set.
+- */
+-static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
+-                        struct ipu_image_convert_image *in,
+-                        unsigned int in_top, unsigned int in_height,
+-                        struct ipu_image_convert_image *out,
+-                        unsigned int out_top, unsigned int out_height)
+-{
+-      unsigned int col, tile_idx;
+-      struct ipu_image_tile *in_tile, *out_tile;
+-
+-      for (col = 0; col < in->num_cols; col++) {
+-              tile_idx = in->num_cols * row + col;
+-              in_tile = &in->tile[tile_idx];
+-              out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+-
+-              in_tile->top = in_top;
+-              in_tile->height = in_height;
+-
+-              if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-                      out_tile->left = out_top;
+-                      out_tile->width = out_height;
+-              } else {
+-                      out_tile->top = out_top;
+-                      out_tile->height = out_height;
+-              }
+-      }
+-}
+-
+-/*
+- * Find the best horizontal and vertical seam positions to split into tiles.
+- * Minimize the fractional part of the input sampling position for the
+- * top / left pixels of each tile.
+- */
+-static void find_seams(struct ipu_image_convert_ctx *ctx,
+-                     struct ipu_image_convert_image *in,
+-                     struct ipu_image_convert_image *out)
+-{
+-      struct device *dev = ctx->chan->priv->ipu->dev;
+-      unsigned int resized_width = out->base.rect.width;
+-      unsigned int resized_height = out->base.rect.height;
+-      unsigned int col;
+-      unsigned int row;
+-      unsigned int in_left_align = tile_left_align(in->fmt);
+-      unsigned int in_top_align = tile_top_align(in->fmt);
+-      unsigned int out_left_align = tile_left_align(out->fmt);
+-      unsigned int out_top_align = tile_top_align(out->fmt);
+-      unsigned int out_width_align = tile_width_align(out->type, out->fmt,
+-                                                      ctx->rot_mode);
+-      unsigned int out_height_align = tile_height_align(out->type, out->fmt,
+-                                                        ctx->rot_mode);
+-      unsigned int in_right = in->base.rect.width;
+-      unsigned int in_bottom = in->base.rect.height;
+-      unsigned int out_right = out->base.rect.width;
+-      unsigned int out_bottom = out->base.rect.height;
+-      unsigned int flipped_out_left;
+-      unsigned int flipped_out_top;
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              /* Switch width/height and align top left to IRT block size */
+-              resized_width = out->base.rect.height;
+-              resized_height = out->base.rect.width;
+-              out_left_align = out_height_align;
+-              out_top_align = out_width_align;
+-              out_width_align = out_left_align;
+-              out_height_align = out_top_align;
+-              out_right = out->base.rect.height;
+-              out_bottom = out->base.rect.width;
+-      }
+-
+-      for (col = in->num_cols - 1; col > 0; col--) {
+-              bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
+-                                        !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+-              bool allow_out_overshoot = (col < in->num_cols - 1) &&
+-                                         !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+-              unsigned int in_left;
+-              unsigned int out_left;
+-
+-              /*
+-               * Align input width to burst length if the scaling step flips
+-               * horizontally.
+-               */
+-
+-              find_best_seam(ctx, col,
+-                             in_right, out_right,
+-                             in_left_align, out_left_align,
+-                             allow_in_overshoot ? 1 : 8 /* burst length */,
+-                             allow_out_overshoot ? 1 : out_width_align,
+-                             ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
+-                             &in_left, &out_left);
+-
+-              if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+-                      flipped_out_left = resized_width - out_right;
+-              else
+-                      flipped_out_left = out_left;
+-
+-              fill_tile_column(ctx, col, in, in_left, in_right - in_left,
+-                               out, flipped_out_left, out_right - out_left);
+-
+-              dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
+-                      in_left, in_right - in_left,
+-                      flipped_out_left, out_right - out_left);
+-
+-              in_right = in_left;
+-              out_right = out_left;
+-      }
+-
+-      flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
+-                         resized_width - out_right : 0;
+-
+-      fill_tile_column(ctx, 0, in, 0, in_right,
+-                       out, flipped_out_left, out_right);
+-
+-      dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
+-              in_right, flipped_out_left, out_right);
+-
+-      for (row = in->num_rows - 1; row > 0; row--) {
+-              bool allow_overshoot = row < in->num_rows - 1;
+-              unsigned int in_top;
+-              unsigned int out_top;
+-
+-              find_best_seam(ctx, row,
+-                             in_bottom, out_bottom,
+-                             in_top_align, out_top_align,
+-                             1, allow_overshoot ? 1 : out_height_align,
+-                             ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
+-                             &in_top, &out_top);
+-
+-              if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+-                  ipu_rot_mode_is_irt(ctx->rot_mode))
+-                      flipped_out_top = resized_height - out_bottom;
+-              else
+-                      flipped_out_top = out_top;
+-
+-              fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
+-                            out, flipped_out_top, out_bottom - out_top);
+-
+-              dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
+-                      in_top, in_bottom - in_top,
+-                      flipped_out_top, out_bottom - out_top);
+-
+-              in_bottom = in_top;
+-              out_bottom = out_top;
+-      }
+-
+-      if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+-          ipu_rot_mode_is_irt(ctx->rot_mode))
+-              flipped_out_top = resized_height - out_bottom;
+-      else
+-              flipped_out_top = 0;
+-
+-      fill_tile_row(ctx, 0, in, 0, in_bottom,
+-                    out, flipped_out_top, out_bottom);
+-
+-      dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
+-              in_bottom, flipped_out_top, out_bottom);
+-}
+-
+-static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+-                              struct ipu_image_convert_image *image)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      unsigned int max_width = 1024;
+-      unsigned int max_height = 1024;
+-      unsigned int i;
+-
+-      if (image->type == IMAGE_CONVERT_IN) {
+-              /* Up to 4096x4096 input tile size */
+-              max_width <<= ctx->downsize_coeff_h;
+-              max_height <<= ctx->downsize_coeff_v;
+-      }
+-
+-      for (i = 0; i < ctx->num_tiles; i++) {
+-              struct ipu_image_tile *tile;
+-              const unsigned int row = i / image->num_cols;
+-              const unsigned int col = i % image->num_cols;
+-
+-              if (image->type == IMAGE_CONVERT_OUT)
+-                      tile = &image->tile[ctx->out_tile_map[i]];
+-              else
+-                      tile = &image->tile[i];
+-
+-              tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+-                      tile->width;
+-
+-              if (image->fmt->planar) {
+-                      tile->stride = tile->width;
+-                      tile->rot_stride = tile->height;
+-              } else {
+-                      tile->stride =
+-                              (image->fmt->bpp * tile->width) >> 3;
+-                      tile->rot_stride =
+-                              (image->fmt->bpp * tile->height) >> 3;
+-              }
+-
+-              dev_dbg(priv->ipu->dev,
+-                      "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
+-                      chan->ic_task, ctx,
+-                      image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
+-                      row, col,
+-                      tile->width, tile->height, tile->left, tile->top);
+-
+-              if (!tile->width || tile->width > max_width ||
+-                  !tile->height || tile->height > max_height) {
+-                      dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
+-                              image->type == IMAGE_CONVERT_IN ? "input" :
+-                              "output", tile->width, tile->height);
+-                      return -EINVAL;
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-/*
+- * Use the rotation transformation to find the tile coordinates
+- * (row, col) of a tile in the destination frame that corresponds
+- * to the given tile coordinates of a source frame. The destination
+- * coordinate is then converted to a tile index.
+- */
+-static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+-                              int src_row, int src_col)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_image *s_image = &ctx->in;
+-      struct ipu_image_convert_image *d_image = &ctx->out;
+-      int dst_row, dst_col;
+-
+-      /* with no rotation it's a 1:1 mapping */
+-      if (ctx->rot_mode == IPU_ROTATE_NONE)
+-              return src_row * s_image->num_cols + src_col;
+-
+-      /*
+-       * before doing the transform, first we have to translate
+-       * source row,col for an origin in the center of s_image
+-       */
+-      src_row = src_row * 2 - (s_image->num_rows - 1);
+-      src_col = src_col * 2 - (s_image->num_cols - 1);
+-
+-      /* do the rotation transform */
+-      if (ctx->rot_mode & IPU_ROT_BIT_90) {
+-              dst_col = -src_row;
+-              dst_row = src_col;
+-      } else {
+-              dst_col = src_col;
+-              dst_row = src_row;
+-      }
+-
+-      /* apply flip */
+-      if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+-              dst_col = -dst_col;
+-      if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+-              dst_row = -dst_row;
+-
+-      dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+-              chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+-
+-      /*
+-       * finally translate dest row,col using an origin in upper
+-       * left of d_image
+-       */
+-      dst_row += d_image->num_rows - 1;
+-      dst_col += d_image->num_cols - 1;
+-      dst_row /= 2;
+-      dst_col /= 2;
+-
+-      return dst_row * d_image->num_cols + dst_col;
+-}
+-
+-/*
+- * Fill the out_tile_map[] with transformed destination tile indices.
+- */
+-static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
+-{
+-      struct ipu_image_convert_image *s_image = &ctx->in;
+-      unsigned int row, col, tile = 0;
+-
+-      for (row = 0; row < s_image->num_rows; row++) {
+-              for (col = 0; col < s_image->num_cols; col++) {
+-                      ctx->out_tile_map[tile] =
+-                              transform_tile_index(ctx, row, col);
+-                      tile++;
+-              }
+-      }
+-}
+-
+-static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+-                                  struct ipu_image_convert_image *image)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      const struct ipu_image_pixfmt *fmt = image->fmt;
+-      unsigned int row, col, tile = 0;
+-      u32 H, top, y_stride, uv_stride;
+-      u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
+-      u32 y_row_off, y_col_off, y_off;
+-      u32 y_size, uv_size;
+-
+-      /* setup some convenience vars */
+-      H = image->base.pix.height;
+-
+-      y_stride = image->stride;
+-      uv_stride = y_stride / fmt->uv_width_dec;
+-      if (fmt->uv_packed)
+-              uv_stride *= 2;
+-
+-      y_size = H * y_stride;
+-      uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
+-
+-      for (row = 0; row < image->num_rows; row++) {
+-              top = image->tile[tile].top;
+-              y_row_off = top * y_stride;
+-              uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
+-
+-              for (col = 0; col < image->num_cols; col++) {
+-                      y_col_off = image->tile[tile].left;
+-                      uv_col_off = y_col_off / fmt->uv_width_dec;
+-                      if (fmt->uv_packed)
+-                              uv_col_off *= 2;
+-
+-                      y_off = y_row_off + y_col_off;
+-                      uv_off = uv_row_off + uv_col_off;
+-
+-                      u_off = y_size - y_off + uv_off;
+-                      v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
+-                      if (fmt->uv_swapped) {
+-                              tmp = u_off;
+-                              u_off = v_off;
+-                              v_off = tmp;
+-                      }
+-
+-                      image->tile[tile].offset = y_off;
+-                      image->tile[tile].u_off = u_off;
+-                      image->tile[tile++].v_off = v_off;
+-
+-                      if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
+-                              dev_err(priv->ipu->dev,
+-                                      "task %u: ctx %p: %s@[%d,%d]: "
+-                                      "y_off %08x, u_off %08x, v_off %08x\n",
+-                                      chan->ic_task, ctx,
+-                                      image->type == IMAGE_CONVERT_IN ?
+-                                      "Input" : "Output", row, col,
+-                                      y_off, u_off, v_off);
+-                              return -EINVAL;
+-                      }
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+-                                  struct ipu_image_convert_image *image)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      const struct ipu_image_pixfmt *fmt = image->fmt;
+-      unsigned int row, col, tile = 0;
+-      u32 bpp, stride, offset;
+-      u32 row_off, col_off;
+-
+-      /* setup some convenience vars */
+-      stride = image->stride;
+-      bpp = fmt->bpp;
+-
+-      for (row = 0; row < image->num_rows; row++) {
+-              row_off = image->tile[tile].top * stride;
+-
+-              for (col = 0; col < image->num_cols; col++) {
+-                      col_off = (image->tile[tile].left * bpp) >> 3;
+-
+-                      offset = row_off + col_off;
+-
+-                      image->tile[tile].offset = offset;
+-                      image->tile[tile].u_off = 0;
+-                      image->tile[tile++].v_off = 0;
+-
+-                      if (offset & 0x7) {
+-                              dev_err(priv->ipu->dev,
+-                                      "task %u: ctx %p: %s@[%d,%d]: "
+-                                      "phys %08x\n",
+-                                      chan->ic_task, ctx,
+-                                      image->type == IMAGE_CONVERT_IN ?
+-                                      "Input" : "Output", row, col,
+-                                      row_off + col_off);
+-                              return -EINVAL;
+-                      }
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+-                            struct ipu_image_convert_image *image)
+-{
+-      if (image->fmt->planar)
+-              return calc_tile_offsets_planar(ctx, image);
+-
+-      return calc_tile_offsets_packed(ctx, image);
+-}
+-
+-/*
+- * Calculate the resizing ratio for the IC main processing section given input
+- * size, fixed downsizing coefficient, and output size.
+- * Either round to closest for the next tile's first pixel to minimize seams
+- * and distortion (for all but right column / bottom row), or round down to
+- * avoid sampling beyond the edges of the input image for this tile's last
+- * pixel.
+- * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
+- */
+-static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
+-                           u32 output_size, bool allow_overshoot)
+-{
+-      u32 downsized = input_size >> downsize_coeff;
+-
+-      if (allow_overshoot)
+-              return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
+-      else
+-              return 8192 * (downsized - 1) / (output_size - 1);
+-}
+-
+-/*
+- * Slightly modify resize coefficients per tile to hide the bilinear
+- * interpolator reset at tile borders, shifting the right / bottom edge
+- * by up to a half input pixel. This removes noticeable seams between
+- * tiles at higher upscaling factors.
+- */
+-static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_tile *in_tile, *out_tile;
+-      unsigned int col, row, tile_idx;
+-      unsigned int last_output;
+-
+-      for (col = 0; col < ctx->in.num_cols; col++) {
+-              bool closest = (col < ctx->in.num_cols - 1) &&
+-                             !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+-              u32 resized_width;
+-              u32 resize_coeff_h;
+-              u32 in_width;
+-
+-              tile_idx = col;
+-              in_tile = &ctx->in.tile[tile_idx];
+-              out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+-
+-              if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-                      resized_width = out_tile->height;
+-              else
+-                      resized_width = out_tile->width;
+-
+-              resize_coeff_h = calc_resize_coeff(in_tile->width,
+-                                                 ctx->downsize_coeff_h,
+-                                                 resized_width, closest);
+-
+-              dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
+-                      __func__, col, resize_coeff_h);
+-
+-              /*
+-               * With the horizontal scaling factor known, round up resized
+-               * width (output width or height) to burst size.
+-               */
+-              resized_width = round_up(resized_width, 8);
+-
+-              /*
+-               * Calculate input width from the last accessed input pixel
+-               * given resized width and scaling coefficients. Round up to
+-               * burst size.
+-               */
+-              last_output = resized_width - 1;
+-              if (closest && ((last_output * resize_coeff_h) % 8192))
+-                      last_output++;
+-              in_width = round_up(
+-                      (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
+-                      << ctx->downsize_coeff_h, 8);
+-
+-              for (row = 0; row < ctx->in.num_rows; row++) {
+-                      tile_idx = row * ctx->in.num_cols + col;
+-                      in_tile = &ctx->in.tile[tile_idx];
+-                      out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+-
+-                      if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-                              out_tile->height = resized_width;
+-                      else
+-                              out_tile->width = resized_width;
+-
+-                      in_tile->width = in_width;
+-              }
+-
+-              ctx->resize_coeffs_h[col] = resize_coeff_h;
+-      }
+-
+-      for (row = 0; row < ctx->in.num_rows; row++) {
+-              bool closest = (row < ctx->in.num_rows - 1) &&
+-                             !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
+-              u32 resized_height;
+-              u32 resize_coeff_v;
+-              u32 in_height;
+-
+-              tile_idx = row * ctx->in.num_cols;
+-              in_tile = &ctx->in.tile[tile_idx];
+-              out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+-
+-              if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-                      resized_height = out_tile->width;
+-              else
+-                      resized_height = out_tile->height;
+-
+-              resize_coeff_v = calc_resize_coeff(in_tile->height,
+-                                                 ctx->downsize_coeff_v,
+-                                                 resized_height, closest);
+-
+-              dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
+-                      __func__, row, resize_coeff_v);
+-
+-              /*
+-               * With the vertical scaling factor known, round up resized
+-               * height (output width or height) to IDMAC limitations.
+-               */
+-              resized_height = round_up(resized_height, 2);
+-
+-              /*
+-               * Calculate input height from the last accessed input pixel
+-               * given resized height and scaling coefficients. Align to
+-               * IDMAC restrictions.
+-               */
+-              last_output = resized_height - 1;
+-              if (closest && ((last_output * resize_coeff_v) % 8192))
+-                      last_output++;
+-              in_height = round_up(
+-                      (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
+-                      << ctx->downsize_coeff_v, 2);
+-
+-              for (col = 0; col < ctx->in.num_cols; col++) {
+-                      tile_idx = row * ctx->in.num_cols + col;
+-                      in_tile = &ctx->in.tile[tile_idx];
+-                      out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+-
+-                      if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-                              out_tile->width = resized_height;
+-                      else
+-                              out_tile->height = resized_height;
+-
+-                      in_tile->height = in_height;
+-              }
+-
+-              ctx->resize_coeffs_v[row] = resize_coeff_v;
+-      }
+-}
+-
+-/*
+- * return the number of runs in given queue (pending_q or done_q)
+- * for this context. hold irqlock when calling.
+- */
+-static int get_run_count(struct ipu_image_convert_ctx *ctx,
+-                       struct list_head *q)
+-{
+-      struct ipu_image_convert_run *run;
+-      int count = 0;
+-
+-      lockdep_assert_held(&ctx->chan->irqlock);
+-
+-      list_for_each_entry(run, q, list) {
+-              if (run->ctx == ctx)
+-                      count++;
+-      }
+-
+-      return count;
+-}
+-
+-static void convert_stop(struct ipu_image_convert_run *run)
+-{
+-      struct ipu_image_convert_ctx *ctx = run->ctx;
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
+-              __func__, chan->ic_task, ctx, run);
+-
+-      /* disable IC tasks and the channels */
+-      ipu_ic_task_disable(chan->ic);
+-      ipu_idmac_disable_channel(chan->in_chan);
+-      ipu_idmac_disable_channel(chan->out_chan);
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              ipu_idmac_disable_channel(chan->rotation_in_chan);
+-              ipu_idmac_disable_channel(chan->rotation_out_chan);
+-              ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
+-      }
+-
+-      ipu_ic_disable(chan->ic);
+-}
+-
+-static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
+-                             struct ipuv3_channel *channel,
+-                             struct ipu_image_convert_image *image,
+-                             enum ipu_rotate_mode rot_mode,
+-                             bool rot_swap_width_height,
+-                             unsigned int tile)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      unsigned int burst_size;
+-      u32 width, height, stride;
+-      dma_addr_t addr0, addr1 = 0;
+-      struct ipu_image tile_image;
+-      unsigned int tile_idx[2];
+-
+-      if (image->type == IMAGE_CONVERT_OUT) {
+-              tile_idx[0] = ctx->out_tile_map[tile];
+-              tile_idx[1] = ctx->out_tile_map[1];
+-      } else {
+-              tile_idx[0] = tile;
+-              tile_idx[1] = 1;
+-      }
+-
+-      if (rot_swap_width_height) {
+-              width = image->tile[tile_idx[0]].height;
+-              height = image->tile[tile_idx[0]].width;
+-              stride = image->tile[tile_idx[0]].rot_stride;
+-              addr0 = ctx->rot_intermediate[0].phys;
+-              if (ctx->double_buffering)
+-                      addr1 = ctx->rot_intermediate[1].phys;
+-      } else {
+-              width = image->tile[tile_idx[0]].width;
+-              height = image->tile[tile_idx[0]].height;
+-              stride = image->stride;
+-              addr0 = image->base.phys0 +
+-                      image->tile[tile_idx[0]].offset;
+-              if (ctx->double_buffering)
+-                      addr1 = image->base.phys0 +
+-                              image->tile[tile_idx[1]].offset;
+-      }
+-
+-      ipu_cpmem_zero(channel);
+-
+-      memset(&tile_image, 0, sizeof(tile_image));
+-      tile_image.pix.width = tile_image.rect.width = width;
+-      tile_image.pix.height = tile_image.rect.height = height;
+-      tile_image.pix.bytesperline = stride;
+-      tile_image.pix.pixelformat =  image->fmt->fourcc;
+-      tile_image.phys0 = addr0;
+-      tile_image.phys1 = addr1;
+-      if (image->fmt->planar && !rot_swap_width_height) {
+-              tile_image.u_offset = image->tile[tile_idx[0]].u_off;
+-              tile_image.v_offset = image->tile[tile_idx[0]].v_off;
+-      }
+-
+-      ipu_cpmem_set_image(channel, &tile_image);
+-
+-      if (rot_mode)
+-              ipu_cpmem_set_rotation(channel, rot_mode);
+-
+-      /*
+-       * Skip writing U and V components to odd rows in the output
+-       * channels for planar 4:2:0.
+-       */
+-      if ((channel == chan->out_chan ||
+-           channel == chan->rotation_out_chan) &&
+-          image->fmt->planar && image->fmt->uv_height_dec == 2)
+-              ipu_cpmem_skip_odd_chroma_rows(channel);
+-
+-      if (channel == chan->rotation_in_chan ||
+-          channel == chan->rotation_out_chan) {
+-              burst_size = 8;
+-              ipu_cpmem_set_block_mode(channel);
+-      } else
+-              burst_size = (width % 16) ? 8 : 16;
+-
+-      ipu_cpmem_set_burstsize(channel, burst_size);
+-
+-      ipu_ic_task_idma_init(chan->ic, channel, width, height,
+-                            burst_size, rot_mode);
+-
+-      /*
+-       * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
+-       * only do this when there is no PRG present.
+-       */
+-      if (!channel->ipu->prg_priv)
+-              ipu_cpmem_set_axi_id(channel, 1);
+-
+-      ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
+-}
+-
+-static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
+-{
+-      struct ipu_image_convert_ctx *ctx = run->ctx;
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_image *s_image = &ctx->in;
+-      struct ipu_image_convert_image *d_image = &ctx->out;
+-      unsigned int dst_tile = ctx->out_tile_map[tile];
+-      unsigned int dest_width, dest_height;
+-      unsigned int col, row;
+-      u32 rsc;
+-      int ret;
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
+-              __func__, chan->ic_task, ctx, run, tile, dst_tile);
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              /* swap width/height for resizer */
+-              dest_width = d_image->tile[dst_tile].height;
+-              dest_height = d_image->tile[dst_tile].width;
+-      } else {
+-              dest_width = d_image->tile[dst_tile].width;
+-              dest_height = d_image->tile[dst_tile].height;
+-      }
+-
+-      row = tile / s_image->num_cols;
+-      col = tile % s_image->num_cols;
+-
+-      rsc =  (ctx->downsize_coeff_v << 30) |
+-             (ctx->resize_coeffs_v[row] << 16) |
+-             (ctx->downsize_coeff_h << 14) |
+-             (ctx->resize_coeffs_h[col]);
+-
+-      dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
+-              __func__, s_image->tile[tile].width,
+-              s_image->tile[tile].height, dest_width, dest_height, rsc);
+-
+-      /* setup the IC resizer and CSC */
+-      ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
+-                                 s_image->tile[tile].width,
+-                                 s_image->tile[tile].height,
+-                                 dest_width,
+-                                 dest_height,
+-                                 rsc);
+-      if (ret) {
+-              dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
+-              return ret;
+-      }
+-
+-      /* init the source MEM-->IC PP IDMAC channel */
+-      init_idmac_channel(ctx, chan->in_chan, s_image,
+-                         IPU_ROTATE_NONE, false, tile);
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              /* init the IC PP-->MEM IDMAC channel */
+-              init_idmac_channel(ctx, chan->out_chan, d_image,
+-                                 IPU_ROTATE_NONE, true, tile);
+-
+-              /* init the MEM-->IC PP ROT IDMAC channel */
+-              init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
+-                                 ctx->rot_mode, true, tile);
+-
+-              /* init the destination IC PP ROT-->MEM IDMAC channel */
+-              init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
+-                                 IPU_ROTATE_NONE, false, tile);
+-
+-              /* now link IC PP-->MEM to MEM-->IC PP ROT */
+-              ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
+-      } else {
+-              /* init the destination IC PP-->MEM IDMAC channel */
+-              init_idmac_channel(ctx, chan->out_chan, d_image,
+-                                 ctx->rot_mode, false, tile);
+-      }
+-
+-      /* enable the IC */
+-      ipu_ic_enable(chan->ic);
+-
+-      /* set buffers ready */
+-      ipu_idmac_select_buffer(chan->in_chan, 0);
+-      ipu_idmac_select_buffer(chan->out_chan, 0);
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-              ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
+-      if (ctx->double_buffering) {
+-              ipu_idmac_select_buffer(chan->in_chan, 1);
+-              ipu_idmac_select_buffer(chan->out_chan, 1);
+-              if (ipu_rot_mode_is_irt(ctx->rot_mode))
+-                      ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
+-      }
+-
+-      /* enable the channels! */
+-      ipu_idmac_enable_channel(chan->in_chan);
+-      ipu_idmac_enable_channel(chan->out_chan);
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              ipu_idmac_enable_channel(chan->rotation_in_chan);
+-              ipu_idmac_enable_channel(chan->rotation_out_chan);
+-      }
+-
+-      ipu_ic_task_enable(chan->ic);
+-
+-      ipu_cpmem_dump(chan->in_chan);
+-      ipu_cpmem_dump(chan->out_chan);
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              ipu_cpmem_dump(chan->rotation_in_chan);
+-              ipu_cpmem_dump(chan->rotation_out_chan);
+-      }
+-
+-      ipu_dump(priv->ipu);
+-
+-      return 0;
+-}
+-
+-/* hold irqlock when calling */
+-static int do_run(struct ipu_image_convert_run *run)
+-{
+-      struct ipu_image_convert_ctx *ctx = run->ctx;
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-
+-      lockdep_assert_held(&chan->irqlock);
+-
+-      ctx->in.base.phys0 = run->in_phys;
+-      ctx->out.base.phys0 = run->out_phys;
+-
+-      ctx->cur_buf_num = 0;
+-      ctx->next_tile = 1;
+-
+-      /* remove run from pending_q and set as current */
+-      list_del(&run->list);
+-      chan->current_run = run;
+-
+-      return convert_start(run, 0);
+-}
+-
+-/* hold irqlock when calling */
+-static void run_next(struct ipu_image_convert_chan *chan)
+-{
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_run *run, *tmp;
+-      int ret;
+-
+-      lockdep_assert_held(&chan->irqlock);
+-
+-      list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+-              /* skip contexts that are aborting */
+-              if (run->ctx->aborting) {
+-                      dev_dbg(priv->ipu->dev,
+-                              "%s: task %u: skipping aborting ctx %p run %p\n",
+-                              __func__, chan->ic_task, run->ctx, run);
+-                      continue;
+-              }
+-
+-              ret = do_run(run);
+-              if (!ret)
+-                      break;
+-
+-              /*
+-               * something went wrong with start, add the run
+-               * to done q and continue to the next run in the
+-               * pending q.
+-               */
+-              run->status = ret;
+-              list_add_tail(&run->list, &chan->done_q);
+-              chan->current_run = NULL;
+-      }
+-}
+-
+-static void empty_done_q(struct ipu_image_convert_chan *chan)
+-{
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_run *run;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      while (!list_empty(&chan->done_q)) {
+-              run = list_entry(chan->done_q.next,
+-                               struct ipu_image_convert_run,
+-                               list);
+-
+-              list_del(&run->list);
+-
+-              dev_dbg(priv->ipu->dev,
+-                      "%s: task %u: completing ctx %p run %p with %d\n",
+-                      __func__, chan->ic_task, run->ctx, run, run->status);
+-
+-              /* call the completion callback and free the run */
+-              spin_unlock_irqrestore(&chan->irqlock, flags);
+-              run->ctx->complete(run, run->ctx->complete_context);
+-              spin_lock_irqsave(&chan->irqlock, flags);
+-      }
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-}
+-
+-/*
+- * the bottom half thread clears out the done_q, calling the
+- * completion handler for each.
+- */
+-static irqreturn_t do_bh(int irq, void *dev_id)
+-{
+-      struct ipu_image_convert_chan *chan = dev_id;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_ctx *ctx;
+-      unsigned long flags;
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
+-              chan->ic_task);
+-
+-      empty_done_q(chan);
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      /*
+-       * the done_q is cleared out, signal any contexts
+-       * that are aborting that abort can complete.
+-       */
+-      list_for_each_entry(ctx, &chan->ctx_list, list) {
+-              if (ctx->aborting) {
+-                      dev_dbg(priv->ipu->dev,
+-                              "%s: task %u: signaling abort for ctx %p\n",
+-                              __func__, chan->ic_task, ctx);
+-                      complete_all(&ctx->aborted);
+-              }
+-      }
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
+-              chan->ic_task);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
+-{
+-      unsigned int cur_tile = ctx->next_tile - 1;
+-      unsigned int next_tile = ctx->next_tile;
+-
+-      if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
+-          ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
+-          ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
+-          ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
+-          ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
+-          ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
+-          ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
+-          ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
+-              return true;
+-
+-      return false;
+-}
+-
+-/* hold irqlock when calling */
+-static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+-{
+-      struct ipu_image_convert_ctx *ctx = run->ctx;
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_tile *src_tile, *dst_tile;
+-      struct ipu_image_convert_image *s_image = &ctx->in;
+-      struct ipu_image_convert_image *d_image = &ctx->out;
+-      struct ipuv3_channel *outch;
+-      unsigned int dst_idx;
+-
+-      lockdep_assert_held(&chan->irqlock);
+-
+-      outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
+-              chan->rotation_out_chan : chan->out_chan;
+-
+-      /*
+-       * It is difficult to stop the channel DMA before the channels
+-       * enter the paused state. Without double-buffering the channels
+-       * are always in a paused state when the EOF irq occurs, so it
+-       * is safe to stop the channels now. For double-buffering we
+-       * just ignore the abort until the operation completes, when it
+-       * is safe to shut down.
+-       */
+-      if (ctx->aborting && !ctx->double_buffering) {
+-              convert_stop(run);
+-              run->status = -EIO;
+-              goto done;
+-      }
+-
+-      if (ctx->next_tile == ctx->num_tiles) {
+-              /*
+-               * the conversion is complete
+-               */
+-              convert_stop(run);
+-              run->status = 0;
+-              goto done;
+-      }
+-
+-      /*
+-       * not done, place the next tile buffers.
+-       */
+-      if (!ctx->double_buffering) {
+-              if (ic_settings_changed(ctx)) {
+-                      convert_stop(run);
+-                      convert_start(run, ctx->next_tile);
+-              } else {
+-                      src_tile = &s_image->tile[ctx->next_tile];
+-                      dst_idx = ctx->out_tile_map[ctx->next_tile];
+-                      dst_tile = &d_image->tile[dst_idx];
+-
+-                      ipu_cpmem_set_buffer(chan->in_chan, 0,
+-                                           s_image->base.phys0 +
+-                                           src_tile->offset);
+-                      ipu_cpmem_set_buffer(outch, 0,
+-                                           d_image->base.phys0 +
+-                                           dst_tile->offset);
+-                      if (s_image->fmt->planar)
+-                              ipu_cpmem_set_uv_offset(chan->in_chan,
+-                                                      src_tile->u_off,
+-                                                      src_tile->v_off);
+-                      if (d_image->fmt->planar)
+-                              ipu_cpmem_set_uv_offset(outch,
+-                                                      dst_tile->u_off,
+-                                                      dst_tile->v_off);
+-
+-                      ipu_idmac_select_buffer(chan->in_chan, 0);
+-                      ipu_idmac_select_buffer(outch, 0);
+-              }
+-      } else if (ctx->next_tile < ctx->num_tiles - 1) {
+-
+-              src_tile = &s_image->tile[ctx->next_tile + 1];
+-              dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
+-              dst_tile = &d_image->tile[dst_idx];
+-
+-              ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
+-                                   s_image->base.phys0 + src_tile->offset);
+-              ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
+-                                   d_image->base.phys0 + dst_tile->offset);
+-
+-              ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
+-              ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
+-
+-              ctx->cur_buf_num ^= 1;
+-      }
+-
+-      ctx->next_tile++;
+-      return IRQ_HANDLED;
+-done:
+-      list_add_tail(&run->list, &chan->done_q);
+-      chan->current_run = NULL;
+-      run_next(chan);
+-      return IRQ_WAKE_THREAD;
+-}
+-
+-static irqreturn_t norotate_irq(int irq, void *data)
+-{
+-      struct ipu_image_convert_chan *chan = data;
+-      struct ipu_image_convert_ctx *ctx;
+-      struct ipu_image_convert_run *run;
+-      unsigned long flags;
+-      irqreturn_t ret;
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      /* get current run and its context */
+-      run = chan->current_run;
+-      if (!run) {
+-              ret = IRQ_NONE;
+-              goto out;
+-      }
+-
+-      ctx = run->ctx;
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              /* this is a rotation operation, just ignore */
+-              spin_unlock_irqrestore(&chan->irqlock, flags);
+-              return IRQ_HANDLED;
+-      }
+-
+-      ret = do_irq(run);
+-out:
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-      return ret;
+-}
+-
+-static irqreturn_t rotate_irq(int irq, void *data)
+-{
+-      struct ipu_image_convert_chan *chan = data;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_ctx *ctx;
+-      struct ipu_image_convert_run *run;
+-      unsigned long flags;
+-      irqreturn_t ret;
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      /* get current run and its context */
+-      run = chan->current_run;
+-      if (!run) {
+-              ret = IRQ_NONE;
+-              goto out;
+-      }
+-
+-      ctx = run->ctx;
+-
+-      if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              /* this was NOT a rotation operation, shouldn't happen */
+-              dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+-              spin_unlock_irqrestore(&chan->irqlock, flags);
+-              return IRQ_HANDLED;
+-      }
+-
+-      ret = do_irq(run);
+-out:
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-      return ret;
+-}
+-
+-/*
+- * try to force the completion of runs for this ctx. Called when
+- * abort wait times out in ipu_image_convert_abort().
+- */
+-static void force_abort(struct ipu_image_convert_ctx *ctx)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_run *run;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      run = chan->current_run;
+-      if (run && run->ctx == ctx) {
+-              convert_stop(run);
+-              run->status = -EIO;
+-              list_add_tail(&run->list, &chan->done_q);
+-              chan->current_run = NULL;
+-              run_next(chan);
+-      }
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-
+-      empty_done_q(chan);
+-}
+-
+-static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+-{
+-      if (chan->out_eof_irq >= 0)
+-              free_irq(chan->out_eof_irq, chan);
+-      if (chan->rot_out_eof_irq >= 0)
+-              free_irq(chan->rot_out_eof_irq, chan);
+-
+-      if (!IS_ERR_OR_NULL(chan->in_chan))
+-              ipu_idmac_put(chan->in_chan);
+-      if (!IS_ERR_OR_NULL(chan->out_chan))
+-              ipu_idmac_put(chan->out_chan);
+-      if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
+-              ipu_idmac_put(chan->rotation_in_chan);
+-      if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
+-              ipu_idmac_put(chan->rotation_out_chan);
+-      if (!IS_ERR_OR_NULL(chan->ic))
+-              ipu_ic_put(chan->ic);
+-
+-      chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+-              chan->rotation_out_chan = NULL;
+-      chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+-}
+-
+-static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+-{
+-      const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      int ret;
+-
+-      /* get IC */
+-      chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
+-      if (IS_ERR(chan->ic)) {
+-              dev_err(priv->ipu->dev, "could not acquire IC\n");
+-              ret = PTR_ERR(chan->ic);
+-              goto err;
+-      }
+-
+-      /* get IDMAC channels */
+-      chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
+-      chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
+-      if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
+-              dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
+-              ret = -EBUSY;
+-              goto err;
+-      }
+-
+-      chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
+-      chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
+-      if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
+-              dev_err(priv->ipu->dev,
+-                      "could not acquire idmac rotation channels\n");
+-              ret = -EBUSY;
+-              goto err;
+-      }
+-
+-      /* acquire the EOF interrupts */
+-      chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+-                                                chan->out_chan,
+-                                                IPU_IRQ_EOF);
+-
+-      ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+-                                 0, "ipu-ic", chan);
+-      if (ret < 0) {
+-              dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+-                       chan->out_eof_irq);
+-              chan->out_eof_irq = -1;
+-              goto err;
+-      }
+-
+-      chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+-                                                   chan->rotation_out_chan,
+-                                                   IPU_IRQ_EOF);
+-
+-      ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+-                                 0, "ipu-ic", chan);
+-      if (ret < 0) {
+-              dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+-                      chan->rot_out_eof_irq);
+-              chan->rot_out_eof_irq = -1;
+-              goto err;
+-      }
+-
+-      return 0;
+-err:
+-      release_ipu_resources(chan);
+-      return ret;
+-}
+-
+-static int fill_image(struct ipu_image_convert_ctx *ctx,
+-                    struct ipu_image_convert_image *ic_image,
+-                    struct ipu_image *image,
+-                    enum ipu_image_convert_type type)
+-{
+-      struct ipu_image_convert_priv *priv = ctx->chan->priv;
+-
+-      ic_image->base = *image;
+-      ic_image->type = type;
+-
+-      ic_image->fmt = get_format(image->pix.pixelformat);
+-      if (!ic_image->fmt) {
+-              dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
+-                      type == IMAGE_CONVERT_OUT ? "Output" : "Input");
+-              return -EINVAL;
+-      }
+-
+-      if (ic_image->fmt->planar)
+-              ic_image->stride = ic_image->base.pix.width;
+-      else
+-              ic_image->stride  = ic_image->base.pix.bytesperline;
+-
+-      return 0;
+-}
+-
+-/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
+-static unsigned int clamp_align(unsigned int x, unsigned int min,
+-                              unsigned int max, unsigned int align)
+-{
+-      /* Bits that must be zero to be aligned */
+-      unsigned int mask = ~((1 << align) - 1);
+-
+-      /* Clamp to aligned min and max */
+-      x = clamp(x, (min + ~mask) & mask, max & mask);
+-
+-      /* Round to nearest aligned value */
+-      if (align)
+-              x = (x + (1 << (align - 1))) & mask;
+-
+-      return x;
+-}
+-
+-/* Adjusts input/output images to IPU restrictions */
+-void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+-                            enum ipu_rotate_mode rot_mode)
+-{
+-      const struct ipu_image_pixfmt *infmt, *outfmt;
+-      u32 w_align_out, h_align_out;
+-      u32 w_align_in, h_align_in;
+-
+-      infmt = get_format(in->pix.pixelformat);
+-      outfmt = get_format(out->pix.pixelformat);
+-
+-      /* set some default pixel formats if needed */
+-      if (!infmt) {
+-              in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+-              infmt = get_format(V4L2_PIX_FMT_RGB24);
+-      }
+-      if (!outfmt) {
+-              out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+-              outfmt = get_format(V4L2_PIX_FMT_RGB24);
+-      }
+-
+-      /* image converter does not handle fields */
+-      in->pix.field = out->pix.field = V4L2_FIELD_NONE;
+-
+-      /* resizer cannot downsize more than 4:1 */
+-      if (ipu_rot_mode_is_irt(rot_mode)) {
+-              out->pix.height = max_t(__u32, out->pix.height,
+-                                      in->pix.width / 4);
+-              out->pix.width = max_t(__u32, out->pix.width,
+-                                     in->pix.height / 4);
+-      } else {
+-              out->pix.width = max_t(__u32, out->pix.width,
+-                                     in->pix.width / 4);
+-              out->pix.height = max_t(__u32, out->pix.height,
+-                                      in->pix.height / 4);
+-      }
+-
+-      /* align input width/height */
+-      w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
+-                                          rot_mode));
+-      h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
+-                                           rot_mode));
+-      in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
+-                                  w_align_in);
+-      in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
+-                                   h_align_in);
+-
+-      /* align output width/height */
+-      w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
+-                                           rot_mode));
+-      h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
+-                                            rot_mode));
+-      out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
+-                                   w_align_out);
+-      out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
+-                                    h_align_out);
+-
+-      /* set input/output strides and image sizes */
+-      in->pix.bytesperline = infmt->planar ?
+-              clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
+-                          w_align_in) :
+-              clamp_align((in->pix.width * infmt->bpp) >> 3,
+-                          ((2 << w_align_in) * infmt->bpp) >> 3,
+-                          (MAX_W * infmt->bpp) >> 3,
+-                          w_align_in);
+-      in->pix.sizeimage = infmt->planar ?
+-              (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
+-              in->pix.height * in->pix.bytesperline;
+-      out->pix.bytesperline = outfmt->planar ? out->pix.width :
+-              (out->pix.width * outfmt->bpp) >> 3;
+-      out->pix.sizeimage = outfmt->planar ?
+-              (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
+-              out->pix.height * out->pix.bytesperline;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
+-
+-/*
+- * this is used by ipu_image_convert_prepare() to verify set input and
+- * output images are valid before starting the conversion. Clients can
+- * also call it before calling ipu_image_convert_prepare().
+- */
+-int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+-                           enum ipu_rotate_mode rot_mode)
+-{
+-      struct ipu_image testin, testout;
+-
+-      testin = *in;
+-      testout = *out;
+-
+-      ipu_image_convert_adjust(&testin, &testout, rot_mode);
+-
+-      if (testin.pix.width != in->pix.width ||
+-          testin.pix.height != in->pix.height ||
+-          testout.pix.width != out->pix.width ||
+-          testout.pix.height != out->pix.height)
+-              return -EINVAL;
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
+-
+-/*
+- * Call ipu_image_convert_prepare() to prepare for the conversion of
+- * given images and rotation mode. Returns a new conversion context.
+- */
+-struct ipu_image_convert_ctx *
+-ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+-                        struct ipu_image *in, struct ipu_image *out,
+-                        enum ipu_rotate_mode rot_mode,
+-                        ipu_image_convert_cb_t complete,
+-                        void *complete_context)
+-{
+-      struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
+-      struct ipu_image_convert_image *s_image, *d_image;
+-      struct ipu_image_convert_chan *chan;
+-      struct ipu_image_convert_ctx *ctx;
+-      unsigned long flags;
+-      unsigned int i;
+-      bool get_res;
+-      int ret;
+-
+-      if (!in || !out || !complete ||
+-          (ic_task != IC_TASK_VIEWFINDER &&
+-           ic_task != IC_TASK_POST_PROCESSOR))
+-              return ERR_PTR(-EINVAL);
+-
+-      /* verify the in/out images before continuing */
+-      ret = ipu_image_convert_verify(in, out, rot_mode);
+-      if (ret) {
+-              dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
+-                      __func__);
+-              return ERR_PTR(ret);
+-      }
+-
+-      chan = &priv->chan[ic_task];
+-
+-      ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+-      if (!ctx)
+-              return ERR_PTR(-ENOMEM);
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
+-              chan->ic_task, ctx);
+-
+-      ctx->chan = chan;
+-      init_completion(&ctx->aborted);
+-
+-      ctx->rot_mode = rot_mode;
+-
+-      /* Sets ctx->in.num_rows/cols as well */
+-      ret = calc_image_resize_coefficients(ctx, in, out);
+-      if (ret)
+-              goto out_free;
+-
+-      s_image = &ctx->in;
+-      d_image = &ctx->out;
+-
+-      /* set tiling and rotation */
+-      if (ipu_rot_mode_is_irt(rot_mode)) {
+-              d_image->num_rows = s_image->num_cols;
+-              d_image->num_cols = s_image->num_rows;
+-      } else {
+-              d_image->num_rows = s_image->num_rows;
+-              d_image->num_cols = s_image->num_cols;
+-      }
+-
+-      ctx->num_tiles = d_image->num_cols * d_image->num_rows;
+-
+-      ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
+-      if (ret)
+-              goto out_free;
+-      ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
+-      if (ret)
+-              goto out_free;
+-
+-      calc_out_tile_map(ctx);
+-
+-      find_seams(ctx, s_image, d_image);
+-
+-      ret = calc_tile_dimensions(ctx, s_image);
+-      if (ret)
+-              goto out_free;
+-
+-      ret = calc_tile_offsets(ctx, s_image);
+-      if (ret)
+-              goto out_free;
+-
+-      calc_tile_dimensions(ctx, d_image);
+-      ret = calc_tile_offsets(ctx, d_image);
+-      if (ret)
+-              goto out_free;
+-
+-      calc_tile_resize_coefficients(ctx);
+-
+-      ret = ipu_ic_calc_csc(&ctx->csc,
+-                      s_image->base.pix.ycbcr_enc,
+-                      s_image->base.pix.quantization,
+-                      ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
+-                      d_image->base.pix.ycbcr_enc,
+-                      d_image->base.pix.quantization,
+-                      ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
+-      if (ret)
+-              goto out_free;
+-
+-      dump_format(ctx, s_image);
+-      dump_format(ctx, d_image);
+-
+-      ctx->complete = complete;
+-      ctx->complete_context = complete_context;
+-
+-      /*
+-       * Can we use double-buffering for this operation? If there is
+-       * only one tile (the whole image can be converted in a single
+-       * operation) there's no point in using double-buffering. Also,
+-       * the IPU's IDMAC channels allow only a single U and V plane
+-       * offset shared between both buffers, but these offsets change
+-       * for every tile, and therefore would have to be updated for
+-       * each buffer which is not possible. So double-buffering is
+-       * impossible when either the source or destination images are
+-       * a planar format (YUV420, YUV422P, etc.). Further, differently
+-       * sized tiles or different resizing coefficients per tile
+-       * prevent double-buffering as well.
+-       */
+-      ctx->double_buffering = (ctx->num_tiles > 1 &&
+-                               !s_image->fmt->planar &&
+-                               !d_image->fmt->planar);
+-      for (i = 1; i < ctx->num_tiles; i++) {
+-              if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
+-                  ctx->in.tile[i].height != ctx->in.tile[0].height ||
+-                  ctx->out.tile[i].width != ctx->out.tile[0].width ||
+-                  ctx->out.tile[i].height != ctx->out.tile[0].height) {
+-                      ctx->double_buffering = false;
+-                      break;
+-              }
+-      }
+-      for (i = 1; i < ctx->in.num_cols; i++) {
+-              if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
+-                      ctx->double_buffering = false;
+-                      break;
+-              }
+-      }
+-      for (i = 1; i < ctx->in.num_rows; i++) {
+-              if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
+-                      ctx->double_buffering = false;
+-                      break;
+-              }
+-      }
+-
+-      if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+-              unsigned long intermediate_size = d_image->tile[0].size;
+-
+-              for (i = 1; i < ctx->num_tiles; i++) {
+-                      if (d_image->tile[i].size > intermediate_size)
+-                              intermediate_size = d_image->tile[i].size;
+-              }
+-
+-              ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
+-                                  intermediate_size);
+-              if (ret)
+-                      goto out_free;
+-              if (ctx->double_buffering) {
+-                      ret = alloc_dma_buf(priv,
+-                                          &ctx->rot_intermediate[1],
+-                                          intermediate_size);
+-                      if (ret)
+-                              goto out_free_dmabuf0;
+-              }
+-      }
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      get_res = list_empty(&chan->ctx_list);
+-
+-      list_add_tail(&ctx->list, &chan->ctx_list);
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-
+-      if (get_res) {
+-              ret = get_ipu_resources(chan);
+-              if (ret)
+-                      goto out_free_dmabuf1;
+-      }
+-
+-      return ctx;
+-
+-out_free_dmabuf1:
+-      free_dma_buf(priv, &ctx->rot_intermediate[1]);
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-      list_del(&ctx->list);
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-out_free_dmabuf0:
+-      free_dma_buf(priv, &ctx->rot_intermediate[0]);
+-out_free:
+-      kfree(ctx);
+-      return ERR_PTR(ret);
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
+-
+-/*
+- * Carry out a single image conversion run. Only the physaddr's of the input
+- * and output image buffers are needed. The conversion context must have
+- * been created previously with ipu_image_convert_prepare().
+- */
+-int ipu_image_convert_queue(struct ipu_image_convert_run *run)
+-{
+-      struct ipu_image_convert_chan *chan;
+-      struct ipu_image_convert_priv *priv;
+-      struct ipu_image_convert_ctx *ctx;
+-      unsigned long flags;
+-      int ret = 0;
+-
+-      if (!run || !run->ctx || !run->in_phys || !run->out_phys)
+-              return -EINVAL;
+-
+-      ctx = run->ctx;
+-      chan = ctx->chan;
+-      priv = chan->priv;
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
+-              chan->ic_task, ctx, run);
+-
+-      INIT_LIST_HEAD(&run->list);
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      if (ctx->aborting) {
+-              ret = -EIO;
+-              goto unlock;
+-      }
+-
+-      list_add_tail(&run->list, &chan->pending_q);
+-
+-      if (!chan->current_run) {
+-              ret = do_run(run);
+-              if (ret)
+-                      chan->current_run = NULL;
+-      }
+-unlock:
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
+-
+-/* Abort any active or pending conversions for this context */
+-static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      struct ipu_image_convert_run *run, *active_run, *tmp;
+-      unsigned long flags;
+-      int run_count, ret;
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      /* move all remaining pending runs in this context to done_q */
+-      list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+-              if (run->ctx != ctx)
+-                      continue;
+-              run->status = -EIO;
+-              list_move_tail(&run->list, &chan->done_q);
+-      }
+-
+-      run_count = get_run_count(ctx, &chan->done_q);
+-      active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
+-              chan->current_run : NULL;
+-
+-      if (active_run)
+-              reinit_completion(&ctx->aborted);
+-
+-      ctx->aborting = true;
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-
+-      if (!run_count && !active_run) {
+-              dev_dbg(priv->ipu->dev,
+-                      "%s: task %u: no abort needed for ctx %p\n",
+-                      __func__, chan->ic_task, ctx);
+-              return;
+-      }
+-
+-      if (!active_run) {
+-              empty_done_q(chan);
+-              return;
+-      }
+-
+-      dev_dbg(priv->ipu->dev,
+-              "%s: task %u: wait for completion: %d runs\n",
+-              __func__, chan->ic_task, run_count);
+-
+-      ret = wait_for_completion_timeout(&ctx->aborted,
+-                                        msecs_to_jiffies(10000));
+-      if (ret == 0) {
+-              dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+-              force_abort(ctx);
+-      }
+-}
+-
+-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+-{
+-      __ipu_image_convert_abort(ctx);
+-      ctx->aborting = false;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+-
+-/* Unprepare image conversion context */
+-void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+-{
+-      struct ipu_image_convert_chan *chan = ctx->chan;
+-      struct ipu_image_convert_priv *priv = chan->priv;
+-      unsigned long flags;
+-      bool put_res;
+-
+-      /* make sure no runs are hanging around */
+-      __ipu_image_convert_abort(ctx);
+-
+-      dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+-              chan->ic_task, ctx);
+-
+-      spin_lock_irqsave(&chan->irqlock, flags);
+-
+-      list_del(&ctx->list);
+-
+-      put_res = list_empty(&chan->ctx_list);
+-
+-      spin_unlock_irqrestore(&chan->irqlock, flags);
+-
+-      if (put_res)
+-              release_ipu_resources(chan);
+-
+-      free_dma_buf(priv, &ctx->rot_intermediate[1]);
+-      free_dma_buf(priv, &ctx->rot_intermediate[0]);
+-
+-      kfree(ctx);
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
+-
+-/*
+- * "Canned" asynchronous single image conversion. Allocates and returns
+- * a new conversion run.  On successful return the caller must free the
+- * run and call ipu_image_convert_unprepare() after conversion completes.
+- */
+-struct ipu_image_convert_run *
+-ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+-                struct ipu_image *in, struct ipu_image *out,
+-                enum ipu_rotate_mode rot_mode,
+-                ipu_image_convert_cb_t complete,
+-                void *complete_context)
+-{
+-      struct ipu_image_convert_ctx *ctx;
+-      struct ipu_image_convert_run *run;
+-      int ret;
+-
+-      ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
+-                                      complete, complete_context);
+-      if (IS_ERR(ctx))
+-              return ERR_CAST(ctx);
+-
+-      run = kzalloc(sizeof(*run), GFP_KERNEL);
+-      if (!run) {
+-              ipu_image_convert_unprepare(ctx);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      run->ctx = ctx;
+-      run->in_phys = in->phys0;
+-      run->out_phys = out->phys0;
+-
+-      ret = ipu_image_convert_queue(run);
+-      if (ret) {
+-              ipu_image_convert_unprepare(ctx);
+-              kfree(run);
+-              return ERR_PTR(ret);
+-      }
+-
+-      return run;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert);
+-
+-/* "Canned" synchronous single image conversion */
+-static void image_convert_sync_complete(struct ipu_image_convert_run *run,
+-                                      void *data)
+-{
+-      struct completion *comp = data;
+-
+-      complete(comp);
+-}
+-
+-int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+-                         struct ipu_image *in, struct ipu_image *out,
+-                         enum ipu_rotate_mode rot_mode)
+-{
+-      struct ipu_image_convert_run *run;
+-      struct completion comp;
+-      int ret;
+-
+-      init_completion(&comp);
+-
+-      run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
+-                              image_convert_sync_complete, &comp);
+-      if (IS_ERR(run))
+-              return PTR_ERR(run);
+-
+-      ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
+-      ret = (ret == 0) ? -ETIMEDOUT : 0;
+-
+-      ipu_image_convert_unprepare(run->ctx);
+-      kfree(run);
+-
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
+-
+-int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+-{
+-      struct ipu_image_convert_priv *priv;
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-
+-      ipu->image_convert_priv = priv;
+-      priv->ipu = ipu;
+-
+-      for (i = 0; i < IC_NUM_TASKS; i++) {
+-              struct ipu_image_convert_chan *chan = &priv->chan[i];
+-
+-              chan->ic_task = i;
+-              chan->priv = priv;
+-              chan->dma_ch = &image_convert_dma_chan[i];
+-              chan->out_eof_irq = -1;
+-              chan->rot_out_eof_irq = -1;
+-
+-              spin_lock_init(&chan->irqlock);
+-              INIT_LIST_HEAD(&chan->ctx_list);
+-              INIT_LIST_HEAD(&chan->pending_q);
+-              INIT_LIST_HEAD(&chan->done_q);
+-      }
+-
+-      return 0;
+-}
+-
+-void ipu_image_convert_exit(struct ipu_soc *ipu)
+-{
+-}
+--- a/drivers/gpu/ipu-v3/ipu-pre.c
++++ /dev/null
+@@ -1,346 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (c) 2017 Lucas Stach, Pengutronix
+- */
+-
+-#include <drm/drm_fourcc.h>
+-#include <linux/clk.h>
+-#include <linux/err.h>
+-#include <linux/genalloc.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/platform_device.h>
+-#include <video/imx-ipu-v3.h>
+-
+-#include "ipu-prv.h"
+-
+-#define IPU_PRE_MAX_WIDTH     2048
+-#define IPU_PRE_NUM_SCANLINES 8
+-
+-#define IPU_PRE_CTRL                                  0x000
+-#define IPU_PRE_CTRL_SET                              0x004
+-#define  IPU_PRE_CTRL_ENABLE                          (1 << 0)
+-#define  IPU_PRE_CTRL_BLOCK_EN                                (1 << 1)
+-#define  IPU_PRE_CTRL_BLOCK_16                                (1 << 2)
+-#define  IPU_PRE_CTRL_SDW_UPDATE                      (1 << 4)
+-#define  IPU_PRE_CTRL_VFLIP                           (1 << 5)
+-#define  IPU_PRE_CTRL_SO                              (1 << 6)
+-#define  IPU_PRE_CTRL_INTERLACED_FIELD                        (1 << 7)
+-#define  IPU_PRE_CTRL_HANDSHAKE_EN                    (1 << 8)
+-#define  IPU_PRE_CTRL_HANDSHAKE_LINE_NUM(v)           ((v & 0x3) << 9)
+-#define  IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN         (1 << 11)
+-#define  IPU_PRE_CTRL_EN_REPEAT                               (1 << 28)
+-#define  IPU_PRE_CTRL_TPR_REST_SEL                    (1 << 29)
+-#define  IPU_PRE_CTRL_CLKGATE                         (1 << 30)
+-#define  IPU_PRE_CTRL_SFTRST                          (1 << 31)
+-
+-#define IPU_PRE_CUR_BUF                                       0x030
+-
+-#define IPU_PRE_NEXT_BUF                              0x040
+-
+-#define IPU_PRE_TPR_CTRL                              0x070
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT(v)              ((v & 0xff) << 0)
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK            0xff
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT          (1 << 0)
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SPLIT_BUF               (1 << 4)
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF      (1 << 5)
+-#define  IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED     (1 << 6)
+-
+-#define IPU_PRE_PREFETCH_ENG_CTRL                     0x080
+-#define  IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN            (1 << 0)
+-#define  IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(v)                ((v & 0x7) << 1)
+-#define  IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(v)    ((v & 0x3) << 4)
+-#define  IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(v)  ((v & 0x7) << 8)
+-#define  IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS           (1 << 11)
+-#define  IPU_PRE_PREF_ENG_CTRL_FIELD_INVERSE          (1 << 12)
+-#define  IPU_PRE_PREF_ENG_CTRL_PARTIAL_UV_SWAP                (1 << 14)
+-#define  IPU_PRE_PREF_ENG_CTRL_TPR_COOR_OFFSET_EN     (1 << 15)
+-
+-#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE                       0x0a0
+-#define  IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(v)     ((v & 0xffff) << 0)
+-#define  IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(v)    ((v & 0xffff) << 16)
+-
+-#define IPU_PRE_PREFETCH_ENG_PITCH                    0x0d0
+-#define  IPU_PRE_PREFETCH_ENG_PITCH_Y(v)              ((v & 0xffff) << 0)
+-#define  IPU_PRE_PREFETCH_ENG_PITCH_UV(v)             ((v & 0xffff) << 16)
+-
+-#define IPU_PRE_STORE_ENG_CTRL                                0x110
+-#define  IPU_PRE_STORE_ENG_CTRL_STORE_EN              (1 << 0)
+-#define  IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v)               ((v & 0x7) << 1)
+-#define  IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v)  ((v & 0x3) << 4)
+-
+-#define IPU_PRE_STORE_ENG_STATUS                      0x120
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK  0xffff
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK  0x3fff
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL     (1 << 30)
+-#define  IPU_PRE_STORE_ENG_STATUS_STORE_FIELD         (1 << 31)
+-
+-#define IPU_PRE_STORE_ENG_SIZE                                0x130
+-#define  IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v)                ((v & 0xffff) << 0)
+-#define  IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v)               ((v & 0xffff) << 16)
+-
+-#define IPU_PRE_STORE_ENG_PITCH                               0x140
+-#define  IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(v)         ((v & 0xffff) << 0)
+-
+-#define IPU_PRE_STORE_ENG_ADDR                                0x150
+-
+-struct ipu_pre {
+-      struct list_head        list;
+-      struct device           *dev;
+-
+-      void __iomem            *regs;
+-      struct clk              *clk_axi;
+-      struct gen_pool         *iram;
+-
+-      dma_addr_t              buffer_paddr;
+-      void                    *buffer_virt;
+-      bool                    in_use;
+-      unsigned int            safe_window_end;
+-      unsigned int            last_bufaddr;
+-};
+-
+-static DEFINE_MUTEX(ipu_pre_list_mutex);
+-static LIST_HEAD(ipu_pre_list);
+-static int available_pres;
+-
+-int ipu_pre_get_available_count(void)
+-{
+-      return available_pres;
+-}
+-
+-struct ipu_pre *
+-ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
+-{
+-      struct device_node *pre_node = of_parse_phandle(dev->of_node,
+-                                                      name, index);
+-      struct ipu_pre *pre;
+-
+-      mutex_lock(&ipu_pre_list_mutex);
+-      list_for_each_entry(pre, &ipu_pre_list, list) {
+-              if (pre_node == pre->dev->of_node) {
+-                      mutex_unlock(&ipu_pre_list_mutex);
+-                      device_link_add(dev, pre->dev,
+-                                      DL_FLAG_AUTOREMOVE_CONSUMER);
+-                      of_node_put(pre_node);
+-                      return pre;
+-              }
+-      }
+-      mutex_unlock(&ipu_pre_list_mutex);
+-
+-      of_node_put(pre_node);
+-
+-      return NULL;
+-}
+-
+-int ipu_pre_get(struct ipu_pre *pre)
+-{
+-      u32 val;
+-
+-      if (pre->in_use)
+-              return -EBUSY;
+-
+-      /* first get the engine out of reset and remove clock gating */
+-      writel(0, pre->regs + IPU_PRE_CTRL);
+-
+-      /* init defaults that should be applied to all streams */
+-      val = IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN |
+-            IPU_PRE_CTRL_HANDSHAKE_EN |
+-            IPU_PRE_CTRL_TPR_REST_SEL |
+-            IPU_PRE_CTRL_SDW_UPDATE;
+-      writel(val, pre->regs + IPU_PRE_CTRL);
+-
+-      pre->in_use = true;
+-      return 0;
+-}
+-
+-void ipu_pre_put(struct ipu_pre *pre)
+-{
+-      writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
+-
+-      pre->in_use = false;
+-}
+-
+-void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
+-                     unsigned int height, unsigned int stride, u32 format,
+-                     uint64_t modifier, unsigned int bufaddr)
+-{
+-      const struct drm_format_info *info = drm_format_info(format);
+-      u32 active_bpp = info->cpp[0] >> 1;
+-      u32 val;
+-
+-      /* calculate safe window for ctrl register updates */
+-      if (modifier == DRM_FORMAT_MOD_LINEAR)
+-              pre->safe_window_end = height - 2;
+-      else
+-              pre->safe_window_end = DIV_ROUND_UP(height, 4) - 1;
+-
+-      writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
+-      writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+-      pre->last_bufaddr = bufaddr;
+-
+-      val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
+-            IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
+-            IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(4) |
+-            IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS |
+-            IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN;
+-      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_CTRL);
+-
+-      val = IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(width) |
+-            IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(height);
+-      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_INPUT_SIZE);
+-
+-      val = IPU_PRE_PREFETCH_ENG_PITCH_Y(stride);
+-      writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_PITCH);
+-
+-      val = IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(active_bpp) |
+-            IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(4) |
+-            IPU_PRE_STORE_ENG_CTRL_STORE_EN;
+-      writel(val, pre->regs + IPU_PRE_STORE_ENG_CTRL);
+-
+-      val = IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(width) |
+-            IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(height);
+-      writel(val, pre->regs + IPU_PRE_STORE_ENG_SIZE);
+-
+-      val = IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(stride);
+-      writel(val, pre->regs + IPU_PRE_STORE_ENG_PITCH);
+-
+-      writel(pre->buffer_paddr, pre->regs + IPU_PRE_STORE_ENG_ADDR);
+-
+-      val = readl(pre->regs + IPU_PRE_TPR_CTRL);
+-      val &= ~IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK;
+-      if (modifier != DRM_FORMAT_MOD_LINEAR) {
+-              /* only support single buffer formats for now */
+-              val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF;
+-              if (modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)
+-                      val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED;
+-              if (info->cpp[0] == 2)
+-                      val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT;
+-      }
+-      writel(val, pre->regs + IPU_PRE_TPR_CTRL);
+-
+-      val = readl(pre->regs + IPU_PRE_CTRL);
+-      val |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE |
+-             IPU_PRE_CTRL_SDW_UPDATE;
+-      if (modifier == DRM_FORMAT_MOD_LINEAR)
+-              val &= ~IPU_PRE_CTRL_BLOCK_EN;
+-      else
+-              val |= IPU_PRE_CTRL_BLOCK_EN;
+-      writel(val, pre->regs + IPU_PRE_CTRL);
+-}
+-
+-void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
+-{
+-      unsigned long timeout = jiffies + msecs_to_jiffies(5);
+-      unsigned short current_yblock;
+-      u32 val;
+-
+-      if (bufaddr == pre->last_bufaddr)
+-              return;
+-
+-      writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+-      pre->last_bufaddr = bufaddr;
+-
+-      do {
+-              if (time_after(jiffies, timeout)) {
+-                      dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+-                      return;
+-              }
+-
+-              val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+-              current_yblock =
+-                      (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+-                      IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+-      } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+-
+-      writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
+-}
+-
+-bool ipu_pre_update_pending(struct ipu_pre *pre)
+-{
+-      return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
+-                IPU_PRE_CTRL_SDW_UPDATE);
+-}
+-
+-u32 ipu_pre_get_baddr(struct ipu_pre *pre)
+-{
+-      return (u32)pre->buffer_paddr;
+-}
+-
+-static int ipu_pre_probe(struct platform_device *pdev)
+-{
+-      struct device *dev = &pdev->dev;
+-      struct resource *res;
+-      struct ipu_pre *pre;
+-
+-      pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
+-      if (!pre)
+-              return -ENOMEM;
+-
+-      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-      pre->regs = devm_ioremap_resource(&pdev->dev, res);
+-      if (IS_ERR(pre->regs))
+-              return PTR_ERR(pre->regs);
+-
+-      pre->clk_axi = devm_clk_get(dev, "axi");
+-      if (IS_ERR(pre->clk_axi))
+-              return PTR_ERR(pre->clk_axi);
+-
+-      pre->iram = of_gen_pool_get(dev->of_node, "fsl,iram", 0);
+-      if (!pre->iram)
+-              return -EPROBE_DEFER;
+-
+-      /*
+-       * Allocate IRAM buffer with maximum size. This could be made dynamic,
+-       * but as there is no other user of this IRAM region and we can fit all
+-       * max sized buffers into it, there is no need yet.
+-       */
+-      pre->buffer_virt = gen_pool_dma_alloc(pre->iram, IPU_PRE_MAX_WIDTH *
+-                                            IPU_PRE_NUM_SCANLINES * 4,
+-                                            &pre->buffer_paddr);
+-      if (!pre->buffer_virt)
+-              return -ENOMEM;
+-
+-      clk_prepare_enable(pre->clk_axi);
+-
+-      pre->dev = dev;
+-      platform_set_drvdata(pdev, pre);
+-      mutex_lock(&ipu_pre_list_mutex);
+-      list_add(&pre->list, &ipu_pre_list);
+-      available_pres++;
+-      mutex_unlock(&ipu_pre_list_mutex);
+-
+-      return 0;
+-}
+-
+-static int ipu_pre_remove(struct platform_device *pdev)
+-{
+-      struct ipu_pre *pre = platform_get_drvdata(pdev);
+-
+-      mutex_lock(&ipu_pre_list_mutex);
+-      list_del(&pre->list);
+-      available_pres--;
+-      mutex_unlock(&ipu_pre_list_mutex);
+-
+-      clk_disable_unprepare(pre->clk_axi);
+-
+-      if (pre->buffer_virt)
+-              gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
+-                            IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
+-      return 0;
+-}
+-
+-static const struct of_device_id ipu_pre_dt_ids[] = {
+-      { .compatible = "fsl,imx6qp-pre", },
+-      { /* sentinel */ },
+-};
+-
+-struct platform_driver ipu_pre_drv = {
+-      .probe          = ipu_pre_probe,
+-      .remove         = ipu_pre_remove,
+-      .driver         = {
+-              .name   = "imx-ipu-pre",
+-              .of_match_table = ipu_pre_dt_ids,
+-      },
+-};
+--- a/drivers/gpu/ipu-v3/ipu-prg.c
++++ /dev/null
+@@ -1,483 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (c) 2016-2017 Lucas Stach, Pengutronix
+- */
+-
+-#include <drm/drm_fourcc.h>
+-#include <linux/clk.h>
+-#include <linux/err.h>
+-#include <linux/iopoll.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/platform_device.h>
+-#include <linux/pm_runtime.h>
+-#include <linux/regmap.h>
+-#include <video/imx-ipu-v3.h>
+-
+-#include "ipu-prv.h"
+-
+-#define IPU_PRG_CTL                           0x00
+-#define  IPU_PRG_CTL_BYPASS(i)                        (1 << (0 + i))
+-#define  IPU_PRG_CTL_SOFT_ARID_MASK           0x3
+-#define  IPU_PRG_CTL_SOFT_ARID_SHIFT(i)               (8 + i * 2)
+-#define  IPU_PRG_CTL_SOFT_ARID(i, v)          ((v & 0x3) << (8 + 2 * i))
+-#define  IPU_PRG_CTL_SO(i)                    (1 << (16 + i))
+-#define  IPU_PRG_CTL_VFLIP(i)                 (1 << (19 + i))
+-#define  IPU_PRG_CTL_BLOCK_MODE(i)            (1 << (22 + i))
+-#define  IPU_PRG_CTL_CNT_LOAD_EN(i)           (1 << (25 + i))
+-#define  IPU_PRG_CTL_SOFTRST                  (1 << 30)
+-#define  IPU_PRG_CTL_SHADOW_EN                        (1 << 31)
+-
+-#define IPU_PRG_STATUS                                0x04
+-#define  IPU_PRG_STATUS_BUFFER0_READY(i)      (1 << (0 + i * 2))
+-#define  IPU_PRG_STATUS_BUFFER1_READY(i)      (1 << (1 + i * 2))
+-
+-#define IPU_PRG_QOS                           0x08
+-#define  IPU_PRG_QOS_ARID_MASK                        0xf
+-#define  IPU_PRG_QOS_ARID_SHIFT(i)            (0 + i * 4)
+-
+-#define IPU_PRG_REG_UPDATE                    0x0c
+-#define  IPU_PRG_REG_UPDATE_REG_UPDATE                (1 << 0)
+-
+-#define IPU_PRG_STRIDE(i)                     (0x10 + i * 0x4)
+-#define  IPU_PRG_STRIDE_STRIDE_MASK           0x3fff
+-
+-#define IPU_PRG_CROP_LINE                     0x1c
+-
+-#define IPU_PRG_THD                           0x20
+-
+-#define IPU_PRG_BADDR(i)                      (0x24 + i * 0x4)
+-
+-#define IPU_PRG_OFFSET(i)                     (0x30 + i * 0x4)
+-
+-#define IPU_PRG_ILO(i)                                (0x3c + i * 0x4)
+-
+-#define IPU_PRG_HEIGHT(i)                     (0x48 + i * 0x4)
+-#define  IPU_PRG_HEIGHT_PRE_HEIGHT_MASK               0xfff
+-#define  IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT      0
+-#define  IPU_PRG_HEIGHT_IPU_HEIGHT_MASK               0xfff
+-#define  IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT      16
+-
+-struct ipu_prg_channel {
+-      bool                    enabled;
+-      int                     used_pre;
+-};
+-
+-struct ipu_prg {
+-      struct list_head        list;
+-      struct device           *dev;
+-      int                     id;
+-
+-      void __iomem            *regs;
+-      struct clk              *clk_ipg, *clk_axi;
+-      struct regmap           *iomuxc_gpr;
+-      struct ipu_pre          *pres[3];
+-
+-      struct ipu_prg_channel  chan[3];
+-};
+-
+-static DEFINE_MUTEX(ipu_prg_list_mutex);
+-static LIST_HEAD(ipu_prg_list);
+-
+-struct ipu_prg *
+-ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
+-{
+-      struct device_node *prg_node = of_parse_phandle(dev->of_node,
+-                                                      name, 0);
+-      struct ipu_prg *prg;
+-
+-      mutex_lock(&ipu_prg_list_mutex);
+-      list_for_each_entry(prg, &ipu_prg_list, list) {
+-              if (prg_node == prg->dev->of_node) {
+-                      mutex_unlock(&ipu_prg_list_mutex);
+-                      device_link_add(dev, prg->dev,
+-                                      DL_FLAG_AUTOREMOVE_CONSUMER);
+-                      prg->id = ipu_id;
+-                      of_node_put(prg_node);
+-                      return prg;
+-              }
+-      }
+-      mutex_unlock(&ipu_prg_list_mutex);
+-
+-      of_node_put(prg_node);
+-
+-      return NULL;
+-}
+-
+-int ipu_prg_max_active_channels(void)
+-{
+-      return ipu_pre_get_available_count();
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_max_active_channels);
+-
+-bool ipu_prg_present(struct ipu_soc *ipu)
+-{
+-      if (ipu->prg_priv)
+-              return true;
+-
+-      return false;
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_present);
+-
+-bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
+-                            uint64_t modifier)
+-{
+-      const struct drm_format_info *info = drm_format_info(format);
+-
+-      if (info->num_planes != 1)
+-              return false;
+-
+-      switch (modifier) {
+-      case DRM_FORMAT_MOD_LINEAR:
+-      case DRM_FORMAT_MOD_VIVANTE_TILED:
+-      case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+-              return true;
+-      default:
+-              return false;
+-      }
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_format_supported);
+-
+-int ipu_prg_enable(struct ipu_soc *ipu)
+-{
+-      struct ipu_prg *prg = ipu->prg_priv;
+-
+-      if (!prg)
+-              return 0;
+-
+-      return pm_runtime_get_sync(prg->dev);
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_enable);
+-
+-void ipu_prg_disable(struct ipu_soc *ipu)
+-{
+-      struct ipu_prg *prg = ipu->prg_priv;
+-
+-      if (!prg)
+-              return;
+-
+-      pm_runtime_put(prg->dev);
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_disable);
+-
+-/*
+- * The channel configuration functions below are not thread safe, as they
+- * must be only called from the atomic commit path in the DRM driver, which
+- * is properly serialized.
+- */
+-static int ipu_prg_ipu_to_prg_chan(int ipu_chan)
+-{
+-      /*
+-       * This isn't clearly documented in the RM, but IPU to PRG channel
+-       * assignment is fixed, as only with this mapping the control signals
+-       * match up.
+-       */
+-      switch (ipu_chan) {
+-      case IPUV3_CHANNEL_MEM_BG_SYNC:
+-              return 0;
+-      case IPUV3_CHANNEL_MEM_FG_SYNC:
+-              return 1;
+-      case IPUV3_CHANNEL_MEM_DC_SYNC:
+-              return 2;
+-      default:
+-              return -EINVAL;
+-      }
+-}
+-
+-static int ipu_prg_get_pre(struct ipu_prg *prg, int prg_chan)
+-{
+-      int i, ret;
+-
+-      /* channel 0 is special as it is hardwired to one of the PREs */
+-      if (prg_chan == 0) {
+-              ret = ipu_pre_get(prg->pres[0]);
+-              if (ret)
+-                      goto fail;
+-              prg->chan[prg_chan].used_pre = 0;
+-              return 0;
+-      }
+-
+-      for (i = 1; i < 3; i++) {
+-              ret = ipu_pre_get(prg->pres[i]);
+-              if (!ret) {
+-                      u32 val, mux;
+-                      int shift;
+-
+-                      prg->chan[prg_chan].used_pre = i;
+-
+-                      /* configure the PRE to PRG channel mux */
+-                      shift = (i == 1) ? 12 : 14;
+-                      mux = (prg->id << 1) | (prg_chan - 1);
+-                      regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
+-                                         0x3 << shift, mux << shift);
+-
+-                      /* check other mux, must not point to same channel */
+-                      shift = (i == 1) ? 14 : 12;
+-                      regmap_read(prg->iomuxc_gpr, IOMUXC_GPR5, &val);
+-                      if (((val >> shift) & 0x3) == mux) {
+-                              regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
+-                                                 0x3 << shift,
+-                                                 (mux ^ 0x1) << shift);
+-                      }
+-
+-                      return 0;
+-              }
+-      }
+-
+-fail:
+-      dev_err(prg->dev, "could not get PRE for PRG chan %d", prg_chan);
+-      return ret;
+-}
+-
+-static void ipu_prg_put_pre(struct ipu_prg *prg, int prg_chan)
+-{
+-      struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+-
+-      ipu_pre_put(prg->pres[chan->used_pre]);
+-      chan->used_pre = -1;
+-}
+-
+-void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
+-{
+-      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+-      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+-      struct ipu_prg_channel *chan;
+-      u32 val;
+-
+-      if (prg_chan < 0)
+-              return;
+-
+-      chan = &prg->chan[prg_chan];
+-      if (!chan->enabled)
+-              return;
+-
+-      pm_runtime_get_sync(prg->dev);
+-
+-      val = readl(prg->regs + IPU_PRG_CTL);
+-      val |= IPU_PRG_CTL_BYPASS(prg_chan);
+-      writel(val, prg->regs + IPU_PRG_CTL);
+-
+-      val = IPU_PRG_REG_UPDATE_REG_UPDATE;
+-      writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+-
+-      pm_runtime_put(prg->dev);
+-
+-      ipu_prg_put_pre(prg, prg_chan);
+-
+-      chan->enabled = false;
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_channel_disable);
+-
+-int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
+-                            unsigned int axi_id, unsigned int width,
+-                            unsigned int height, unsigned int stride,
+-                            u32 format, uint64_t modifier, unsigned long *eba)
+-{
+-      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+-      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+-      struct ipu_prg_channel *chan;
+-      u32 val;
+-      int ret;
+-
+-      if (prg_chan < 0)
+-              return prg_chan;
+-
+-      chan = &prg->chan[prg_chan];
+-
+-      if (chan->enabled) {
+-              ipu_pre_update(prg->pres[chan->used_pre], *eba);
+-              return 0;
+-      }
+-
+-      ret = ipu_prg_get_pre(prg, prg_chan);
+-      if (ret)
+-              return ret;
+-
+-      ipu_pre_configure(prg->pres[chan->used_pre],
+-                        width, height, stride, format, modifier, *eba);
+-
+-
+-      pm_runtime_get_sync(prg->dev);
+-
+-      val = (stride - 1) & IPU_PRG_STRIDE_STRIDE_MASK;
+-      writel(val, prg->regs + IPU_PRG_STRIDE(prg_chan));
+-
+-      val = ((height & IPU_PRG_HEIGHT_PRE_HEIGHT_MASK) <<
+-             IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT) |
+-            ((height & IPU_PRG_HEIGHT_IPU_HEIGHT_MASK) <<
+-             IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT);
+-      writel(val, prg->regs + IPU_PRG_HEIGHT(prg_chan));
+-
+-      val = ipu_pre_get_baddr(prg->pres[chan->used_pre]);
+-      *eba = val;
+-      writel(val, prg->regs + IPU_PRG_BADDR(prg_chan));
+-
+-      val = readl(prg->regs + IPU_PRG_CTL);
+-      /* config AXI ID */
+-      val &= ~(IPU_PRG_CTL_SOFT_ARID_MASK <<
+-               IPU_PRG_CTL_SOFT_ARID_SHIFT(prg_chan));
+-      val |= IPU_PRG_CTL_SOFT_ARID(prg_chan, axi_id);
+-      /* enable channel */
+-      val &= ~IPU_PRG_CTL_BYPASS(prg_chan);
+-      writel(val, prg->regs + IPU_PRG_CTL);
+-
+-      val = IPU_PRG_REG_UPDATE_REG_UPDATE;
+-      writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+-
+-      /* wait for both double buffers to be filled */
+-      readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+-                         (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+-                         (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+-                         5, 1000);
+-
+-      pm_runtime_put(prg->dev);
+-
+-      chan->enabled = true;
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
+-
+-bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
+-{
+-      int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+-      struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+-      struct ipu_prg_channel *chan;
+-
+-      if (prg_chan < 0)
+-              return false;
+-
+-      chan = &prg->chan[prg_chan];
+-      WARN_ON(!chan->enabled);
+-
+-      return ipu_pre_update_pending(prg->pres[chan->used_pre]);
+-}
+-EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
+-
+-static int ipu_prg_probe(struct platform_device *pdev)
+-{
+-      struct device *dev = &pdev->dev;
+-      struct resource *res;
+-      struct ipu_prg *prg;
+-      u32 val;
+-      int i, ret;
+-
+-      prg = devm_kzalloc(dev, sizeof(*prg), GFP_KERNEL);
+-      if (!prg)
+-              return -ENOMEM;
+-
+-      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-      prg->regs = devm_ioremap_resource(&pdev->dev, res);
+-      if (IS_ERR(prg->regs))
+-              return PTR_ERR(prg->regs);
+-
+-
+-      prg->clk_ipg = devm_clk_get(dev, "ipg");
+-      if (IS_ERR(prg->clk_ipg))
+-              return PTR_ERR(prg->clk_ipg);
+-
+-      prg->clk_axi = devm_clk_get(dev, "axi");
+-      if (IS_ERR(prg->clk_axi))
+-              return PTR_ERR(prg->clk_axi);
+-
+-      prg->iomuxc_gpr =
+-              syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+-      if (IS_ERR(prg->iomuxc_gpr))
+-              return PTR_ERR(prg->iomuxc_gpr);
+-
+-      for (i = 0; i < 3; i++) {
+-              prg->pres[i] = ipu_pre_lookup_by_phandle(dev, "fsl,pres", i);
+-              if (!prg->pres[i])
+-                      return -EPROBE_DEFER;
+-      }
+-
+-      ret = clk_prepare_enable(prg->clk_ipg);
+-      if (ret)
+-              return ret;
+-
+-      ret = clk_prepare_enable(prg->clk_axi);
+-      if (ret) {
+-              clk_disable_unprepare(prg->clk_ipg);
+-              return ret;
+-      }
+-
+-      /* init to free running mode */
+-      val = readl(prg->regs + IPU_PRG_CTL);
+-      val |= IPU_PRG_CTL_SHADOW_EN;
+-      writel(val, prg->regs + IPU_PRG_CTL);
+-
+-      /* disable address threshold */
+-      writel(0xffffffff, prg->regs + IPU_PRG_THD);
+-
+-      pm_runtime_set_active(dev);
+-      pm_runtime_enable(dev);
+-
+-      prg->dev = dev;
+-      platform_set_drvdata(pdev, prg);
+-      mutex_lock(&ipu_prg_list_mutex);
+-      list_add(&prg->list, &ipu_prg_list);
+-      mutex_unlock(&ipu_prg_list_mutex);
+-
+-      return 0;
+-}
+-
+-static int ipu_prg_remove(struct platform_device *pdev)
+-{
+-      struct ipu_prg *prg = platform_get_drvdata(pdev);
+-
+-      mutex_lock(&ipu_prg_list_mutex);
+-      list_del(&prg->list);
+-      mutex_unlock(&ipu_prg_list_mutex);
+-
+-      return 0;
+-}
+-
+-#ifdef CONFIG_PM
+-static int prg_suspend(struct device *dev)
+-{
+-      struct ipu_prg *prg = dev_get_drvdata(dev);
+-
+-      clk_disable_unprepare(prg->clk_axi);
+-      clk_disable_unprepare(prg->clk_ipg);
+-
+-      return 0;
+-}
+-
+-static int prg_resume(struct device *dev)
+-{
+-      struct ipu_prg *prg = dev_get_drvdata(dev);
+-      int ret;
+-
+-      ret = clk_prepare_enable(prg->clk_ipg);
+-      if (ret)
+-              return ret;
+-
+-      ret = clk_prepare_enable(prg->clk_axi);
+-      if (ret) {
+-              clk_disable_unprepare(prg->clk_ipg);
+-              return ret;
+-      }
+-
+-      return 0;
+-}
+-#endif
+-
+-static const struct dev_pm_ops prg_pm_ops = {
+-      SET_RUNTIME_PM_OPS(prg_suspend, prg_resume, NULL)
+-};
+-
+-static const struct of_device_id ipu_prg_dt_ids[] = {
+-      { .compatible = "fsl,imx6qp-prg", },
+-      { /* sentinel */ },
+-};
+-
+-struct platform_driver ipu_prg_drv = {
+-      .probe          = ipu_prg_probe,
+-      .remove         = ipu_prg_remove,
+-      .driver         = {
+-              .name   = "imx-ipu-prg",
+-              .pm     = &prg_pm_ops,
+-              .of_match_table = ipu_prg_dt_ids,
+-      },
+-};
+--- a/drivers/gpu/ipu-v3/ipu-prv.h
++++ /dev/null
+@@ -1,274 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#ifndef __IPU_PRV_H__
+-#define __IPU_PRV_H__
+-
+-struct ipu_soc;
+-
+-#include <linux/types.h>
+-#include <linux/device.h>
+-#include <linux/clk.h>
+-#include <linux/platform_device.h>
+-
+-#include <video/imx-ipu-v3.h>
+-
+-#define IPU_MCU_T_DEFAULT     8
+-#define IPU_CM_IDMAC_REG_OFS  0x00008000
+-#define IPU_CM_IC_REG_OFS     0x00020000
+-#define IPU_CM_IRT_REG_OFS    0x00028000
+-#define IPU_CM_CSI0_REG_OFS   0x00030000
+-#define IPU_CM_CSI1_REG_OFS   0x00038000
+-#define IPU_CM_SMFC_REG_OFS   0x00050000
+-#define IPU_CM_DC_REG_OFS     0x00058000
+-#define IPU_CM_DMFC_REG_OFS   0x00060000
+-
+-/* Register addresses */
+-/* IPU Common registers */
+-#define IPU_CM_REG(offset)    (offset)
+-
+-#define IPU_CONF                      IPU_CM_REG(0)
+-
+-#define IPU_SRM_PRI1                  IPU_CM_REG(0x00a0)
+-#define IPU_SRM_PRI2                  IPU_CM_REG(0x00a4)
+-#define IPU_FS_PROC_FLOW1             IPU_CM_REG(0x00a8)
+-#define IPU_FS_PROC_FLOW2             IPU_CM_REG(0x00ac)
+-#define IPU_FS_PROC_FLOW3             IPU_CM_REG(0x00b0)
+-#define IPU_FS_DISP_FLOW1             IPU_CM_REG(0x00b4)
+-#define IPU_FS_DISP_FLOW2             IPU_CM_REG(0x00b8)
+-#define IPU_SKIP                      IPU_CM_REG(0x00bc)
+-#define IPU_DISP_ALT_CONF             IPU_CM_REG(0x00c0)
+-#define IPU_DISP_GEN                  IPU_CM_REG(0x00c4)
+-#define IPU_DISP_ALT1                 IPU_CM_REG(0x00c8)
+-#define IPU_DISP_ALT2                 IPU_CM_REG(0x00cc)
+-#define IPU_DISP_ALT3                 IPU_CM_REG(0x00d0)
+-#define IPU_DISP_ALT4                 IPU_CM_REG(0x00d4)
+-#define IPU_SNOOP                     IPU_CM_REG(0x00d8)
+-#define IPU_MEM_RST                   IPU_CM_REG(0x00dc)
+-#define IPU_PM                                IPU_CM_REG(0x00e0)
+-#define IPU_GPR                               IPU_CM_REG(0x00e4)
+-#define IPU_CHA_DB_MODE_SEL(ch)               IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
+-#define IPU_ALT_CHA_DB_MODE_SEL(ch)   IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
+-#define IPU_CHA_CUR_BUF(ch)           IPU_CM_REG(0x023C + 4 * ((ch) / 32))
+-#define IPU_ALT_CUR_BUF0              IPU_CM_REG(0x0244)
+-#define IPU_ALT_CUR_BUF1              IPU_CM_REG(0x0248)
+-#define IPU_SRM_STAT                  IPU_CM_REG(0x024C)
+-#define IPU_PROC_TASK_STAT            IPU_CM_REG(0x0250)
+-#define IPU_DISP_TASK_STAT            IPU_CM_REG(0x0254)
+-#define IPU_CHA_BUF0_RDY(ch)          IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
+-#define IPU_CHA_BUF1_RDY(ch)          IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
+-#define IPU_CHA_BUF2_RDY(ch)          IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
+-#define IPU_ALT_CHA_BUF0_RDY(ch)      IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
+-#define IPU_ALT_CHA_BUF1_RDY(ch)      IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
+-
+-#define IPU_INT_CTRL(n)               IPU_CM_REG(0x003C + 4 * (n))
+-#define IPU_INT_STAT(n)               IPU_CM_REG(0x0200 + 4 * (n))
+-
+-/* SRM_PRI2 */
+-#define DP_S_SRM_MODE_MASK            (0x3 << 3)
+-#define DP_S_SRM_MODE_NOW             (0x3 << 3)
+-#define DP_S_SRM_MODE_NEXT_FRAME      (0x1 << 3)
+-
+-/* FS_PROC_FLOW1 */
+-#define FS_PRPENC_ROT_SRC_SEL_MASK    (0xf << 0)
+-#define FS_PRPENC_ROT_SRC_SEL_ENC             (0x7 << 0)
+-#define FS_PRPVF_ROT_SRC_SEL_MASK     (0xf << 8)
+-#define FS_PRPVF_ROT_SRC_SEL_VF                       (0x8 << 8)
+-#define FS_PP_SRC_SEL_MASK            (0xf << 12)
+-#define FS_PP_ROT_SRC_SEL_MASK                (0xf << 16)
+-#define FS_PP_ROT_SRC_SEL_PP                  (0x5 << 16)
+-#define FS_VDI1_SRC_SEL_MASK          (0x3 << 20)
+-#define FS_VDI3_SRC_SEL_MASK          (0x3 << 20)
+-#define FS_PRP_SRC_SEL_MASK           (0xf << 24)
+-#define FS_VDI_SRC_SEL_MASK           (0x3 << 28)
+-#define FS_VDI_SRC_SEL_CSI_DIRECT             (0x1 << 28)
+-#define FS_VDI_SRC_SEL_VDOA                   (0x2 << 28)
+-
+-/* FS_PROC_FLOW2 */
+-#define FS_PRP_ENC_DEST_SEL_MASK      (0xf << 0)
+-#define FS_PRP_ENC_DEST_SEL_IRT_ENC           (0x1 << 0)
+-#define FS_PRPVF_DEST_SEL_MASK                (0xf << 4)
+-#define FS_PRPVF_DEST_SEL_IRT_VF              (0x1 << 4)
+-#define FS_PRPVF_ROT_DEST_SEL_MASK    (0xf << 8)
+-#define FS_PP_DEST_SEL_MASK           (0xf << 12)
+-#define FS_PP_DEST_SEL_IRT_PP                 (0x3 << 12)
+-#define FS_PP_ROT_DEST_SEL_MASK               (0xf << 16)
+-#define FS_PRPENC_ROT_DEST_SEL_MASK   (0xf << 20)
+-#define FS_PRP_DEST_SEL_MASK          (0xf << 24)
+-
+-#define IPU_DI0_COUNTER_RELEASE                       (1 << 24)
+-#define IPU_DI1_COUNTER_RELEASE                       (1 << 25)
+-
+-#define IPU_IDMAC_REG(offset) (offset)
+-
+-#define IDMAC_CONF                    IPU_IDMAC_REG(0x0000)
+-#define IDMAC_CHA_EN(ch)              IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
+-#define IDMAC_SEP_ALPHA                       IPU_IDMAC_REG(0x000c)
+-#define IDMAC_ALT_SEP_ALPHA           IPU_IDMAC_REG(0x0010)
+-#define IDMAC_CHA_PRI(ch)             IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
+-#define IDMAC_WM_EN(ch)                       IPU_IDMAC_REG(0x001c + 4 * ((ch) / 32))
+-#define IDMAC_CH_LOCK_EN_1            IPU_IDMAC_REG(0x0024)
+-#define IDMAC_CH_LOCK_EN_2            IPU_IDMAC_REG(0x0028)
+-#define IDMAC_SUB_ADDR_0              IPU_IDMAC_REG(0x002c)
+-#define IDMAC_SUB_ADDR_1              IPU_IDMAC_REG(0x0030)
+-#define IDMAC_SUB_ADDR_2              IPU_IDMAC_REG(0x0034)
+-#define IDMAC_BAND_EN(ch)             IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
+-#define IDMAC_CHA_BUSY(ch)            IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
+-
+-#define IPU_NUM_IRQS  (32 * 15)
+-
+-enum ipu_modules {
+-      IPU_CONF_CSI0_EN                = (1 << 0),
+-      IPU_CONF_CSI1_EN                = (1 << 1),
+-      IPU_CONF_IC_EN                  = (1 << 2),
+-      IPU_CONF_ROT_EN                 = (1 << 3),
+-      IPU_CONF_ISP_EN                 = (1 << 4),
+-      IPU_CONF_DP_EN                  = (1 << 5),
+-      IPU_CONF_DI0_EN                 = (1 << 6),
+-      IPU_CONF_DI1_EN                 = (1 << 7),
+-      IPU_CONF_SMFC_EN                = (1 << 8),
+-      IPU_CONF_DC_EN                  = (1 << 9),
+-      IPU_CONF_DMFC_EN                = (1 << 10),
+-
+-      IPU_CONF_VDI_EN                 = (1 << 12),
+-
+-      IPU_CONF_IDMAC_DIS              = (1 << 22),
+-
+-      IPU_CONF_IC_DMFC_SEL            = (1 << 25),
+-      IPU_CONF_IC_DMFC_SYNC           = (1 << 26),
+-      IPU_CONF_VDI_DMFC_SYNC          = (1 << 27),
+-
+-      IPU_CONF_CSI0_DATA_SOURCE       = (1 << 28),
+-      IPU_CONF_CSI1_DATA_SOURCE       = (1 << 29),
+-      IPU_CONF_IC_INPUT               = (1 << 30),
+-      IPU_CONF_CSI_SEL                = (1 << 31),
+-};
+-
+-struct ipuv3_channel {
+-      unsigned int num;
+-      struct ipu_soc *ipu;
+-      struct list_head list;
+-};
+-
+-struct ipu_cpmem;
+-struct ipu_csi;
+-struct ipu_dc_priv;
+-struct ipu_dmfc_priv;
+-struct ipu_di;
+-struct ipu_ic_priv;
+-struct ipu_vdi;
+-struct ipu_image_convert_priv;
+-struct ipu_smfc_priv;
+-struct ipu_pre;
+-struct ipu_prg;
+-
+-struct ipu_devtype;
+-
+-struct ipu_soc {
+-      struct device           *dev;
+-      const struct ipu_devtype        *devtype;
+-      enum ipuv3_type         ipu_type;
+-      spinlock_t              lock;
+-      struct mutex            channel_lock;
+-      struct list_head        channels;
+-
+-      void __iomem            *cm_reg;
+-      void __iomem            *idmac_reg;
+-
+-      int                     id;
+-      int                     usecount;
+-
+-      struct clk              *clk;
+-
+-      int                     irq_sync;
+-      int                     irq_err;
+-      struct irq_domain       *domain;
+-
+-      struct ipu_cpmem        *cpmem_priv;
+-      struct ipu_dc_priv      *dc_priv;
+-      struct ipu_dp_priv      *dp_priv;
+-      struct ipu_dmfc_priv    *dmfc_priv;
+-      struct ipu_di           *di_priv[2];
+-      struct ipu_csi          *csi_priv[2];
+-      struct ipu_ic_priv      *ic_priv;
+-      struct ipu_vdi          *vdi_priv;
+-      struct ipu_image_convert_priv *image_convert_priv;
+-      struct ipu_smfc_priv    *smfc_priv;
+-      struct ipu_prg          *prg_priv;
+-};
+-
+-static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
+-{
+-      return readl(ipu->idmac_reg + offset);
+-}
+-
+-static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
+-                                 unsigned offset)
+-{
+-      writel(value, ipu->idmac_reg + offset);
+-}
+-
+-void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync);
+-
+-int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
+-int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
+-
+-bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
+-
+-int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
+-               unsigned long base, u32 module, struct clk *clk_ipu);
+-void ipu_csi_exit(struct ipu_soc *ipu, int id);
+-
+-int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
+-              unsigned long base, unsigned long tpmem_base);
+-void ipu_ic_exit(struct ipu_soc *ipu);
+-
+-int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+-               unsigned long base, u32 module);
+-void ipu_vdi_exit(struct ipu_soc *ipu);
+-
+-int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+-void ipu_image_convert_exit(struct ipu_soc *ipu);
+-
+-int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+-              unsigned long base, u32 module, struct clk *ipu_clk);
+-void ipu_di_exit(struct ipu_soc *ipu, int id);
+-
+-int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+-              struct clk *ipu_clk);
+-void ipu_dmfc_exit(struct ipu_soc *ipu);
+-
+-int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+-void ipu_dp_exit(struct ipu_soc *ipu);
+-
+-int ipu_dc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+-              unsigned long template_base);
+-void ipu_dc_exit(struct ipu_soc *ipu);
+-
+-int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+-void ipu_cpmem_exit(struct ipu_soc *ipu);
+-
+-int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+-void ipu_smfc_exit(struct ipu_soc *ipu);
+-
+-struct ipu_pre *ipu_pre_lookup_by_phandle(struct device *dev, const char *name,
+-                                        int index);
+-int ipu_pre_get_available_count(void);
+-int ipu_pre_get(struct ipu_pre *pre);
+-void ipu_pre_put(struct ipu_pre *pre);
+-u32 ipu_pre_get_baddr(struct ipu_pre *pre);
+-void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
+-                     unsigned int height, unsigned int stride, u32 format,
+-                     uint64_t modifier, unsigned int bufaddr);
+-void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
+-bool ipu_pre_update_pending(struct ipu_pre *pre);
+-
+-struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
+-                                        int ipu_id);
+-
+-extern struct platform_driver ipu_pre_drv;
+-extern struct platform_driver ipu_prg_drv;
+-
+-#endif                                /* __IPU_PRV_H__ */
+--- a/drivers/gpu/ipu-v3/ipu-smfc.c
++++ /dev/null
+@@ -1,202 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+- */
+-#include <linux/export.h>
+-#include <linux/types.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <linux/errno.h>
+-#include <linux/spinlock.h>
+-#include <linux/delay.h>
+-#include <linux/clk.h>
+-#include <video/imx-ipu-v3.h>
+-
+-#include "ipu-prv.h"
+-
+-struct ipu_smfc {
+-      struct ipu_smfc_priv *priv;
+-      int chno;
+-      bool inuse;
+-};
+-
+-struct ipu_smfc_priv {
+-      void __iomem *base;
+-      spinlock_t lock;
+-      struct ipu_soc *ipu;
+-      struct ipu_smfc channel[4];
+-      int use_count;
+-};
+-
+-/*SMFC Registers */
+-#define SMFC_MAP      0x0000
+-#define SMFC_WMC      0x0004
+-#define SMFC_BS               0x0008
+-
+-int ipu_smfc_set_burstsize(struct ipu_smfc *smfc, int burstsize)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-      u32 val, shift;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      shift = smfc->chno * 4;
+-      val = readl(priv->base + SMFC_BS);
+-      val &= ~(0xf << shift);
+-      val |= burstsize << shift;
+-      writel(val, priv->base + SMFC_BS);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
+-
+-int ipu_smfc_map_channel(struct ipu_smfc *smfc, int csi_id, int mipi_id)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-      u32 val, shift;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      shift = smfc->chno * 3;
+-      val = readl(priv->base + SMFC_MAP);
+-      val &= ~(0x7 << shift);
+-      val |= ((csi_id << 2) | mipi_id) << shift;
+-      writel(val, priv->base + SMFC_MAP);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
+-
+-int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-      u32 val, shift;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      shift = smfc->chno * 6 + (smfc->chno > 1 ? 4 : 0);
+-      val = readl(priv->base + SMFC_WMC);
+-      val &= ~(0x3f << shift);
+-      val |= ((clr_level << 3) | set_level) << shift;
+-      writel(val, priv->base + SMFC_WMC);
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_set_watermark);
+-
+-int ipu_smfc_enable(struct ipu_smfc *smfc)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      if (!priv->use_count)
+-              ipu_module_enable(priv->ipu, IPU_CONF_SMFC_EN);
+-
+-      priv->use_count++;
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_enable);
+-
+-int ipu_smfc_disable(struct ipu_smfc *smfc)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      priv->use_count--;
+-
+-      if (!priv->use_count)
+-              ipu_module_disable(priv->ipu, IPU_CONF_SMFC_EN);
+-
+-      if (priv->use_count < 0)
+-              priv->use_count = 0;
+-
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_disable);
+-
+-struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno)
+-{
+-      struct ipu_smfc_priv *priv = ipu->smfc_priv;
+-      struct ipu_smfc *smfc, *ret;
+-      unsigned long flags;
+-
+-      if (chno >= 4)
+-              return ERR_PTR(-EINVAL);
+-
+-      smfc = &priv->channel[chno];
+-      ret = smfc;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-
+-      if (smfc->inuse) {
+-              ret = ERR_PTR(-EBUSY);
+-              goto unlock;
+-      }
+-
+-      smfc->inuse = true;
+-unlock:
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_get);
+-
+-void ipu_smfc_put(struct ipu_smfc *smfc)
+-{
+-      struct ipu_smfc_priv *priv = smfc->priv;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&priv->lock, flags);
+-      smfc->inuse = false;
+-      spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_smfc_put);
+-
+-int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
+-                unsigned long base)
+-{
+-      struct ipu_smfc_priv *priv;
+-      int i;
+-
+-      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-      if (!priv)
+-              return -ENOMEM;
+-
+-      ipu->smfc_priv = priv;
+-      spin_lock_init(&priv->lock);
+-      priv->ipu = ipu;
+-
+-      priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!priv->base)
+-              return -ENOMEM;
+-
+-      for (i = 0; i < 4; i++) {
+-              priv->channel[i].priv = priv;
+-              priv->channel[i].chno = i;
+-      }
+-
+-      pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, priv->base);
+-
+-      return 0;
+-}
+-
+-void ipu_smfc_exit(struct ipu_soc *ipu)
+-{
+-}
+--- a/drivers/gpu/ipu-v3/ipu-vdi.c
++++ /dev/null
+@@ -1,234 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2012-2016 Mentor Graphics Inc.
+- * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+- */
+-#include <linux/io.h>
+-#include "ipu-prv.h"
+-
+-struct ipu_vdi {
+-      void __iomem *base;
+-      u32 module;
+-      spinlock_t lock;
+-      int use_count;
+-      struct ipu_soc *ipu;
+-};
+-
+-
+-/* VDI Register Offsets */
+-#define VDI_FSIZE 0x0000
+-#define VDI_C     0x0004
+-
+-/* VDI Register Fields */
+-#define VDI_C_CH_420             (0 << 1)
+-#define VDI_C_CH_422             (1 << 1)
+-#define VDI_C_MOT_SEL_MASK       (0x3 << 2)
+-#define VDI_C_MOT_SEL_FULL       (2 << 2)
+-#define VDI_C_MOT_SEL_LOW        (1 << 2)
+-#define VDI_C_MOT_SEL_MED        (0 << 2)
+-#define VDI_C_BURST_SIZE1_4      (3 << 4)
+-#define VDI_C_BURST_SIZE2_4      (3 << 8)
+-#define VDI_C_BURST_SIZE3_4      (3 << 12)
+-#define VDI_C_BURST_SIZE_MASK    0xF
+-#define VDI_C_BURST_SIZE1_OFFSET 4
+-#define VDI_C_BURST_SIZE2_OFFSET 8
+-#define VDI_C_BURST_SIZE3_OFFSET 12
+-#define VDI_C_VWM1_SET_1         (0 << 16)
+-#define VDI_C_VWM1_SET_2         (1 << 16)
+-#define VDI_C_VWM1_CLR_2         (1 << 19)
+-#define VDI_C_VWM3_SET_1         (0 << 22)
+-#define VDI_C_VWM3_SET_2         (1 << 22)
+-#define VDI_C_VWM3_CLR_2         (1 << 25)
+-#define VDI_C_TOP_FIELD_MAN_1    (1 << 30)
+-#define VDI_C_TOP_FIELD_AUTO_1   (1 << 31)
+-
+-static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
+-{
+-      return readl(vdi->base + offset);
+-}
+-
+-static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
+-                               unsigned int offset)
+-{
+-      writel(value, vdi->base + offset);
+-}
+-
+-void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
+-{
+-      bool top_field_0 = false;
+-      unsigned long flags;
+-      u32 reg;
+-
+-      switch (field) {
+-      case V4L2_FIELD_INTERLACED_TB:
+-      case V4L2_FIELD_SEQ_TB:
+-      case V4L2_FIELD_TOP:
+-              top_field_0 = true;
+-              break;
+-      case V4L2_FIELD_INTERLACED_BT:
+-      case V4L2_FIELD_SEQ_BT:
+-      case V4L2_FIELD_BOTTOM:
+-              top_field_0 = false;
+-              break;
+-      default:
+-              top_field_0 = (std & V4L2_STD_525_60) ? true : false;
+-              break;
+-      }
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-
+-      reg = ipu_vdi_read(vdi, VDI_C);
+-      if (top_field_0)
+-              reg &= ~(VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1);
+-      else
+-              reg |= VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1;
+-      ipu_vdi_write(vdi, reg, VDI_C);
+-
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
+-
+-void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
+-{
+-      unsigned long flags;
+-      u32 reg;
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-
+-      reg = ipu_vdi_read(vdi, VDI_C);
+-
+-      reg &= ~VDI_C_MOT_SEL_MASK;
+-
+-      switch (motion_sel) {
+-      case MED_MOTION:
+-              reg |= VDI_C_MOT_SEL_MED;
+-              break;
+-      case HIGH_MOTION:
+-              reg |= VDI_C_MOT_SEL_FULL;
+-              break;
+-      default:
+-              reg |= VDI_C_MOT_SEL_LOW;
+-              break;
+-      }
+-
+-      ipu_vdi_write(vdi, reg, VDI_C);
+-
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
+-
+-void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
+-{
+-      unsigned long flags;
+-      u32 pixel_fmt, reg;
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-
+-      reg = ((yres - 1) << 16) | (xres - 1);
+-      ipu_vdi_write(vdi, reg, VDI_FSIZE);
+-
+-      /*
+-       * Full motion, only vertical filter is used.
+-       * Burst size is 4 accesses
+-       */
+-      if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+-          code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+-          code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+-          code == MEDIA_BUS_FMT_YUYV8_1X16)
+-              pixel_fmt = VDI_C_CH_422;
+-      else
+-              pixel_fmt = VDI_C_CH_420;
+-
+-      reg = ipu_vdi_read(vdi, VDI_C);
+-      reg |= pixel_fmt;
+-      reg |= VDI_C_BURST_SIZE2_4;
+-      reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
+-      reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
+-      ipu_vdi_write(vdi, reg, VDI_C);
+-
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_setup);
+-
+-void ipu_vdi_unsetup(struct ipu_vdi *vdi)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-      ipu_vdi_write(vdi, 0, VDI_FSIZE);
+-      ipu_vdi_write(vdi, 0, VDI_C);
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
+-
+-int ipu_vdi_enable(struct ipu_vdi *vdi)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-
+-      if (!vdi->use_count)
+-              ipu_module_enable(vdi->ipu, vdi->module);
+-
+-      vdi->use_count++;
+-
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_enable);
+-
+-int ipu_vdi_disable(struct ipu_vdi *vdi)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&vdi->lock, flags);
+-
+-      if (vdi->use_count) {
+-              if (!--vdi->use_count)
+-                      ipu_module_disable(vdi->ipu, vdi->module);
+-      }
+-
+-      spin_unlock_irqrestore(&vdi->lock, flags);
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_disable);
+-
+-struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
+-{
+-      return ipu->vdi_priv;
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_get);
+-
+-void ipu_vdi_put(struct ipu_vdi *vdi)
+-{
+-}
+-EXPORT_SYMBOL_GPL(ipu_vdi_put);
+-
+-int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+-               unsigned long base, u32 module)
+-{
+-      struct ipu_vdi *vdi;
+-
+-      vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
+-      if (!vdi)
+-              return -ENOMEM;
+-
+-      ipu->vdi_priv = vdi;
+-
+-      spin_lock_init(&vdi->lock);
+-      vdi->module = module;
+-      vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
+-      if (!vdi->base)
+-              return -ENOMEM;
+-
+-      dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
+-      vdi->ipu = ipu;
+-
+-      return 0;
+-}
+-
+-void ipu_vdi_exit(struct ipu_soc *ipu)
+-{
+-}
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -15,7 +15,7 @@ source "drivers/char/agp/Kconfig"
+ source "drivers/gpu/vga/Kconfig"
+ source "drivers/gpu/host1x/Kconfig"
+-source "drivers/gpu/ipu-v3/Kconfig"
++source "drivers/gpu/imx/Kconfig"
+ source "drivers/gpu/drm/Kconfig"