From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Fri, 30 Dec 2016 12:11:34 +0000 (+0100)
Subject: lantiq: fix dma locking problems with SMP
X-Git-Tag: v17.01.0-rc1~214
X-Git-Url: http://git.openwrt.org/?p=openwrt%2Fopenwrt.git;a=commitdiff_plain;h=2dac67464d47fd1c7d87108cfdc04af28d32b264

lantiq: fix dma locking problems with SMP

The DMA controller code used only local IRQ locks, which do not protect
the critical sections against other CPUs. Fix this by using spinlocks.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---

diff --git a/target/linux/lantiq/patches-4.4/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch b/target/linux/lantiq/patches-4.4/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
new file mode 100644
index 0000000000..234a2527fc
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
@@ -0,0 +1,152 @@
+From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Thu, 29 Dec 2016 21:02:57 +0100
+Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP
+
+The DMA controller channel and port configuration is changed by
+selecting the port or channel in one register and then updating the
+configuration in other registers. This has to be done in one atomic
+operation. Previously only the local interrupts were deactivated, which
+works for single-CPU systems. If the system supports SMP, stronger
+locking is needed; use spinlocks instead.
+On more recent SoCs (at least xrx200 and later) there are two memory
+regions for changing the configuration; there we could use one area for
+each CPU and would not have to synchronize between the CPUs any more.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
+ 1 file changed, 20 insertions(+), 18 deletions(-)
+
+--- a/arch/mips/lantiq/xway/dma.c
++++ b/arch/mips/lantiq/xway/dma.c
+@@ -20,6 +20,7 @@
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
++#include <linux/spinlock.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ 
+@@ -59,16 +60,17 @@
+ 						ltq_dma_membase + (z))
+ 
+ static void __iomem *ltq_dma_membase;
++static DEFINE_SPINLOCK(ltq_dma_lock);
+ 
+ void
+ ltq_dma_enable_irq(struct ltq_dma_channel *ch)
+ {
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
+ 
+@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
+ {
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
+ 
+@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
+ {
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
+ 
+@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
+ {
+ 	unsigned long flag;
+ 
+-	local_irq_save(flag);
++	spin_lock_irqsave(&ltq_dma_lock, flag);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
+-	ltq_dma_enable_irq(ch);
+-	local_irq_restore(flag);
++	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
++	spin_unlock_irqrestore(&ltq_dma_lock, flag);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_open);
+ 
+@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
+ {
+ 	unsigned long flag;
+ 
+-	local_irq_save(flag);
++	spin_lock_irqsave(&ltq_dma_lock, flag);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+-	ltq_dma_disable_irq(ch);
+-	local_irq_restore(flag);
++	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
++	spin_unlock_irqrestore(&ltq_dma_lock, flag);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_close);
+ 
+@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
+ 			      &ch->phys, GFP_ATOMIC);
+ 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
+ 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
+@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
+ 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
+ 		;
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ 
+ void
+@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel 
+ 
+ 	ltq_dma_alloc(ch);
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
+ 
+@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel 
+ 
+ 	ltq_dma_alloc(ch);
+ 
+-	local_irq_save(flags);
++	spin_lock_irqsave(&ltq_dma_lock, flags);
+ 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
+-	local_irq_restore(flags);
++	spin_unlock_irqrestore(&ltq_dma_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
+
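
---
Editor's note (a sketch, not part of the patch): the race the spinlock
closes comes from the indirect register interface the patch describes:
software first writes a channel number to LTQ_DMA_CS and then accesses
per-channel registers such as LTQ_DMA_CCTRL through that selection. The
sketch below illustrates the pattern using the register and bit names
from the patch; the dma_chan_on() helper is hypothetical, and plain
readl()/writel() stand in for the driver's ltq_dma_r32()/ltq_dma_w32()
accessors:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static void __iomem *ltq_dma_membase;  /* mapped DMA register block */
    static DEFINE_SPINLOCK(ltq_dma_lock);

    /* Hypothetical helper: switch one DMA channel on. */
    static void dma_chan_on(int nr)
    {
            unsigned long flags;

            /*
             * local_irq_save() would only keep *this* CPU from being
             * interrupted between the two accesses; another CPU could
             * still repoint LTQ_DMA_CS in parallel and the CCTRL update
             * would hit the wrong channel. The spinlock (with IRQs
             * disabled) serializes the whole select-then-access
             * sequence across all CPUs.
             */
            spin_lock_irqsave(&ltq_dma_lock, flags);
            /* select the channel ... */
            writel(nr, ltq_dma_membase + LTQ_DMA_CS);
            /* ... then update its per-channel configuration */
            writel(readl(ltq_dma_membase + LTQ_DMA_CCTRL) | DMA_CHAN_ON,
                   ltq_dma_membase + LTQ_DMA_CCTRL);
            spin_unlock_irqrestore(&ltq_dma_lock, flags);
    }

This also explains why ltq_dma_open() and ltq_dma_close() now write
LTQ_DMA_IRNEN directly instead of calling ltq_dma_enable_irq() and
ltq_dma_disable_irq(): those helpers take the same non-recursive
spinlock, so calling them with the lock already held would deadlock.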