apm821xx: sata_dwc_460ex: backport fixes and cleanups from 4.7
target/linux/apm821xx/patches-4.4/020-sata-dwc.patch
1 From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
2 Date: Sat, 21 May 2016 22:46:32 +0200
3 Subject: [PATCH v2 00/23] ata: sata_dwc_460ex: make it working again
4
5 The last approach, in commit 8b3444852a2b ("sata_dwc_460ex: move to generic
6 DMA driver"), to switch to the generic DMA engine API wasn't tested on bare metal.
7 Besides that, we are expecting new board support with the same SATA IP but
8 a different DMA controller.
9
10 This series targets the following things:
11 - a few bug fixes to the original driver
12 - a part that fixes the DMA engine usage, in particular the dw_dmac driver
13 - a move of the driver to the generic PHY framework and the "dmas" property, which requires a DTS update (see the sketch below)
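
For context, here is a minimal, illustrative sketch (not part of the series
itself) of the resource lookup the reworked driver performs: the DMA channel
now comes from the generic "dmas"/"dma-names" binding and the PHY from the
generic PHY framework. The helper name and combined error handling below are
hypothetical simplifications of what the driver actually does in its
port_start() and probe() paths.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int sata_dwc_get_resources_sketch(struct device *dev,
					 struct dma_chan **chan,
					 struct phy **phy)
{
	/* Matches the "sata-dma" entry in the DTS "dma-names" property. */
	*chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);

	/* Optional: boards without a SATA PHY node simply get NULL here. */
	*phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(*phy)) {
		dma_release_channel(*chan);
		return PTR_ERR(*phy);
	}

	return 0;
}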
14
15 I have tested the driver myself on the Sam460ex and WD MyBookLive (apollo3g)
16 boards. In any case I ask Christian, Måns, and Julian to test independently and
17 provide a Tested-by tag or an error report.
18
19 This series depends on a previously published but not yet fully applied series [1].
20
21 The patches are also available via a public branch [2].
22
23 [1] http://www.spinics.net/lists/dmaengine/msg09250.html
24 [2] https://bitbucket.org/andy-shev/linux/branch/topic%2Fdw%2Fsata
25
26 Since v1:
27 - simplify patch 8 (David Laight)
28 - add Tested-by and Acked-by tags
29
30 Andy Shevchenko (11):
31 ata: sata_dwc_460ex: set dma_boundary to 0x1fff
32 ata: sata_dwc_460ex: burst size must be in items not bytes
33 ata: sata_dwc_460ex: DMA is always a flow controller
34 ata: sata_dwc_460ex: select only core part of DMA driver
35 ata: sata_dwc_460ex: don't call ata_sff_qc_issue() on DMA commands
36 ata: sata_dwc_460ex: correct HOSTDEV{P}_FROM_*() macros
37 ata: sata_dwc_460ex: supply physical address of FIFO to DMA
38 ata: sata_dwc_460ex: switch to new dmaengine_terminate_* API
39 ata: sata_dwc_460ex: use devm_ioremap
40 ata: sata_dwc_460ex: make debug messages neat
41 powerpc/4xx: Device tree update for the 460ex DWC SATA
42
43 Christian Lamparter (1):
44 ata: sata_dwc_460ex: fix crash on offline links without an attached
45 drive
46
47 Mans Rullgard (11):
48 ata: sata_dwc_460ex: remove incorrect locking
49 ata: sata_dwc_460ex: skip dma setup for non-dma commands
50 ata: sata_dwc_460ex: use "dmas" DT property to find dma channel
51 ata: sata_dwc_460ex: add phy support
52 ata: sata_dwc_460ex: get rid of global data
53 ata: sata_dwc_460ex: remove empty libata callback
54 ata: sata_dwc_460ex: get rid of some pointless casts
55 ata: sata_dwc_460ex: get rid of incorrect cast
56 ata: sata_dwc_460ex: add __iomem to register base pointer
57 ata: sata_dwc_460ex: use readl/writel_relaxed()
58 ata: sata_dwc_460ex: tidy up sata_dwc_clear_dmacr()
59
60 arch/powerpc/boot/dts/canyonlands.dts | 15 +-
61 drivers/ata/Kconfig | 12 +-
62 drivers/ata/sata_dwc_460ex.c | 552 +++++++++++++++++-----------------
63 3 files changed, 305 insertions(+), 274 deletions(-)
64
65 ---
66 drivers/ata/sata_dwc_460ex.c | 552 ++++++++++++++++++++++---------------------
67 1 file changed, 283 insertions(+), 269 deletions(-)
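
As a quick illustration of two of the fixes listed above ("burst size must be
in items not bytes" and "DMA is always a flow controller"), the slave
configuration the driver ends up programming looks roughly like the sketch
below. It is a condensed, hypothetical restatement of the dma_dwc_xfer_setup()
hunk in the diff that follows, not additional code from the series.

#include <linux/dmaengine.h>
#include <linux/string.h>

static void sata_dwc_fill_sconf_sketch(struct dma_slave_config *sconf,
				       dma_addr_t fifo_addr,
				       enum dma_transfer_direction dir)
{
	memset(sconf, 0, sizeof(*sconf));

	/* The SATA FIFO is the device-side endpoint in both directions. */
	if (dir == DMA_DEV_TO_MEM)
		sconf->src_addr = fifo_addr;
	else
		sconf->dst_addr = fifo_addr;
	sconf->direction = dir;

	/* The controller moves 4-byte words ... */
	sconf->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* ... so a 64-byte AHB burst is 64 / 4 = 16 items, not 64. */
	sconf->src_maxburst = 64 / 4;
	sconf->dst_maxburst = 64 / 4;

	/* The DMA engine, not the SATA device, acts as flow controller. */
	sconf->device_fc = false;
}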
68
69 diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
70 index 9020349..00c2af1 100644
71 --- a/drivers/ata/sata_dwc_460ex.c
72 +++ b/drivers/ata/sata_dwc_460ex.c
73 @@ -30,10 +30,12 @@
74 #include <linux/kernel.h>
75 #include <linux/module.h>
76 #include <linux/device.h>
77 +#include <linux/dmaengine.h>
78 #include <linux/of_address.h>
79 #include <linux/of_irq.h>
80 #include <linux/of_platform.h>
81 #include <linux/platform_device.h>
82 +#include <linux/phy/phy.h>
83 #include <linux/libata.h>
84 #include <linux/slab.h>
85
86 @@ -42,10 +44,6 @@
87 #include <scsi/scsi_host.h>
88 #include <scsi/scsi_cmnd.h>
89
90 -/* Supported DMA engine drivers */
91 -#include <linux/platform_data/dma-dw.h>
92 -#include <linux/dma/dw.h>
93 -
94 /* These two are defined in "libata.h" */
95 #undef DRV_NAME
96 #undef DRV_VERSION
97 @@ -53,19 +51,14 @@
98 #define DRV_NAME "sata-dwc"
99 #define DRV_VERSION "1.3"
100
101 -#ifndef out_le32
102 -#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
103 -#endif
104 -
105 -#ifndef in_le32
106 -#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
107 -#endif
108 +#define sata_dwc_writel(a, v) writel_relaxed(v, a)
109 +#define sata_dwc_readl(a) readl_relaxed(a)
110
111 #ifndef NO_IRQ
112 #define NO_IRQ 0
113 #endif
114
115 -#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
116 +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
117
118 enum {
119 SATA_DWC_MAX_PORTS = 1,
120 @@ -102,7 +95,7 @@ struct sata_dwc_regs {
121 u32 versionr; /* Version Register */
122 u32 idr; /* ID Register */
123 u32 unimpl[192]; /* Unimplemented */
124 - u32 dmadr[256]; /* FIFO Locations in DMA Mode */
125 + u32 dmadr[256]; /* FIFO Locations in DMA Mode */
126 };
127
128 enum {
129 @@ -146,9 +139,14 @@ struct sata_dwc_device {
130 struct device *dev; /* generic device struct */
131 struct ata_probe_ent *pe; /* ptr to probe-ent */
132 struct ata_host *host;
133 - u8 __iomem *reg_base;
134 - struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
135 + struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */
136 + u32 sactive_issued;
137 + u32 sactive_queued;
138 + struct phy *phy;
139 + phys_addr_t dmadr;
140 +#ifdef CONFIG_SATA_DWC_OLD_DMA
141 struct dw_dma_chip *dma;
142 +#endif
143 };
144
145 #define SATA_DWC_QCMD_MAX 32
146 @@ -159,25 +157,19 @@ struct sata_dwc_device_port {
147 int dma_pending[SATA_DWC_QCMD_MAX];
148
149 /* DMA info */
150 - struct dw_dma_slave *dws;
151 struct dma_chan *chan;
152 struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
153 u32 dma_interrupt_count;
154 };
155
156 /*
157 - * Commonly used DWC SATA driver Macros
158 + * Commonly used DWC SATA driver macros
159 */
160 -#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
161 - (host)->private_data)
162 -#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
163 - (ap)->host->private_data)
164 -#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
165 - (ap)->private_data)
166 -#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
167 - (qc)->ap->host->private_data)
168 -#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
169 - (hsdevp)->hsdev)
170 +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data)
171 +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data)
172 +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data)
173 +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data)
174 +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev)
175
176 enum {
177 SATA_DWC_CMD_ISSUED_NOT = 0,
178 @@ -190,21 +182,6 @@ enum {
179 SATA_DWC_DMA_PENDING_RX = 2,
180 };
181
182 -struct sata_dwc_host_priv {
183 - void __iomem *scr_addr_sstatus;
184 - u32 sata_dwc_sactive_issued ;
185 - u32 sata_dwc_sactive_queued ;
186 -};
187 -
188 -static struct sata_dwc_host_priv host_pvt;
189 -
190 -static struct dw_dma_slave sata_dwc_dma_dws = {
191 - .src_id = 0,
192 - .dst_id = 0,
193 - .src_master = 0,
194 - .dst_master = 1,
195 -};
196 -
197 /*
198 * Prototypes
199 */
200 @@ -215,6 +192,93 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
201 static void sata_dwc_port_stop(struct ata_port *ap);
202 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
203
204 +#ifdef CONFIG_SATA_DWC_OLD_DMA
205 +
206 +#include <linux/platform_data/dma-dw.h>
207 +#include <linux/dma/dw.h>
208 +
209 +static struct dw_dma_slave sata_dwc_dma_dws = {
210 + .src_id = 0,
211 + .dst_id = 0,
212 + .m_master = 1,
213 + .p_master = 0,
214 +};
215 +
216 +static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
217 +{
218 + struct dw_dma_slave *dws = &sata_dwc_dma_dws;
219 +
220 + if (dws->dma_dev != chan->device->dev)
221 + return false;
222 +
223 + chan->private = dws;
224 + return true;
225 +}
226 +
227 +static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
228 +{
229 + struct sata_dwc_device *hsdev = hsdevp->hsdev;
230 + struct dw_dma_slave *dws = &sata_dwc_dma_dws;
231 + dma_cap_mask_t mask;
232 +
233 + dws->dma_dev = hsdev->dev;
234 +
235 + dma_cap_zero(mask);
236 + dma_cap_set(DMA_SLAVE, mask);
237 +
238 + /* Acquire DMA channel */
239 + hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
240 + if (!hsdevp->chan) {
241 + dev_err(hsdev->dev, "%s: dma channel unavailable\n",
242 + __func__);
243 + return -EAGAIN;
244 + }
245 +
246 + return 0;
247 +}
248 +
249 +static int sata_dwc_dma_init_old(struct platform_device *pdev,
250 + struct sata_dwc_device *hsdev)
251 +{
252 + struct device_node *np = pdev->dev.of_node;
253 + struct resource *res;
254 +
255 + hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
256 + if (!hsdev->dma)
257 + return -ENOMEM;
258 +
259 + hsdev->dma->dev = &pdev->dev;
260 +
261 + /* Get SATA DMA interrupt number */
262 + hsdev->dma->irq = irq_of_parse_and_map(np, 1);
263 + if (hsdev->dma->irq == NO_IRQ) {
264 + dev_err(&pdev->dev, "no SATA DMA irq\n");
265 + return -ENODEV;
266 + }
267 +
268 + /* Get physical SATA DMA register base address */
269 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
270 + hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
271 + if (IS_ERR(hsdev->dma->regs)) {
272 + dev_err(&pdev->dev,
273 + "ioremap failed for AHBDMA register address\n");
274 + return PTR_ERR(hsdev->dma->regs);
275 + }
276 +
277 + /* Initialize AHB DMAC */
278 + return dw_dma_probe(hsdev->dma);
279 +}
280 +
281 +static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
282 +{
283 + if (!hsdev->dma)
284 + return;
285 +
286 + dw_dma_remove(hsdev->dma);
287 +}
288 +
289 +#endif
290 +
291 static const char *get_prot_descript(u8 protocol)
292 {
293 switch ((enum ata_tf_protocols)protocol) {
294 @@ -305,21 +369,20 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd
295 struct ata_port *ap = qc->ap;
296 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
297 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
298 - dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
299 struct dma_slave_config sconf;
300 struct dma_async_tx_descriptor *desc;
301
302 if (qc->dma_dir == DMA_DEV_TO_MEM) {
303 - sconf.src_addr = addr;
304 - sconf.device_fc = true;
305 + sconf.src_addr = hsdev->dmadr;
306 + sconf.device_fc = false;
307 } else { /* DMA_MEM_TO_DEV */
308 - sconf.dst_addr = addr;
309 + sconf.dst_addr = hsdev->dmadr;
310 sconf.device_fc = false;
311 }
312
313 sconf.direction = qc->dma_dir;
314 - sconf.src_maxburst = AHB_DMA_BRST_DFLT;
315 - sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
316 + sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
317 + sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
318 sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
319 sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
320
321 @@ -336,8 +399,8 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd
322 desc->callback = dma_dwc_xfer_done;
323 desc->callback_param = hsdev;
324
325 - dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n",
326 - __func__, qc->sg, qc->n_elem, &addr);
327 + dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
328 + qc->sg, qc->n_elem, &hsdev->dmadr);
329
330 return desc;
331 }
332 @@ -350,48 +413,38 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
333 return -EINVAL;
334 }
335
336 - *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
337 - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
338 - __func__, link->ap->print_id, scr, *val);
339 + *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
340 + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
341 + link->ap->print_id, scr, *val);
342
343 return 0;
344 }
345
346 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
347 {
348 - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
349 - __func__, link->ap->print_id, scr, val);
350 + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
351 + link->ap->print_id, scr, val);
352 if (scr > SCR_NOTIFICATION) {
353 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
354 __func__, scr);
355 return -EINVAL;
356 }
357 - out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
358 + sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
359
360 return 0;
361 }
362
363 -static u32 core_scr_read(unsigned int scr)
364 -{
365 - return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
366 -}
367 -
368 -static void core_scr_write(unsigned int scr, u32 val)
369 -{
370 - out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
371 -}
372 -
373 -static void clear_serror(void)
374 +static void clear_serror(struct ata_port *ap)
375 {
376 u32 val;
377 - val = core_scr_read(SCR_ERROR);
378 - core_scr_write(SCR_ERROR, val);
379 + sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
380 + sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
381 }
382
383 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
384 {
385 - out_le32(&hsdev->sata_dwc_regs->intpr,
386 - in_le32(&hsdev->sata_dwc_regs->intpr));
387 + sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
388 + sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
389 }
390
391 static u32 qcmd_tag_to_mask(u8 tag)
392 @@ -412,7 +465,7 @@ static void sata_dwc_error_intr(struct ata_port *ap,
393
394 ata_ehi_clear_desc(ehi);
395
396 - serror = core_scr_read(SCR_ERROR);
397 + sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
398 status = ap->ops->sff_check_status(ap);
399
400 tag = ap->link.active_tag;
401 @@ -423,7 +476,7 @@ static void sata_dwc_error_intr(struct ata_port *ap,
402 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
403
404 /* Clear error register and interrupt bit */
405 - clear_serror();
406 + clear_serror(ap);
407 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
408
409 /* This is the only error happening now. TODO check for exact error */
410 @@ -462,12 +515,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
411 int handled, num_processed, port = 0;
412 uint intpr, sactive, sactive2, tag_mask;
413 struct sata_dwc_device_port *hsdevp;
414 - host_pvt.sata_dwc_sactive_issued = 0;
415 + hsdev->sactive_issued = 0;
416
417 spin_lock_irqsave(&host->lock, flags);
418
419 /* Read the interrupt register */
420 - intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
421 + intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
422
423 ap = host->ports[port];
424 hsdevp = HSDEVP_FROM_AP(ap);
425 @@ -486,12 +539,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
426 if (intpr & SATA_DWC_INTPR_NEWFP) {
427 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
428
429 - tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
430 + tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
431 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
432 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
433 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
434
435 - host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
436 + hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
437
438 qc = ata_qc_from_tag(ap, tag);
439 /*
440 @@ -505,11 +558,11 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
441 handled = 1;
442 goto DONE;
443 }
444 - sactive = core_scr_read(SCR_ACTIVE);
445 - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
446 + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
447 + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
448
449 /* If no sactive issued and tag_mask is zero then this is not NCQ */
450 - if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
451 + if (hsdev->sactive_issued == 0 && tag_mask == 0) {
452 if (ap->link.active_tag == ATA_TAG_POISON)
453 tag = 0;
454 else
455 @@ -579,22 +632,19 @@ DRVSTILLBUSY:
456 */
457
458 /* process completed commands */
459 - sactive = core_scr_read(SCR_ACTIVE);
460 - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
461 + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
462 + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
463
464 - if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
465 - tag_mask > 1) {
466 + if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
467 dev_dbg(ap->dev,
468 "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
469 - __func__, sactive, host_pvt.sata_dwc_sactive_issued,
470 - tag_mask);
471 + __func__, sactive, hsdev->sactive_issued, tag_mask);
472 }
473
474 - if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
475 - (host_pvt.sata_dwc_sactive_issued)) {
476 + if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
477 dev_warn(ap->dev,
478 - "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n",
479 - sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
480 + "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
481 + sactive, hsdev->sactive_issued, tag_mask);
482 }
483
484 /* read just to clear ... not bad if currently still busy */
485 @@ -656,7 +706,7 @@ STILLBUSY:
486 * we were processing --we read status as part of processing a completed
487 * command).
488 */
489 - sactive2 = core_scr_read(SCR_ACTIVE);
490 + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
491 if (sactive2 != sactive) {
492 dev_dbg(ap->dev,
493 "More completed - sactive=0x%x sactive2=0x%x\n",
494 @@ -672,15 +722,14 @@ DONE:
495 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
496 {
497 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
498 + u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
499
500 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
501 - out_le32(&(hsdev->sata_dwc_regs->dmacr),
502 - SATA_DWC_DMACR_RX_CLEAR(
503 - in_le32(&(hsdev->sata_dwc_regs->dmacr))));
504 + dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
505 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
506 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
507 - out_le32(&(hsdev->sata_dwc_regs->dmacr),
508 - SATA_DWC_DMACR_TX_CLEAR(
509 - in_le32(&(hsdev->sata_dwc_regs->dmacr))));
510 + dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
511 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
512 } else {
513 /*
514 * This should not happen, it indicates the driver is out of
515 @@ -688,10 +737,9 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
516 */
517 dev_err(hsdev->dev,
518 "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
519 - __func__, tag, hsdevp->dma_pending[tag],
520 - in_le32(&hsdev->sata_dwc_regs->dmacr));
521 - out_le32(&(hsdev->sata_dwc_regs->dmacr),
522 - SATA_DWC_DMACR_TXRXCH_CLEAR);
523 + __func__, tag, hsdevp->dma_pending[tag], dmacr);
524 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
525 + SATA_DWC_DMACR_TXRXCH_CLEAR);
526 }
527 }
528
529 @@ -716,7 +764,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
530 __func__, qc->tag, qc->tf.command,
531 get_dma_dir_descript(qc->dma_dir),
532 get_prot_descript(qc->tf.protocol),
533 - in_le32(&(hsdev->sata_dwc_regs->dmacr)));
534 + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
535 }
536 #endif
537
538 @@ -725,7 +773,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
539 dev_err(ap->dev,
540 "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
541 __func__,
542 - in_le32(&(hsdev->sata_dwc_regs->dmacr)));
543 + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
544 }
545
546 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
547 @@ -742,8 +790,9 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
548 u8 status = 0;
549 u32 mask = 0x0;
550 u8 tag = qc->tag;
551 + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
552 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
553 - host_pvt.sata_dwc_sactive_queued = 0;
554 + hsdev->sactive_queued = 0;
555 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
556
557 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
558 @@ -756,10 +805,8 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
559
560 /* clear active bit */
561 mask = (~(qcmd_tag_to_mask(tag)));
562 - host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
563 - & mask;
564 - host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
565 - & mask;
566 + hsdev->sactive_queued = hsdev->sactive_queued & mask;
567 + hsdev->sactive_issued = hsdev->sactive_issued & mask;
568 ata_qc_complete(qc);
569 return 0;
570 }
571 @@ -767,54 +814,62 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
572 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
573 {
574 /* Enable selective interrupts by setting the interrupt maskregister*/
575 - out_le32(&hsdev->sata_dwc_regs->intmr,
576 - SATA_DWC_INTMR_ERRM |
577 - SATA_DWC_INTMR_NEWFPM |
578 - SATA_DWC_INTMR_PMABRTM |
579 - SATA_DWC_INTMR_DMATM);
580 + sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
581 + SATA_DWC_INTMR_ERRM |
582 + SATA_DWC_INTMR_NEWFPM |
583 + SATA_DWC_INTMR_PMABRTM |
584 + SATA_DWC_INTMR_DMATM);
585 /*
586 * Unmask the error bits that should trigger an error interrupt by
587 * setting the error mask register.
588 */
589 - out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
590 + sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
591
592 dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
593 - __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
594 - in_le32(&hsdev->sata_dwc_regs->errmr));
595 + __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
596 + sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
597 }
598
599 -static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
600 +static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
601 {
602 - struct sata_dwc_device_port *hsdevp = param;
603 - struct dw_dma_slave *dws = hsdevp->dws;
604 + port->cmd_addr = base + 0x00;
605 + port->data_addr = base + 0x00;
606
607 - if (dws->dma_dev != chan->device->dev)
608 - return false;
609 + port->error_addr = base + 0x04;
610 + port->feature_addr = base + 0x04;
611
612 - chan->private = dws;
613 - return true;
614 -}
615 + port->nsect_addr = base + 0x08;
616
617 -static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
618 -{
619 - port->cmd_addr = (void __iomem *)base + 0x00;
620 - port->data_addr = (void __iomem *)base + 0x00;
621 + port->lbal_addr = base + 0x0c;
622 + port->lbam_addr = base + 0x10;
623 + port->lbah_addr = base + 0x14;
624
625 - port->error_addr = (void __iomem *)base + 0x04;
626 - port->feature_addr = (void __iomem *)base + 0x04;
627 + port->device_addr = base + 0x18;
628 + port->command_addr = base + 0x1c;
629 + port->status_addr = base + 0x1c;
630
631 - port->nsect_addr = (void __iomem *)base + 0x08;
632 + port->altstatus_addr = base + 0x20;
633 + port->ctl_addr = base + 0x20;
634 +}
635 +
636 +static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
637 +{
638 + struct sata_dwc_device *hsdev = hsdevp->hsdev;
639 + struct device *dev = hsdev->dev;
640
641 - port->lbal_addr = (void __iomem *)base + 0x0c;
642 - port->lbam_addr = (void __iomem *)base + 0x10;
643 - port->lbah_addr = (void __iomem *)base + 0x14;
644 +#ifdef CONFIG_SATA_DWC_OLD_DMA
645 + if (!of_find_property(dev->of_node, "dmas", NULL))
646 + return sata_dwc_dma_get_channel_old(hsdevp);
647 +#endif
648
649 - port->device_addr = (void __iomem *)base + 0x18;
650 - port->command_addr = (void __iomem *)base + 0x1c;
651 - port->status_addr = (void __iomem *)base + 0x1c;
652 + hsdevp->chan = dma_request_chan(dev, "sata-dma");
653 + if (IS_ERR(hsdevp->chan)) {
654 + dev_err(dev, "failed to allocate dma channel: %ld\n",
655 + PTR_ERR(hsdevp->chan));
656 + return PTR_ERR(hsdevp->chan);
657 + }
658
659 - port->altstatus_addr = (void __iomem *)base + 0x20;
660 - port->ctl_addr = (void __iomem *)base + 0x20;
661 + return 0;
662 }
663
664 /*
665 @@ -829,7 +884,6 @@ static int sata_dwc_port_start(struct ata_port *ap)
666 struct sata_dwc_device *hsdev;
667 struct sata_dwc_device_port *hsdevp = NULL;
668 struct device *pdev;
669 - dma_cap_mask_t mask;
670 int i;
671
672 hsdev = HSDEV_FROM_AP(ap);
673 @@ -853,20 +907,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
674 }
675 hsdevp->hsdev = hsdev;
676
677 - hsdevp->dws = &sata_dwc_dma_dws;
678 - hsdevp->dws->dma_dev = hsdev->dev;
679 -
680 - dma_cap_zero(mask);
681 - dma_cap_set(DMA_SLAVE, mask);
682 + err = sata_dwc_dma_get_channel(hsdevp);
683 + if (err)
684 + goto CLEANUP_ALLOC;
685
686 - /* Acquire DMA channel */
687 - hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
688 - if (!hsdevp->chan) {
689 - dev_err(hsdev->dev, "%s: dma channel unavailable\n",
690 - __func__);
691 - err = -EAGAIN;
692 + err = phy_power_on(hsdev->phy);
693 + if (err)
694 goto CLEANUP_ALLOC;
695 - }
696
697 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
698 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
699 @@ -877,18 +924,18 @@ static int sata_dwc_port_start(struct ata_port *ap)
700 if (ap->port_no == 0) {
701 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
702 __func__);
703 - out_le32(&hsdev->sata_dwc_regs->dmacr,
704 - SATA_DWC_DMACR_TXRXCH_CLEAR);
705 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
706 + SATA_DWC_DMACR_TXRXCH_CLEAR);
707
708 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
709 __func__);
710 - out_le32(&hsdev->sata_dwc_regs->dbtsr,
711 - (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
712 - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
713 + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
714 + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
715 + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
716 }
717
718 /* Clear any error bits before libata starts issuing commands */
719 - clear_serror();
720 + clear_serror(ap);
721 ap->private_data = hsdevp;
722 dev_dbg(ap->dev, "%s: done\n", __func__);
723 return 0;
724 @@ -903,11 +950,13 @@ CLEANUP:
725 static void sata_dwc_port_stop(struct ata_port *ap)
726 {
727 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
728 + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
729
730 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
731
732 - dmaengine_terminate_all(hsdevp->chan);
733 + dmaengine_terminate_sync(hsdevp->chan);
734 dma_release_channel(hsdevp->chan);
735 + phy_power_off(hsdev->phy);
736
737 kfree(hsdevp);
738 ap->private_data = NULL;
739 @@ -924,22 +973,20 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
740 struct ata_taskfile *tf,
741 u8 tag, u32 cmd_issued)
742 {
743 - unsigned long flags;
744 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
745
746 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
747 ata_get_cmd_descript(tf->command), tag);
748
749 - spin_lock_irqsave(&ap->host->lock, flags);
750 hsdevp->cmd_issued[tag] = cmd_issued;
751 - spin_unlock_irqrestore(&ap->host->lock, flags);
752 +
753 /*
754 * Clear SError before executing a new command.
755 * sata_dwc_scr_write and read can not be used here. Clearing the PM
756 * managed SError register for the disk needs to be done before the
757 * task file is loaded.
758 */
759 - clear_serror();
760 + clear_serror(ap);
761 ata_sff_exec_command(ap, tf);
762 }
763
764 @@ -992,18 +1039,18 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
765 sata_dwc_tf_dump(ap, &qc->tf);
766
767 if (start_dma) {
768 - reg = core_scr_read(SCR_ERROR);
769 + sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
770 if (reg & SATA_DWC_SERROR_ERR_BITS) {
771 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
772 __func__, reg);
773 }
774
775 if (dir == DMA_TO_DEVICE)
776 - out_le32(&hsdev->sata_dwc_regs->dmacr,
777 - SATA_DWC_DMACR_TXCHEN);
778 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
779 + SATA_DWC_DMACR_TXCHEN);
780 else
781 - out_le32(&hsdev->sata_dwc_regs->dmacr,
782 - SATA_DWC_DMACR_RXCHEN);
783 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
784 + SATA_DWC_DMACR_RXCHEN);
785
786 /* Enable AHB DMA transfer on the specified channel */
787 dmaengine_submit(desc);
788 @@ -1025,36 +1072,12 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
789 sata_dwc_bmdma_start_by_tag(qc, tag);
790 }
791
792 -/*
793 - * Function : sata_dwc_qc_prep_by_tag
794 - * arguments : ata_queued_cmd *qc, u8 tag
795 - * Return value : None
796 - * qc_prep for a particular queued command based on tag
797 - */
798 -static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
799 -{
800 - struct dma_async_tx_descriptor *desc;
801 - struct ata_port *ap = qc->ap;
802 - struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
803 -
804 - dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
805 - __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
806 - qc->n_elem);
807 -
808 - desc = dma_dwc_xfer_setup(qc);
809 - if (!desc) {
810 - dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n",
811 - __func__);
812 - return;
813 - }
814 - hsdevp->desc[tag] = desc;
815 -}
816 -
817 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
818 {
819 u32 sactive;
820 u8 tag = qc->tag;
821 struct ata_port *ap = qc->ap;
822 + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
823
824 #ifdef DEBUG_NCQ
825 if (qc->tag > 0 || ap->link.sactive > 1)
826 @@ -1068,47 +1091,33 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
827
828 if (!ata_is_ncq(qc->tf.protocol))
829 tag = 0;
830 - sata_dwc_qc_prep_by_tag(qc, tag);
831 +
832 + if (ata_is_dma(qc->tf.protocol)) {
833 + hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
834 + if (!hsdevp->desc[tag])
835 + return AC_ERR_SYSTEM;
836 + } else {
837 + hsdevp->desc[tag] = NULL;
838 + }
839
840 if (ata_is_ncq(qc->tf.protocol)) {
841 - sactive = core_scr_read(SCR_ACTIVE);
842 + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
843 sactive |= (0x00000001 << tag);
844 - core_scr_write(SCR_ACTIVE, sactive);
845 + sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
846
847 dev_dbg(qc->ap->dev,
848 "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
849 __func__, tag, qc->ap->link.sactive, sactive);
850
851 ap->ops->sff_tf_load(ap, &qc->tf);
852 - sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
853 + sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
854 SATA_DWC_CMD_ISSUED_PEND);
855 } else {
856 - ata_sff_qc_issue(qc);
857 + return ata_bmdma_qc_issue(qc);
858 }
859 return 0;
860 }
861
862 -/*
863 - * Function : sata_dwc_qc_prep
864 - * arguments : ata_queued_cmd *qc
865 - * Return value : None
866 - * qc_prep for a particular queued command
867 - */
868 -
869 -static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
870 -{
871 - if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
872 - return;
873 -
874 -#ifdef DEBUG_NCQ
875 - if (qc->tag > 0)
876 - dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
877 - __func__, qc->tag, qc->ap->link.active_tag);
878 -
879 - return ;
880 -#endif
881 -}
882 -
883 static void sata_dwc_error_handler(struct ata_port *ap)
884 {
885 ata_sff_error_handler(ap);
886 @@ -1125,17 +1134,22 @@ static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
887 sata_dwc_enable_interrupts(hsdev);
888
889 /* Reconfigure the DMA control register */
890 - out_le32(&hsdev->sata_dwc_regs->dmacr,
891 - SATA_DWC_DMACR_TXRXCH_CLEAR);
892 + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
893 + SATA_DWC_DMACR_TXRXCH_CLEAR);
894
895 /* Reconfigure the DMA Burst Transaction Size register */
896 - out_le32(&hsdev->sata_dwc_regs->dbtsr,
897 - SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
898 - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
899 + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
900 + SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
901 + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
902
903 return ret;
904 }
905
906 +static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
907 +{
908 + /* SATA DWC is master only */
909 +}
910 +
911 /*
912 * scsi mid-layer and libata interface structures
913 */
914 @@ -1148,7 +1162,13 @@ static struct scsi_host_template sata_dwc_sht = {
915 */
916 .sg_tablesize = LIBATA_MAX_PRD,
917 /* .can_queue = ATA_MAX_QUEUE, */
918 - .dma_boundary = ATA_DMA_BOUNDARY,
919 + /*
920 + * Make sure a LLI block is not created that will span 8K max FIS
921 + * boundary. If the block spans such a FIS boundary, there is a chance
922 + * that a DMA burst will cross that boundary -- this results in an
923 + * error in the host controller.
924 + */
925 + .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */,
926 };
927
928 static struct ata_port_operations sata_dwc_ops = {
929 @@ -1157,7 +1177,6 @@ static struct ata_port_operations sata_dwc_ops = {
930 .error_handler = sata_dwc_error_handler,
931 .hardreset = sata_dwc_hardreset,
932
933 - .qc_prep = sata_dwc_qc_prep,
934 .qc_issue = sata_dwc_qc_issue,
935
936 .scr_read = sata_dwc_scr_read,
937 @@ -1166,6 +1185,8 @@ static struct ata_port_operations sata_dwc_ops = {
938 .port_start = sata_dwc_port_start,
939 .port_stop = sata_dwc_port_stop,
940
941 + .sff_dev_select = sata_dwc_dev_select,
942 +
943 .bmdma_setup = sata_dwc_bmdma_setup,
944 .bmdma_start = sata_dwc_bmdma_start,
945 };
946 @@ -1184,13 +1205,14 @@ static int sata_dwc_probe(struct platform_device *ofdev)
947 struct sata_dwc_device *hsdev;
948 u32 idr, versionr;
949 char *ver = (char *)&versionr;
950 - u8 __iomem *base;
951 + void __iomem *base;
952 int err = 0;
953 int irq;
954 struct ata_host *host;
955 struct ata_port_info pi = sata_dwc_port_info[0];
956 const struct ata_port_info *ppi[] = { &pi, NULL };
957 struct device_node *np = ofdev->dev.of_node;
958 + struct resource *res;
959
960 /* Allocate DWC SATA device */
961 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
962 @@ -1201,57 +1223,33 @@ static int sata_dwc_probe(struct platform_device *ofdev)
963 host->private_data = hsdev;
964
965 /* Ioremap SATA registers */
966 - base = of_iomap(np, 0);
967 - if (!base) {
968 + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
969 + base = devm_ioremap_resource(&ofdev->dev, res);
970 + if (IS_ERR(base)) {
971 dev_err(&ofdev->dev,
972 "ioremap failed for SATA register address\n");
973 - return -ENODEV;
974 + return PTR_ERR(base);
975 }
976 - hsdev->reg_base = base;
977 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
978
979 /* Synopsys DWC SATA specific Registers */
980 - hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
981 + hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
982 + hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
983
984 /* Setup port */
985 host->ports[0]->ioaddr.cmd_addr = base;
986 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
987 - host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
988 - sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
989 + sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
990
991 /* Read the ID and Version Registers */
992 - idr = in_le32(&hsdev->sata_dwc_regs->idr);
993 - versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
994 + idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
995 + versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
996 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
997 idr, ver[0], ver[1], ver[2]);
998
999 - /* Get SATA DMA interrupt number */
1000 - hsdev->dma->irq = irq_of_parse_and_map(np, 1);
1001 - if (hsdev->dma->irq == NO_IRQ) {
1002 - dev_err(&ofdev->dev, "no SATA DMA irq\n");
1003 - err = -ENODEV;
1004 - goto error_iomap;
1005 - }
1006 -
1007 - /* Get physical SATA DMA register base address */
1008 - hsdev->dma->regs = of_iomap(np, 1);
1009 - if (!hsdev->dma->regs) {
1010 - dev_err(&ofdev->dev,
1011 - "ioremap failed for AHBDMA register address\n");
1012 - err = -ENODEV;
1013 - goto error_iomap;
1014 - }
1015 -
1016 /* Save dev for later use in dev_xxx() routines */
1017 hsdev->dev = &ofdev->dev;
1018
1019 - hsdev->dma->dev = &ofdev->dev;
1020 -
1021 - /* Initialize AHB DMAC */
1022 - err = dw_dma_probe(hsdev->dma, NULL);
1023 - if (err)
1024 - goto error_dma_iomap;
1025 -
1026 /* Enable SATA Interrupts */
1027 sata_dwc_enable_interrupts(hsdev);
1028
1029 @@ -1263,6 +1261,25 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1030 goto error_out;
1031 }
1032
1033 +#ifdef CONFIG_SATA_DWC_OLD_DMA
1034 + if (!of_find_property(np, "dmas", NULL)) {
1035 + err = sata_dwc_dma_init_old(ofdev, hsdev);
1036 + if (err)
1037 + goto error_out;
1038 + }
1039 +#endif
1040 +
1041 + hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
1042 + if (IS_ERR(hsdev->phy)) {
1043 + err = PTR_ERR(hsdev->phy);
1044 + hsdev->phy = NULL;
1045 + goto error_out;
1046 + }
1047 +
1048 + err = phy_init(hsdev->phy);
1049 + if (err)
1050 + goto error_out;
1051 +
1052 /*
1053 * Now, register with libATA core, this will also initiate the
1054 * device discovery process, invoking our port_start() handler &
1055 @@ -1276,12 +1293,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1056 return 0;
1057
1058 error_out:
1059 - /* Free SATA DMA resources */
1060 - dw_dma_remove(hsdev->dma);
1061 -error_dma_iomap:
1062 - iounmap(hsdev->dma->regs);
1063 -error_iomap:
1064 - iounmap(base);
1065 + phy_exit(hsdev->phy);
1066 return err;
1067 }
1068
1069 @@ -1293,11 +1305,13 @@ static int sata_dwc_remove(struct platform_device *ofdev)
1070
1071 ata_host_detach(host);
1072
1073 + phy_exit(hsdev->phy);
1074 +
1075 +#ifdef CONFIG_SATA_DWC_OLD_DMA
1076 /* Free SATA DMA resources */
1077 - dw_dma_remove(hsdev->dma);
1078 + sata_dwc_dma_exit_old(hsdev);
1079 +#endif
1080
1081 - iounmap(hsdev->dma->regs);
1082 - iounmap(hsdev->reg_base);
1083 dev_dbg(&ofdev->dev, "done\n");
1084 return 0;
1085 }
1086 --
1087 2.8.1
1088