1 From 1d67040af0144c549f4db8144d2ccc253ff8639c Mon Sep 17 00:00:00 2001
2 From: Jonas Gorski <jogo@openwrt.org>
3 Date: Mon, 1 Jul 2013 16:39:28 +0200
4 Subject: [PATCH 2/2] net: ixp4xx_eth: use parent device for dma allocations
6 Now that the platform device provides a dma_coherent_mask, use it for dma operations.
9 This fixes ethernet on ixp4xx which was broken since 3.7.
11 Signed-off-by: Jonas Gorski <jogo@openwrt.org>
13 drivers/net/ethernet/xscale/ixp4xx_eth.c | 23 ++++++++++++-----------
14 1 file changed, 12 insertions(+), 11 deletions(-)
16 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
17 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
18 @@ -661,10 +661,10 @@ static inline void queue_put_desc(unsign
19 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
22 - dma_unmap_single(&port->netdev->dev, desc->data,
23 + dma_unmap_single(port->netdev->dev.parent, desc->data,
24 desc->buf_len, DMA_TO_DEVICE);
26 - dma_unmap_single(&port->netdev->dev, desc->data & ~3,
27 + dma_unmap_single(port->netdev->dev.parent, desc->data & ~3,
28 ALIGN((desc->data & 3) + desc->buf_len, 4),
31 @@ -731,9 +731,9 @@ static int eth_poll(struct napi_struct *
34 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
35 - phys = dma_map_single(&dev->dev, skb->data,
36 + phys = dma_map_single(dev->dev.parent, skb->data,
37 RX_BUFF_SIZE, DMA_FROM_DEVICE);
38 - if (dma_mapping_error(&dev->dev, phys)) {
39 + if (dma_mapping_error(dev->dev.parent, phys)) {
43 @@ -756,10 +756,11 @@ static int eth_poll(struct napi_struct *
46 skb = port->rx_buff_tab[n];
47 - dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
48 + dma_unmap_single(dev->dev.parent, desc->data - NET_IP_ALIGN,
49 RX_BUFF_SIZE, DMA_FROM_DEVICE);
51 - dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
52 + dma_sync_single_for_cpu(dev->dev.parent,
53 + desc->data - NET_IP_ALIGN,
54 RX_BUFF_SIZE, DMA_FROM_DEVICE);
55 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
56 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
57 @@ -878,7 +879,7 @@ static int eth_xmit(struct sk_buff *skb,
58 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
61 - phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
62 + phys = dma_map_single(dev->dev.parent, mem, bytes, DMA_TO_DEVICE);
63 if (dma_mapping_error(&dev->dev, phys)) {
66 @@ -1128,7 +1129,7 @@ static int init_queues(struct port *port
70 - dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
71 + dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
72 POOL_ALLOC_SIZE, 32, 0);
75 @@ -1156,9 +1157,9 @@ static int init_queues(struct port *port
78 desc->buf_len = MAX_MRU;
79 - desc->data = dma_map_single(&port->netdev->dev, data,
80 + desc->data = dma_map_single(port->netdev->dev.parent, data,
81 RX_BUFF_SIZE, DMA_FROM_DEVICE);
82 - if (dma_mapping_error(&port->netdev->dev, desc->data)) {
83 + if (dma_mapping_error(port->netdev->dev.parent, desc->data)) {
87 @@ -1178,7 +1179,7 @@ static void destroy_queues(struct port *
88 struct desc *desc = rx_desc_ptr(port, i);
89 buffer_t *buff = port->rx_buff_tab[i];
91 - dma_unmap_single(&port->netdev->dev,
92 + dma_unmap_single(port->netdev->dev.parent,
93 desc->data - NET_IP_ALIGN,
94 RX_BUFF_SIZE, DMA_FROM_DEVICE);