Add 2.6.27 support to rb532; NAND is not yet recognized and still needs to be sorted out
[openwrt/svn-archive/archive.git] / target / linux / rb532 / patches-2.6.27 / 008-korina_fixes.patch
1 --- linux-2.6.27.5/drivers/net/korina.c 2008-11-07 18:55:34.000000000 +0100
2 +++ ../build_dir/linux-rb532/linux-2.6.27.5/drivers/net/korina.c 2008-11-16 00:38:19.000000000 +0100
3 @@ -327,12 +327,11 @@
4
5 dmas = readl(&lp->rx_dma_regs->dmas);
6 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
7 - netif_rx_schedule_prep(dev, &lp->napi);
8 -
9 dmasm = readl(&lp->rx_dma_regs->dmasm);
10 writel(dmasm | (DMA_STAT_DONE |
11 DMA_STAT_HALT | DMA_STAT_ERR),
12 &lp->rx_dma_regs->dmasm);
13 + netif_rx_schedule(dev, &lp->napi);
14
15 if (dmas & DMA_STAT_ERR)
16 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
17 @@ -350,14 +349,24 @@
18 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
19 struct sk_buff *skb, *skb_new;
20 u8 *pkt_buf;
21 - u32 devcs, pkt_len, dmas, rx_free_desc;
22 + u32 devcs, pkt_len, dmas, pktuncrc_len;
23 int count;
24
25 dma_cache_inv((u32)rd, sizeof(*rd));
26
27 for (count = 0; count < limit; count++) {
28 -
29 + skb_new = NULL;
30 devcs = rd->devcs;
31 + pkt_len = RCVPKT_LENGTH(devcs);
32 + skb = lp->rx_skb[lp->rx_next_done];
33 +
34 + if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
35 + /* check that this is a whole packet
36 + * WARNING: DMA_FD bit incorrectly set
37 + * in Rc32434 (errata ref #077) */
38 + dev->stats.rx_errors++;
39 + dev->stats.rx_dropped++;
40 + }
41
42 /* Update statistics counters */
43 if (devcs & ETH_RX_CRC)
44 @@ -375,75 +384,79 @@
45 if (devcs & ETH_RX_MP)
46 dev->stats.multicast++;
47
48 - if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
49 - /* check that this is a whole packet
50 - * WARNING: DMA_FD bit incorrectly set
51 - * in Rc32434 (errata ref #077) */
52 - dev->stats.rx_errors++;
53 - dev->stats.rx_dropped++;
54 - }
55 -
56 - while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
57 - /* init the var. used for the later
58 - * operations within the while loop */
59 - skb_new = NULL;
60 - pkt_len = RCVPKT_LENGTH(devcs);
61 - skb = lp->rx_skb[lp->rx_next_done];
62 -
63 - if ((devcs & ETH_RX_ROK)) {
64 - /* must be the (first and) last
65 - * descriptor then */
66 - pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
67 + else if ((devcs & ETH_RX_ROK)) {
68 + /* must be the (first and) last
69 + * descriptor then */
70 + pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
71 + pktuncrc_len = pkt_len - 4;
72
73 - /* invalidate the cache */
74 - dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
75 + /* invalidate the cache */
76 + dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
77
78 - /* Malloc up new buffer. */
79 - skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
80 + /* Malloc up new buffer. */
81 + skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
82
83 - if (!skb_new)
84 - break;
85 + if (skb_new) {
86 /* Do not count the CRC */
87 - skb_put(skb, pkt_len - 4);
88 + skb_put(skb, pktuncrc_len);
89 skb->protocol = eth_type_trans(skb, dev);
90
91 /* Pass the packet to upper layers */
92 netif_receive_skb(skb);
93 +
94 dev->last_rx = jiffies;
95 dev->stats.rx_packets++;
96 - dev->stats.rx_bytes += pkt_len;
97 -
98 - /* Update the mcast stats */
99 - if (devcs & ETH_RX_MP)
100 - dev->stats.multicast++;
101 -
102 + dev->stats.rx_bytes += pktuncrc_len;
103 +
104 lp->rx_skb[lp->rx_next_done] = skb_new;
105 + } else {
106 + dev->stats.rx_errors++;
107 + dev->stats.rx_dropped++;
108 }
109 + } else {
110 + dev->stats.rx_errors++;
111 + dev->stats.rx_dropped++;
112 +
113 + /* Update statistics counters */
114 + if (devcs & ETH_RX_CRC)
115 + dev->stats.rx_crc_errors++;
116 + else if (devcs & ETH_RX_LOR)
117 + dev->stats.rx_length_errors++;
118 + else if (devcs & ETH_RX_LE)
119 + dev->stats.rx_length_errors++;
120 + else if (devcs & ETH_RX_OVR)
121 + dev->stats.rx_over_errors++;
122 + else if (devcs & ETH_RX_CV)
123 + dev->stats.rx_frame_errors++;
124 + else if (devcs & ETH_RX_CES)
125 + dev->stats.rx_length_errors++;
126 + else if (devcs & ETH_RX_MP)
127 + dev->stats.multicast++;
128 + }
129
130 - rd->devcs = 0;
131 + rd->devcs = 0;
132
133 - /* Restore descriptor's curr_addr */
134 - if (skb_new)
135 - rd->ca = CPHYSADDR(skb_new->data);
136 - else
137 - rd->ca = CPHYSADDR(skb->data);
138 + /* Restore descriptor's curr_addr */
139 + if (skb_new)
140 + rd->ca = CPHYSADDR(skb_new->data);
141 + else
142 + rd->ca = CPHYSADDR(skb->data);
143
144 - rd->control = DMA_COUNT(KORINA_RBSIZE) |
145 + rd->control = DMA_COUNT(KORINA_RBSIZE) |
146 DMA_DESC_COD | DMA_DESC_IOD;
147 - lp->rd_ring[(lp->rx_next_done - 1) &
148 - KORINA_RDS_MASK].control &=
149 - ~DMA_DESC_COD;
150 -
151 - lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
152 - dma_cache_wback((u32)rd, sizeof(*rd));
153 - rd = &lp->rd_ring[lp->rx_next_done];
154 - writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
155 - }
156 + lp->rd_ring[(lp->rx_next_done - 1) &
157 + KORINA_RDS_MASK].control &= ~DMA_DESC_COD;
158 +
159 + lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
160 + dma_cache_wback((u32)rd, sizeof(*rd));
161 + rd = &lp->rd_ring[lp->rx_next_done];
162 + writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
163 }
164
165 dmas = readl(&lp->rx_dma_regs->dmas);
166
167 if (dmas & DMA_STAT_HALT) {
168 + /* Mask off halt and errors bits */
169 writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
170 &lp->rx_dma_regs->dmas);
171
172 @@ -469,8 +482,9 @@
173 if (work_done < budget) {
174 netif_rx_complete(dev, napi);
175
176 + /* Mask off interrupts */
177 writel(readl(&lp->rx_dma_regs->dmasm) &
178 - ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
179 + (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
180 &lp->rx_dma_regs->dmasm);
181 }
182 return work_done;
183 @@ -534,10 +548,11 @@
184 {
185 struct korina_private *lp = netdev_priv(dev);
186 struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
187 + unsigned long flags;
188 u32 devcs;
189 u32 dmas;
190
191 - spin_lock(&lp->lock);
192 + spin_lock_irqsave(&lp->lock, flags);
193
194 /* Process all desc that are done */
195 while (IS_DMA_FINISHED(td->control)) {
196 @@ -610,7 +625,7 @@
197 ~(DMA_STAT_FINI | DMA_STAT_ERR),
198 &lp->tx_dma_regs->dmasm);
199
200 - spin_unlock(&lp->lock);
201 + spin_unlock_irqrestore(&lp->lock, flags);
202 }
203
204 static irqreturn_t
205 @@ -624,11 +639,10 @@
206 dmas = readl(&lp->tx_dma_regs->dmas);
207
208 if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
209 - korina_tx(dev);
210 -
211 dmasm = readl(&lp->tx_dma_regs->dmasm);
212 writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
213 &lp->tx_dma_regs->dmasm);
214 + korina_tx(dev);
215
216 if (lp->tx_chain_status == desc_filled &&
217 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
218 @@ -1078,11 +1092,18 @@
219
220 static int korina_probe(struct platform_device *pdev)
221 {
222 - struct korina_device *bif = platform_get_drvdata(pdev);
223 + struct korina_device *bif;
224 struct korina_private *lp;
225 struct net_device *dev;
226 struct resource *r;
227 int rc;
228 + DECLARE_MAC_BUF(mac);
229 +
230 + bif = (struct korina_device *)pdev->dev.platform_data;
231 + if (!bif) {
232 + printk(KERN_ERR DRV_NAME ": missing platform_data\n");
233 + return -ENODEV;
234 + }
235
236 dev = alloc_etherdev(sizeof(struct korina_private));
237 if (!dev) {
238 @@ -1172,6 +1193,7 @@
239 ": cannot register net device %d\n", rc);
240 goto probe_err_register;
241 }
242 + printk(KERN_INFO DRV_NAME ": registered %s, IRQ %d MAC %s\n", dev->name, dev->irq, print_mac(mac, dev->dev_addr));
243 out:
244 return rc;
245