target/linux/generic/patches-4.1/760-8139cp-fixes-from-4.3.patch
commit 41b976414c88016e2c9d9b2f6667ee67a998d388
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:31 2015 +0100

    8139cp: Dump contents of descriptor ring on TX timeout

    We are seeing unexplained TX timeouts under heavy load. Let's try to get
    a better idea of what's going on.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

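The dump itself is nothing more exotic than a loop over the TX ring that prints each hardware descriptor next to the driver's shadow state, so a slot the chip never handed back (DescOwn still set in opts1) stands out immediately. A minimal sketch of the idea, using the same fields the cp_tx_timeout() hunk below touches:

        /* Print every TX slot: live opts1, the driver's stashed copy,
         * opts2, the DMA address and the queued skb pointer. */
        for (i = 0; i < CP_TX_RING_SIZE; i++)
                netif_dbg(cp, tx_err, cp->dev,
                          "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
                          i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
                          cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
                          le64_to_cpu(cp->tx_ring[i].addr), cp->tx_skb[i]);
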
commit 7f4c685633e2df9ba10d49a31dda13715745db37
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:16 2015 +0100

    8139cp: Fix DMA unmapping of transmitted buffers

    The low 16 bits of the 'opts1' field in the TX descriptor are supposed
    to still contain the buffer length when the descriptor is handed back to
    us. In practice, at least on my hardware, they don't. So stash the
    original value of the opts1 field and get the length to unmap from
    there.

    There are other ways we could have worked out the length, but I actually
    want a stash of the opts1 field anyway so that I can dump it alongside
    the contents of the descriptor ring when we suffer a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

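In essence the fix pairs every TX ring slot with a shadow copy of opts1 (the new tx_opts[] array in struct cp_private): the value is stashed when the descriptor is queued and consulted when the buffer is unmapped, as the hunks below show:

        /* Submit: remember exactly what we wrote into the descriptor. */
        cp->tx_opts[entry] = opts1;

        /* Completion: unmap with the stashed length, not the low 16 bits
         * of the descriptor, which the hardware may have clobbered. */
        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                         cp->tx_opts[tx_tail] & 0xffff,
                         PCI_DMA_TODEVICE);
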
commit 0a5aeee0b79fa99d8e04c98dd4e87d4f52aa497b
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:57 2015 +0100

    8139cp: Reduce duplicate csum/tso code in cp_start_xmit()

    We calculate the value of the opts1 descriptor field in three different
    places. With two different behaviours when given an invalid packet to
    be checksummed - none of them correct. Sort that out.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

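After the cleanup, opts1 is computed exactly once near the top of cp_start_xmit() and every later descriptor write just ORs in the per-fragment bits. The single copy (reproduced from the hunk below) also gives an invalid packet one consistent failure mode, warn once and drop, instead of WARN_ON(1) on one path and BUG() on another:

        opts1 = DescOwn;
        if (mss)
                opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
                if (ip->protocol == IPPROTO_TCP)
                        opts1 |= IPCS | TCPCS;
                else if (ip->protocol == IPPROTO_UDP)
                        opts1 |= IPCS | UDPCS;
                else {
                        /* One consistent failure mode: warn once, drop. */
                        WARN_ONCE(1, "Net bug: asked to checksum invalid Legacy IP packet\n");
                        goto out_dma_error;
                }
        }
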
commit a3b804043f490aeec57d8ca5baccdd35e6250857
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:38 2015 +0100

    8139cp: Fix TSO/scatter-gather descriptor setup

    When sending a TSO frame in multiple buffers, we were neglecting to set
    the first descriptor up in TSO mode.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

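With opts1 computed once (see the previous patch), the first descriptor of a multi-buffer frame now simply inherits it, LargeSend/MSS bits included, rather than a locally rebuilt value that lacked them:

        /* First descriptor of a scatter-gather frame: the shared opts1
         * (including any TSO bits) plus the first-fragment flags. */
        ctrl = opts1 | first_eor | first_len | FirstFrag;
        txd->opts1 = cpu_to_le32(ctrl);
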
commit 26b0bad6ac3a0167792dc4ffb276c29bc597d239
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:06 2015 +0100

    8139cp: Fix tx_queued debug message to print correct slot numbers

    After a certain amount of staring at the debug output of this driver, I
    realised it was lying to me.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit aaa0062ecf4877a26dea66bee1039c6eaf906c94
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:43:41 2015 +0100

    8139cp: Do not re-enable RX interrupts in cp_tx_timeout()

    If an RX interrupt was already received but NAPI has not yet run when
    the TX timeout happens, we end up in cp_tx_timeout() with RX interrupts
    already disabled. Blindly re-enabling them will cause an IRQ storm.

    (This is made particularly horrid by the fact that cp_interrupt() always
    returns that it's handled the interrupt, even when it hasn't actually
    done anything. If it didn't do that, the core IRQ code would have
    detected the storm and handled it, I'd have had a clear smoking gun
    backtrace instead of just a spontaneously resetting router, and I'd have
    at *least* two days of my life back. Changing the return value of
    cp_interrupt() will be argued about under separate cover.)

    Unconditionally leave RX interrupts disabled after the reset, and
    schedule NAPI to check the receive ring and re-enable them.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

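Concretely, the tail of cp_tx_timeout() now restarts the hardware with the RX interrupt bits still masked (cp_norx_intr_mask is the mask the driver already uses while NAPI is polling) and hands the receive ring to NAPI, which re-enables RX interrupts only once it has drained the ring:

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);
        /* RX interrupts stay masked; the NAPI poll re-enables them when
         * the receive ring has been serviced. */
        cpw16_f(IntrMask, cp_norx_intr_mask);

        netif_wake_queue(dev);
        napi_schedule_irqoff(&cp->napi);
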
commit 7a8a8e75d505147358b225173e890ada43a267e2
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:21:54 2015 +0100

    8139cp: Call __cp_set_rx_mode() from cp_tx_timeout()

    Unless we reset the RX config, on real hardware I don't seem to receive
    any packets after a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit fc27bd115b334e3ebdc682a42a47c3aea2566dcc
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:19:08 2015 +0100

    8139cp: Use dev_kfree_skb_any() instead of dev_kfree_skb() in cp_clean_rings()

    This can be called from cp_tx_timeout() with interrupts disabled.
    Spotted by Francois Romieu <romieu@fr.zoreil.com>

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
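
dev_kfree_skb() may only be used where it is safe to free directly; dev_kfree_skb_any() checks the calling context and defers the free (via dev_kfree_skb_irq()) when running in hard IRQ or with interrupts disabled, so it is safe from both the normal shutdown path and the timeout path. The substitution is mechanical:

        /* Safe in any context: frees immediately in process context,
         * queues the skb for softirq freeing when IRQs are off. */
        dev_kfree_skb_any(cp->rx_skb[i]);
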
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b..686334f 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
         NWayAdvert = 0x66, /* MII ADVERTISE */
         NWayLPAR = 0x68, /* MII LPA */
         NWayExpansion = 0x6A, /* MII Expansion */
+        TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
         Config5 = 0xD8, /* Config5 */
         TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
         RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
         unsigned tx_tail;
         struct cp_desc *tx_ring;
         struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+        u32 tx_opts[CP_TX_RING_SIZE];

         unsigned rx_buf_sz;
         unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
                 BUG_ON(!skb);

                 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-                                 le32_to_cpu(txd->opts1) & 0xffff,
+                                 cp->tx_opts[tx_tail] & 0xffff,
                                  PCI_DMA_TODEVICE);

                 if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 {
         struct cp_private *cp = netdev_priv(dev);
         unsigned entry;
-        u32 eor, flags;
+        u32 eor, opts1;
         unsigned long intr_flags;
         __le32 opts2;
         int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
         mss = skb_shinfo(skb)->gso_size;

         opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+        opts1 = DescOwn;
+        if (mss)
+                opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                const struct iphdr *ip = ip_hdr(skb);
+                if (ip->protocol == IPPROTO_TCP)
+                        opts1 |= IPCS | TCPCS;
+                else if (ip->protocol == IPPROTO_UDP)
+                        opts1 |= IPCS | UDPCS;
+                else {
+                        WARN_ONCE(1,
+                                  "Net bug: asked to checksum invalid Legacy IP packet\n");
+                        goto out_dma_error;
+                }
+        }

         if (skb_shinfo(skb)->nr_frags == 0) {
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                 txd->addr = cpu_to_le64(mapping);
                 wmb();

-                flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
-                if (mss)
-                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        const struct iphdr *ip = ip_hdr(skb);
-                        if (ip->protocol == IPPROTO_TCP)
-                                flags |= IPCS | TCPCS;
-                        else if (ip->protocol == IPPROTO_UDP)
-                                flags |= IPCS | UDPCS;
-                        else
-                                WARN_ON(1); /* we need a WARN() */
-                }
+                opts1 |= eor | len | FirstFrag | LastFrag;

-                txd->opts1 = cpu_to_le32(flags);
+                txd->opts1 = cpu_to_le32(opts1);
                 wmb();

                 cp->tx_skb[entry] = skb;
-                entry = NEXT_TX(entry);
+                cp->tx_opts[entry] = opts1;
+                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+                          entry, skb->len);
         } else {
                 struct cp_desc *txd;
-                u32 first_len, first_eor;
+                u32 first_len, first_eor, ctrl;
                 dma_addr_t first_mapping;
                 int frag, first_entry = entry;
-                const struct iphdr *ip = ip_hdr(skb);

                 /* We must give this initial chunk to the device last.
                  * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                         goto out_dma_error;

                 cp->tx_skb[entry] = skb;
-                entry = NEXT_TX(entry);

                 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                         const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                         u32 len;
-                        u32 ctrl;
                         dma_addr_t mapping;

+                        entry = NEXT_TX(entry);
+
                         len = skb_frag_size(this_frag);
                         mapping = dma_map_single(&cp->pdev->dev,
                                                  skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,

                         eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

-                        ctrl = eor | len | DescOwn;
-
-                        if (mss)
-                                ctrl |= LargeSend |
-                                        ((mss & MSSMask) << MSSShift);
-                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                                if (ip->protocol == IPPROTO_TCP)
-                                        ctrl |= IPCS | TCPCS;
-                                else if (ip->protocol == IPPROTO_UDP)
-                                        ctrl |= IPCS | UDPCS;
-                                else
-                                        BUG();
-                        }
+                        ctrl = opts1 | eor | len;

                         if (frag == skb_shinfo(skb)->nr_frags - 1)
                                 ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                         txd->opts1 = cpu_to_le32(ctrl);
                         wmb();

+                        cp->tx_opts[entry] = ctrl;
                         cp->tx_skb[entry] = skb;
-                        entry = NEXT_TX(entry);
                 }

                 txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                 txd->addr = cpu_to_le64(first_mapping);
                 wmb();

-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        if (ip->protocol == IPPROTO_TCP)
-                                txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                         FirstFrag | DescOwn |
-                                                         IPCS | TCPCS);
-                        else if (ip->protocol == IPPROTO_UDP)
-                                txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                         FirstFrag | DescOwn |
-                                                         IPCS | UDPCS);
-                        else
-                                BUG();
-                } else
-                        txd->opts1 = cpu_to_le32(first_eor | first_len |
-                                                 FirstFrag | DescOwn);
+                ctrl = opts1 | first_eor | first_len | FirstFrag;
+                txd->opts1 = cpu_to_le32(ctrl);
                 wmb();
+
+                cp->tx_opts[first_entry] = ctrl;
+                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+                          first_entry, entry, skb->len);
         }
-        cp->tx_head = entry;
+        cp->tx_head = NEXT_TX(entry);

         netdev_sent_queue(dev, skb->len);
-        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-                  entry, skb->len);
         if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                 netif_stop_queue(dev);
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
 {
         memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
         cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

         cp_init_rings_index(cp);

@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
                         desc = cp->rx_ring + i;
                         dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                          cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-                        dev_kfree_skb(cp->rx_skb[i]);
+                        dev_kfree_skb_any(cp->rx_skb[i]);
                 }
         }

@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
                                          le32_to_cpu(desc->opts1) & 0xffff,
                                          PCI_DMA_TODEVICE);
                         if (le32_to_cpu(desc->opts1) & LastFrag)
-                                dev_kfree_skb(skb);
+                                dev_kfree_skb_any(skb);
                         cp->dev->stats.tx_dropped++;
                 }
         }
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)

         memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
         memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

         memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
         memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
 {
         struct cp_private *cp = netdev_priv(dev);
         unsigned long flags;
-        int rc;
+        int rc, i;

         netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                     cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)

         spin_lock_irqsave(&cp->lock, flags);

+        netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+                  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+        for (i = 0; i < CP_TX_RING_SIZE; i++) {
+                netif_dbg(cp, tx_err, cp->dev,
+                          "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+                          i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+                          cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+                          le64_to_cpu(cp->tx_ring[i].addr),
+                          cp->tx_skb[i]);
+        }
+
         cp_stop_hw(cp);
         cp_clean_rings(cp);
         rc = cp_init_rings(cp);
         cp_start_hw(cp);
-        cp_enable_irq(cp);
+        __cp_set_rx_mode(dev);
+        cpw16_f(IntrMask, cp_norx_intr_mask);

         netif_wake_queue(dev);
+        napi_schedule_irqoff(&cp->napi);

         spin_unlock_irqrestore(&cp->lock, flags);
 }