commit 41b976414c88016e2c9d9b2f6667ee67a998d388
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:31 2015 +0100

    8139cp: Dump contents of descriptor ring on TX timeout

    We are seeing unexplained TX timeouts under heavy load. Let's try to get
    a better idea of what's going on.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
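
The ring dump itself appears in the cp_tx_timeout() hunk at the end of this series. As a rough sketch only, assuming the driver-local names from that hunk (struct cp_private, the tx_opts[] stash, the TxDmaOkLowDesc register and the cpr16() accessor) and a hypothetical helper name, it amounts to:

/* Hedged sketch of the ring dump added by this commit; it relies on the
 * tx_opts[] stash and TxDmaOkLowDesc register introduced later in the
 * series, and on the driver's cpr16() register-read macro. */
static void cp_dump_tx_ring(struct cp_private *cp)
{
    unsigned int i;

    netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
              cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));

    for (i = 0; i < CP_TX_RING_SIZE; i++) {
        struct cp_desc *txd = &cp->tx_ring[i];

        /* Print the live opts1 next to the stashed copy so a mismatch
         * between them shows up in the log. */
        netif_dbg(cp, tx_err, cp->dev,
                  "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
                  i, txd, le32_to_cpu(txd->opts1), cp->tx_opts[i],
                  le32_to_cpu(txd->opts2),
                  (unsigned long long)le64_to_cpu(txd->addr),
                  cp->tx_skb[i]);
    }
}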

commit 7f4c685633e2df9ba10d49a31dda13715745db37
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:45:16 2015 +0100

    8139cp: Fix DMA unmapping of transmitted buffers

    The low 16 bits of the 'opts1' field in the TX descriptor are supposed
    to still contain the buffer length when the descriptor is handed back to
    us. In practice, at least on my hardware, they don't. So stash the
    original value of the opts1 field and get the length to unmap from
    there.

    There are other ways we could have worked out the length, but I actually
    want a stash of the opts1 field anyway so that I can dump it alongside
    the contents of the descriptor ring when we suffer a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
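
A minimal sketch of the completion-side unmap this commit describes, under the field names from the diff below (cp->tx_opts[], struct cp_desc); the helper name is invented for illustration, and the real change is made inline in cp_tx():

/* Hypothetical helper: unmap one transmitted buffer using the stashed
 * opts1 value rather than the (possibly clobbered) low 16 bits of the
 * descriptor the hardware handed back. */
static void cp_unmap_tx_buf(struct cp_private *cp, unsigned int tx_tail)
{
    struct cp_desc *txd = cp->tx_ring + tx_tail;

    dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                     cp->tx_opts[tx_tail] & 0xffff,   /* stashed length */
                     PCI_DMA_TODEVICE);
}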

commit 0a5aeee0b79fa99d8e04c98dd4e87d4f52aa497b
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:57 2015 +0100

    8139cp: Reduce duplicate csum/tso code in cp_start_xmit()

    We calculate the value of the opts1 descriptor field in three different
    places, with two different behaviours when given an invalid packet to
    be checksummed; none of them is correct. Sort that out.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
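
As a sketch of the consolidated logic, assuming the flag names used in the diff below (LargeSend, MSSMask, MSSShift, IPCS, TCPCS, UDPCS) and packaging it as a hypothetical helper rather than the inline code the patch actually uses:

/* Sketch: compute the csum/TSO portion of opts1 in one place.
 * Returns 0 and ORs the flags into *opts1, or a negative error when the
 * packet asks for an offload the chip cannot do (caller drops it). */
static int cp_tx_csum_flags(const struct sk_buff *skb, u32 *opts1)
{
    unsigned int mss = skb_shinfo(skb)->gso_size;

    if (mss) {
        /* TSO: let the chip segment using this MSS. */
        *opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
        const struct iphdr *ip = ip_hdr(skb);

        if (ip->protocol == IPPROTO_TCP)
            *opts1 |= IPCS | TCPCS;
        else if (ip->protocol == IPPROTO_UDP)
            *opts1 |= IPCS | UDPCS;
        else {
            /* Asked to checksum something the chip cannot offload:
             * warn once and tell the caller to drop the packet. */
            WARN_ONCE(1, "Net bug: asked to checksum invalid Legacy IP packet\n");
            return -EPROTONOSUPPORT;
        }
    }
    return 0;
}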

commit a3b804043f490aeec57d8ca5baccdd35e6250857
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:38 2015 +0100

    8139cp: Fix TSO/scatter-gather descriptor setup

    When sending a TSO frame in multiple buffers, we were neglecting to set
    the first descriptor up in TSO mode.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
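
The fix is visible in the diff below, where the first descriptor of a multi-buffer frame now inherits the shared opts1 value (which already carries the LargeSend/MSS bits for TSO). A hedged fragment, using the variable names from that hunk (first_entry, first_eor, first_len, FirstFrag) and an assumed write barrier around the ownership handover:

/* Multi-buffer transmit: the first descriptor is filled in last so the
 * chip cannot start on a half-built chain, and it must carry the same
 * TSO/checksum bits (already folded into opts1) as the rest. */
txd = &cp->tx_ring[first_entry];
txd->opts2 = opts2;
txd->addr = cpu_to_le64(first_mapping);
wmb();                                   /* descriptor body before ownership */

ctrl = opts1 | first_eor | first_len | FirstFrag;
txd->opts1 = cpu_to_le32(ctrl);          /* opts1 includes DescOwn */
wmb();                                   /* publish ownership last */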

commit 26b0bad6ac3a0167792dc4ffb276c29bc597d239
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:44:06 2015 +0100

    8139cp: Fix tx_queued debug message to print correct slot numbers

    After a certain amount of staring at the debug output of this driver, I
    realised it was lying to me.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit aaa0062ecf4877a26dea66bee1039c6eaf906c94
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:43:41 2015 +0100

    8139cp: Do not re-enable RX interrupts in cp_tx_timeout()

    If an RX interrupt was already received but NAPI has not yet run when
    the TX timeout happens, we end up in cp_tx_timeout() with RX interrupts
    already disabled. Blindly re-enabling them will cause an IRQ storm.

    (This is made particularly horrid by the fact that cp_interrupt() always
    returns that it's handled the interrupt, even when it hasn't actually
    done anything. If it didn't do that, the core IRQ code would have
    detected the storm and handled it, I'd have had a clear smoking gun
    backtrace instead of just a spontaneously resetting router, and I'd have
    at *least* two days of my life back. Changing the return value of
    cp_interrupt() will be argued about under separate cover.)

    Unconditionally leave RX interrupts disabled after the reset, and
    schedule NAPI to check the receive ring and re-enable them.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
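
A condensed sketch of the tail of cp_tx_timeout() after this change, assuming the surrounding calls from the existing timeout path (cp_init_rings(), cp_start_hw()) and the cp_norx_intr_mask / cpw16_f() names that appear in the diff below:

/* After resetting the rings: do NOT restore the full interrupt mask here,
 * because RX interrupts may legitimately be disabled already (NAPI is
 * pending). Leave them masked and let the poll routine drain the RX ring
 * and re-enable them itself. */
rc = cp_init_rings(cp);
cp_start_hw(cp);
__cp_set_rx_mode(dev);
cpw16_f(IntrMask, cp_norx_intr_mask);   /* everything except RX */

netif_wake_queue(dev);
napi_schedule_irqoff(&cp->napi);        /* cp->lock is held with IRQs off */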

commit 7a8a8e75d505147358b225173e890ada43a267e2
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:21:54 2015 +0100

    8139cp: Call __cp_set_rx_mode() from cp_tx_timeout()

    Unless we reset the RX config, on real hardware I don't seem to receive
    any packets after a TX timeout.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

commit fc27bd115b334e3ebdc682a42a47c3aea2566dcc
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Fri Sep 18 00:19:08 2015 +0100

    8139cp: Use dev_kfree_skb_any() instead of dev_kfree_skb() in cp_clean_rings()

    This can be called from cp_tx_timeout() with interrupts disabled.
    Spotted by Francois Romieu <romieu@fr.zoreil.com>

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
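
For illustration, roughly what dev_kfree_skb_any() does and why it is the safe choice here: it checks the current context and defers the free when interrupts are off, which plain dev_kfree_skb() must not assume. The helper name below is hypothetical.

/* Approximation of dev_kfree_skb_any(): cp_clean_rings() can now be
 * reached from cp_tx_timeout() under spin_lock_irqsave(), i.e. with IRQs
 * disabled, so it must not call dev_kfree_skb() directly. */
static inline void example_kfree_skb_any(struct sk_buff *skb)
{
    if (in_irq() || irqs_disabled())
        dev_kfree_skb_irq(skb);   /* defer the actual free to softirq */
    else
        dev_kfree_skb(skb);       /* may free immediately */
}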

--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
 NWayAdvert = 0x66, /* MII ADVERTISE */
 NWayLPAR = 0x68, /* MII LPA */
 NWayExpansion = 0x6A, /* MII Expansion */
+ TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
 Config5 = 0xD8, /* Config5 */
 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
 struct cp_desc *tx_ring;
 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+ u32 tx_opts[CP_TX_RING_SIZE];
 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp
 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
- le32_to_cpu(txd->opts1) & 0xffff,
+ cp->tx_opts[tx_tail] & 0xffff,
 if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct
 struct cp_private *cp = netdev_priv(dev);
 unsigned long intr_flags;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct
 mss = skb_shinfo(skb)->gso_size;
 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+ opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ if (ip->protocol == IPPROTO_TCP)
+ opts1 |= IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts1 |= IPCS | UDPCS;
+ "Net bug: asked to checksum invalid Legacy IP packet\n");
+ goto out_dma_error;
 if (skb_shinfo(skb)->nr_frags == 0) {
 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct
 txd->addr = cpu_to_le64(mapping);
- flags = eor | len | DescOwn | FirstFrag | LastFrag;
- flags |= LargeSend | ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
- if (ip->protocol == IPPROTO_TCP)
- flags |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- flags |= IPCS | UDPCS;
- WARN_ON(1); /* we need a WARN() */
+ opts1 |= eor | len | FirstFrag | LastFrag;
- txd->opts1 = cpu_to_le32(flags);
+ txd->opts1 = cpu_to_le32(opts1);
 cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
+ cp->tx_opts[entry] = opts1;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
- u32 first_len, first_eor;
+ u32 first_len, first_eor, ctrl;
 dma_addr_t first_mapping;
 int frag, first_entry = entry;
- const struct iphdr *ip = ip_hdr(skb);
 /* We must give this initial chunk to the device last.
 * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct
 cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ entry = NEXT_TX(entry);
 len = skb_frag_size(this_frag);
 mapping = dma_map_single(&cp->pdev->dev,
 skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct
 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
- ctrl = eor | len | DescOwn;
- ctrl |= LargeSend |
- ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- ctrl |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- ctrl |= IPCS | UDPCS;
+ ctrl = opts1 | eor | len;
 if (frag == skb_shinfo(skb)->nr_frags - 1)
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct
 txd->opts1 = cpu_to_le32(ctrl);
+ cp->tx_opts[entry] = ctrl;
 cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
 txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct
 txd->addr = cpu_to_le64(first_mapping);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- else if (ip->protocol == IPPROTO_UDP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn);
+ ctrl = opts1 | first_eor | first_len | FirstFrag;
+ txd->opts1 = cpu_to_le32(ctrl);
+ cp->tx_opts[first_entry] = ctrl;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+ first_entry, entry, skb->len);
- cp->tx_head = entry;
+ cp->tx_head = NEXT_TX(entry);
 netdev_sent_queue(dev, skb->len);
- netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 netif_stop_queue(dev);
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_priv
 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 cp_init_rings_index(cp);
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_pr
 desc = cp->rx_ring + i;
 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(cp->rx_skb[i]);
+ dev_kfree_skb_any(cp->rx_skb[i]);
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_pr
 le32_to_cpu(desc->opts1) & 0xffff,
 if (le32_to_cpu(desc->opts1) & LastFrag)
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
 cp->dev->stats.tx_dropped++;
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_pr
 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_dev
 struct cp_private *cp = netdev_priv(dev);
 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
 cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_dev
 spin_lock_irqsave(&cp->lock, flags);
+ netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+ cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+ for (i = 0; i < CP_TX_RING_SIZE; i++) {
+ netif_dbg(cp, tx_err, cp->dev,
+ "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+ i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+ cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+ le64_to_cpu(cp->tx_ring[i].addr),
 rc = cp_init_rings(cp);
+ __cp_set_rx_mode(dev);
+ cpw16_f(IntrMask, cp_norx_intr_mask);
 netif_wake_queue(dev);
+ napi_schedule_irqoff(&cp->napi);
 spin_unlock_irqrestore(&cp->lock, flags);