cns3xxx: fix ethernet driver tx completion and queue stop/start
[openwrt/svn-archive/archive.git] target/linux/cns3xxx/patches-3.3/430-ethernet_fix_tx_completion.patch
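Before this change the driver reclaimed finished TX descriptors only from the xmit path, and only once at least half of the ring was in use; when the ring filled up, eth_xmit() simply returned NETDEV_TX_BUSY without ever stopping the queue. The patch below renames clear_tx_desc() to eth_complete_tx(), calls it from both eth_poll() and eth_xmit() under tx_lock with bottom halves disabled, and adds eth_check_num_used() to stop the per-port queues once fewer than TX_DESC_RESERVE (20) descriptors remain free and wake them again once completions catch up. For orientation, a condensed sketch of the new flow-control check follows (distilled from the hunks below; struct _tx_ring, switch_port_tab and the netdevice helpers are assumed from the driver's existing includes):

	/* Sketch: stop the TX queues when the ring is nearly full,
	 * wake them again once completions have freed descriptors.
	 */
	static void eth_check_num_used(struct _tx_ring *tx_ring)
	{
		bool stop = tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE;
		int i;

		if (tx_ring->stopped == stop)
			return;				/* queue state unchanged */

		tx_ring->stopped = stop;
		for (i = 0; i < 4; i++) {		/* one net_device per switch port */
			struct port *port = switch_port_tab[i];

			if (!port)
				continue;
			if (stop)
				netif_stop_queue(port->netdev);
			else
				netif_wake_queue(port->netdev);
		}
	}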
--- a/drivers/net/ethernet/cavium/cns3xxx_eth.c
+++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
@@ -28,6 +28,7 @@
 
 #define RX_DESCS 128
 #define TX_DESCS 128
+#define TX_DESC_RESERVE 20
 
 #define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
 #define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
@@ -266,6 +267,7 @@ struct _tx_ring {
 	u32 cur_index;
 	int num_used;
 	int num_count;
+	bool stopped;
 };
 
 struct _rx_ring {
@@ -546,7 +548,34 @@ out:
 	rx_ring->alloc_index = i;
 }
 
-static void clear_tx_desc(struct sw *sw)
+static void eth_check_num_used(struct _tx_ring *tx_ring)
+{
+	bool stop = false;
+	int i;
+
+	if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE)
+		stop = true;
+
+	if (tx_ring->stopped == stop)
+		return;
+
+	tx_ring->stopped = stop;
+	for (i = 0; i < 4; i++) {
+		struct port *port = switch_port_tab[i];
+		struct net_device *dev;
+
+		if (!port)
+			continue;
+
+		dev = port->netdev;
+		if (stop)
+			netif_stop_queue(dev);
+		else
+			netif_wake_queue(dev);
+	}
+}
+
+static void eth_complete_tx(struct sw *sw)
 {
 	struct _tx_ring *tx_ring = sw->tx_ring;
 	struct tx_desc *desc;
@@ -555,9 +584,6 @@ static void clear_tx_desc(struct sw *sw)
 	int num_used = tx_ring->num_used;
 	struct sk_buff *skb;
 
-	if (num_used < (TX_DESCS >> 1))
-		return;
-
 	index = tx_ring->free_index;
 	desc = &(tx_ring)->desc[index];
 	for (i = 0; i < num_used; i++) {
@@ -580,6 +606,7 @@ static void clear_tx_desc(struct sw *sw)
 	}
 	tx_ring->free_index = index;
 	tx_ring->num_used -= i;
+	eth_check_num_used(tx_ring);
 }
 
 static int eth_poll(struct napi_struct *napi, int budget)
@@ -688,6 +715,10 @@ static int eth_poll(struct napi_struct *
 
 	enable_rx_dma(sw);
 
+	spin_lock_bh(&tx_lock);
+	eth_complete_tx(sw);
+	spin_unlock_bh(&tx_lock);
+
 	return received;
 }
 
@@ -732,21 +763,19 @@ static int eth_xmit(struct sk_buff *skb,
 	skb_walk_frags(skb, skb1)
 		nr_desc++;
 
-	spin_lock(&tx_lock);
+	spin_lock_bh(&tx_lock);
 
+	eth_complete_tx(sw);
 	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
-		clear_tx_desc(sw);
-		if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
-			spin_unlock(&tx_lock);
-			return NETDEV_TX_BUSY;
-		}
+		spin_unlock_bh(&tx_lock);
+		return NETDEV_TX_BUSY;
 	}
 
 	index = index0 = tx_ring->cur_index;
 	index_last = (index0 + nr_desc) % TX_DESCS;
 	tx_ring->cur_index = (index_last + 1) % TX_DESCS;
 
-	spin_unlock(&tx_lock);
+	spin_unlock_bh(&tx_lock);
 
 	config0 = FORCE_ROUTE;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)