[openwrt/staging/yousong.git] target/linux/mediatek/patches-4.4/0091-net-next-mediatek-WIP.patch
From 34e10b96d5ccb99fb78251051bc5652b09359983 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Thu, 28 Apr 2016 07:58:22 +0200
Subject: [PATCH 91/91] net-next: mediatek WIP

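Work-in-progress changes to the QDMA path:

- ack the DMA done interrupt in the NAPI handlers just before polling,
  and reduce the hard IRQ handlers to a plain napi_schedule_prep() /
  __napi_schedule() sequence
- add a mtk_queue_stopped() helper so mtk_poll_tx() only wakes the TX
  queues when one of them was actually stopped
- drop the write flush from the IRQ mask helpers (commented out while
  testing)
- stop rewriting last_free->txd2 on reclaim and lower the TX wake
  threshold to MAX_SKB_FRAGS
- enable MTK_NDP_CO_PRO in the QDMA global config
- mask all interrupts during hw init, raise the netdev watchdog timeout
  to 4 * HZ, and scale the DMA ring size and NAPI weight by
  MTK_MAC_COUNT
- add temporary printk() debugging to the queue stop/wake paths
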
Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 89 ++++++++++++---------------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  5 +-
 2 files changed, 44 insertions(+), 50 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -326,7 +326,7 @@ static inline void mtk_irq_disable(struc
 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
 	/* flush write */
-	mtk_r32(eth, MTK_QDMA_INT_MASK);
+//	mtk_r32(eth, MTK_QDMA_INT_MASK);
 	spin_unlock_irqrestore(&eth->irq_lock, flags);
 }
 
@@ -339,7 +339,7 @@ static inline void mtk_irq_enable(struct
 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
 	/* flush write */
-	mtk_r32(eth, MTK_QDMA_INT_MASK);
+//	mtk_r32(eth, MTK_QDMA_INT_MASK);
 	spin_unlock_irqrestore(&eth->irq_lock, flags);
 }
 
@@ -710,10 +710,26 @@ static inline int mtk_cal_txd_req(struct
 	return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		if (netif_queue_stopped(eth->netdev[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
 	int i;
 
+	printk("%s:%s[%d]w\n", __FILE__, __func__, __LINE__);
+
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
 		if (!eth->netdev[i])
 			continue;
@@ -725,6 +741,7 @@ static void mtk_stop_queue(struct mtk_et
 {
 	int i;
 
+	printk("%s:%s[%d]s\n", __FILE__, __func__, __LINE__);
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
 		if (!eth->netdev[i])
 			continue;
@@ -775,12 +792,9 @@ static int mtk_start_xmit(struct sk_buff
 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
 		goto drop;
 
-	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
 		mtk_stop_queue(eth);
-		if (unlikely(atomic_read(&ring->free_count) >
-			     ring->thresh))
-			mtk_wake_queue(eth);
-	}
+
 	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -927,7 +941,6 @@ static int mtk_poll_tx(struct mtk_eth *e
 		}
 		mtk_tx_unmap(eth->dev, tx_buf);
 
-		ring->last_free->txd2 = next_cpu;
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
 
@@ -945,11 +958,8 @@ static int mtk_poll_tx(struct mtk_eth *e
 		netdev_completed_queue(eth->netdev[i], done, bytes);
 	}
 
-	/* read hw index again make sure no new tx packet */
-	if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
-		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
-
-	if (atomic_read(&ring->free_count) > ring->thresh)
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
 		mtk_wake_queue(eth);
 
 	return done;
@@ -973,10 +983,11 @@ static int mtk_napi_tx(struct napi_struc
 	int tx_done = 0;
 
 	mtk_handle_status_irq(eth);
-
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
 	tx_done = mtk_poll_tx(eth, budget);
+
 	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
 		dev_info(eth->dev,
 			 "done tx %d, intr 0x%08x/0x%x\n",
@@ -1002,9 +1013,12 @@ static int mtk_napi_rx(struct napi_struc
 	u32 status, mask;
 	int rx_done = 0;
 
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+	mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
 	rx_done = mtk_poll_rx(napi, budget, eth);
+
 	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
 		dev_info(eth->dev,
 			 "done rx %d, intr 0x%08x/0x%x\n",
@@ -1052,9 +1066,8 @@ static int mtk_tx_alloc(struct mtk_eth *
 
 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
 	ring->next_free = &ring->dma[0];
-	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-			   MAX_SKB_FRAGS);
+	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+	ring->thresh = MAX_SKB_FRAGS;
 
 	/* make sure that all changes to the dma ring are flushed before we
 	 * continue
@@ -1259,21 +1272,11 @@ static void mtk_tx_timeout(struct net_de
 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
-	u32 status;
-
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	status &= ~MTK_TX_DONE_INT;
-
-	if (unlikely(!status))
-		return IRQ_NONE;
 
-	if (status & MTK_RX_DONE_INT) {
-		if (likely(napi_schedule_prep(&eth->rx_napi))) {
-			mtk_irq_disable(eth, MTK_RX_DONE_INT);
-			__napi_schedule(&eth->rx_napi);
-		}
+	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+		__napi_schedule(&eth->rx_napi);
+		mtk_irq_disable(eth, MTK_RX_DONE_INT);
 	}
-	mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
 
 	return IRQ_HANDLED;
 }
@@ -1281,21 +1284,11 @@ static irqreturn_t mtk_handle_irq_rx(int
 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
-	u32 status;
-
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	status &= ~MTK_RX_DONE_INT;
-
-	if (unlikely(!status))
-		return IRQ_NONE;
 
-	if (status & MTK_TX_DONE_INT) {
-		if (likely(napi_schedule_prep(&eth->tx_napi))) {
-			mtk_irq_disable(eth, MTK_TX_DONE_INT);
-			__napi_schedule(&eth->tx_napi);
-		}
+	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+		__napi_schedule(&eth->tx_napi);
+		mtk_irq_disable(eth, MTK_TX_DONE_INT);
 	}
-	mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
 
 	return IRQ_HANDLED;
 }
@@ -1326,7 +1319,7 @@ static int mtk_start_dma(struct mtk_eth
 	mtk_w32(eth,
 		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
 		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-		MTK_RX_BT_32DWORDS,
+		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
 		MTK_QDMA_GLO_CFG);
 
 	return 0;
@@ -1440,7 +1433,7 @@ static int __init mtk_hw_init(struct mtk
 
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1765,7 +1758,7 @@ static int mtk_add_mac(struct mtk_eth *e
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = HZ;
+	eth->netdev[id]->watchdog_timeo = 4 * HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,9 +18,9 @@
 #define MTK_QDMA_PAGE_SIZE	2048
 #define MTK_MAX_RX_LENGTH	1536
 #define MTK_TX_DMA_BUF_LEN	0x3fff
-#define MTK_DMA_SIZE		256
-#define MTK_NAPI_WEIGHT		64
 #define MTK_MAC_COUNT		2
+#define MTK_DMA_SIZE		(256 * MTK_MAC_COUNT)
+#define MTK_NAPI_WEIGHT		(64 * MTK_MAC_COUNT)
 #define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
 #define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
 #define MTK_DMA_DUMMY_DESC	0xffffffff
@@ -95,6 +95,7 @@
 #define MTK_QDMA_GLO_CFG	0x1A04
 #define MTK_RX_2B_OFFSET	BIT(31)
 #define MTK_RX_BT_32DWORDS	(3 << 11)
+#define MTK_NDP_CO_PRO		BIT(10)
 #define MTK_TX_WB_DDONE		BIT(6)
 #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
 #define MTK_RX_DMA_BUSY		BIT(3)
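
For reference, a sketch of how the RX hard IRQ handler reads with this patch
applied, reconstructed from the hunks above (the TX handler is the same shape,
using eth->tx_napi and MTK_TX_DONE_INT):

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	/* No status-register check any more: hand the IRQ straight to NAPI
	 * and mask the RX done source; the interrupt is acked and polled in
	 * mtk_napi_rx() instead.
	 */
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}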