backport mv643xx ethernet changes from the Marvell git repo
[openwrt/staging/yousong.git] target/linux/orion/patches/060-mv643xx_git_backport.patch
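
This backports the mv643xx_eth driver rework from Marvell's git tree (driver version 1.1). The visible changes in the hunks below: the MV643XX_* build-time options are renamed to MV643XX_ETH_*, the register definitions are reworked with per-queue offsets (RXQ_CURRENT_DESC_PTR, TXQ_BW_TOKENS, ...), struct eth_rx_desc/eth_tx_desc become rx_desc/tx_desc, mv643xx_private becomes mv643xx_eth_private, and per-port RX/TX state moves into dedicated rx_queue/tx_queue structures (up to eight of each, selected via rxq_mask/txq_mask).

As a rough illustration only (not part of the applied diff), the new layout makes starting or stopping a queue a single write to the port's queue command register. A minimal sketch in the style of the rxq_enable()/rxq_disable() helpers added below, assuming a valid struct mv643xx_eth_private *mp and a queue index q:

	/* enable receive queue q on this port */
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << q);

	/* request disable, then wait for the hardware to acknowledge */
	wrl(mp, RXQ_COMMAND(mp->port_num), (1 << q) << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & (1 << q))
		udelay(10);
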
1 --- a/drivers/net/mv643xx_eth.c
2 +++ b/drivers/net/mv643xx_eth.c
3 @@ -34,406 +34,145 @@
4 * along with this program; if not, write to the Free Software
5 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
6 */
7 +
8 #include <linux/init.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/in.h>
11 -#include <linux/ip.h>
12 #include <linux/tcp.h>
13 #include <linux/udp.h>
14 #include <linux/etherdevice.h>
15 -
16 -#include <linux/bitops.h>
17 #include <linux/delay.h>
18 #include <linux/ethtool.h>
19 #include <linux/platform_device.h>
20 -
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/spinlock.h>
24 #include <linux/workqueue.h>
25 #include <linux/mii.h>
26 -
27 #include <linux/mv643xx_eth.h>
28 -
29 #include <asm/io.h>
30 #include <asm/types.h>
31 -#include <asm/pgtable.h>
32 #include <asm/system.h>
33 -#include <asm/delay.h>
34 -#include <asm/dma-mapping.h>
35
36 -#define MV643XX_CHECKSUM_OFFLOAD_TX
37 -#define MV643XX_NAPI
38 -#define MV643XX_TX_FAST_REFILL
39 -#undef MV643XX_COAL
40 -
41 -#define MV643XX_TX_COAL 100
42 -#ifdef MV643XX_COAL
43 -#define MV643XX_RX_COAL 100
44 -#endif
45 +static char mv643xx_eth_driver_name[] = "mv643xx_eth";
46 +static char mv643xx_eth_driver_version[] = "1.1";
47
48 -#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
49 +#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
50 +#define MV643XX_ETH_NAPI
51 +#define MV643XX_ETH_TX_FAST_REFILL
52 +
53 +#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
54 #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
55 #else
56 #define MAX_DESCS_PER_SKB 1
57 #endif
58
59 -#define ETH_VLAN_HLEN 4
60 -#define ETH_FCS_LEN 4
61 -#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
62 -#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
63 - ETH_VLAN_HLEN + ETH_FCS_LEN)
64 -#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + \
65 - dma_get_cache_alignment())
66 -
67 /*
68 * Registers shared between all ports.
69 */
70 -#define PHY_ADDR_REG 0x0000
71 -#define SMI_REG 0x0004
72 -#define WINDOW_BASE(i) (0x0200 + ((i) << 3))
73 -#define WINDOW_SIZE(i) (0x0204 + ((i) << 3))
74 -#define WINDOW_REMAP_HIGH(i) (0x0280 + ((i) << 2))
75 -#define WINDOW_BAR_ENABLE 0x0290
76 -#define WINDOW_PROTECT(i) (0x0294 + ((i) << 4))
77 +#define PHY_ADDR 0x0000
78 +#define SMI_REG 0x0004
79 +#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
80 +#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
81 +#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
82 +#define WINDOW_BAR_ENABLE 0x0290
83 +#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
84
85 /*
86 * Per-port registers.
87 */
88 -#define PORT_CONFIG_REG(p) (0x0400 + ((p) << 10))
89 -#define PORT_CONFIG_EXTEND_REG(p) (0x0404 + ((p) << 10))
90 -#define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10))
91 -#define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10))
92 -#define SDMA_CONFIG_REG(p) (0x041c + ((p) << 10))
93 -#define PORT_SERIAL_CONTROL_REG(p) (0x043c + ((p) << 10))
94 -#define PORT_STATUS_REG(p) (0x0444 + ((p) << 10))
95 -#define TRANSMIT_QUEUE_COMMAND_REG(p) (0x0448 + ((p) << 10))
96 -#define MAXIMUM_TRANSMIT_UNIT(p) (0x0458 + ((p) << 10))
97 -#define INTERRUPT_CAUSE_REG(p) (0x0460 + ((p) << 10))
98 -#define INTERRUPT_CAUSE_EXTEND_REG(p) (0x0464 + ((p) << 10))
99 -#define INTERRUPT_MASK_REG(p) (0x0468 + ((p) << 10))
100 -#define INTERRUPT_EXTEND_MASK_REG(p) (0x046c + ((p) << 10))
101 -#define TX_FIFO_URGENT_THRESHOLD_REG(p) (0x0474 + ((p) << 10))
102 -#define RX_CURRENT_QUEUE_DESC_PTR_0(p) (0x060c + ((p) << 10))
103 -#define RECEIVE_QUEUE_COMMAND_REG(p) (0x0680 + ((p) << 10))
104 -#define TX_CURRENT_QUEUE_DESC_PTR_0(p) (0x06c0 + ((p) << 10))
105 -#define MIB_COUNTERS_BASE(p) (0x1000 + ((p) << 7))
106 -#define DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(p) (0x1400 + ((p) << 10))
107 -#define DA_FILTER_OTHER_MULTICAST_TABLE_BASE(p) (0x1500 + ((p) << 10))
108 -#define DA_FILTER_UNICAST_TABLE_BASE(p) (0x1600 + ((p) << 10))
109 -
110 -/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
111 -#define UNICAST_NORMAL_MODE (0 << 0)
112 -#define UNICAST_PROMISCUOUS_MODE (1 << 0)
113 -#define DEFAULT_RX_QUEUE(queue) ((queue) << 1)
114 -#define DEFAULT_RX_ARP_QUEUE(queue) ((queue) << 4)
115 -#define RECEIVE_BC_IF_NOT_IP_OR_ARP (0 << 7)
116 -#define REJECT_BC_IF_NOT_IP_OR_ARP (1 << 7)
117 -#define RECEIVE_BC_IF_IP (0 << 8)
118 -#define REJECT_BC_IF_IP (1 << 8)
119 -#define RECEIVE_BC_IF_ARP (0 << 9)
120 -#define REJECT_BC_IF_ARP (1 << 9)
121 -#define TX_AM_NO_UPDATE_ERROR_SUMMARY (1 << 12)
122 -#define CAPTURE_TCP_FRAMES_DIS (0 << 14)
123 -#define CAPTURE_TCP_FRAMES_EN (1 << 14)
124 -#define CAPTURE_UDP_FRAMES_DIS (0 << 15)
125 -#define CAPTURE_UDP_FRAMES_EN (1 << 15)
126 -#define DEFAULT_RX_TCP_QUEUE(queue) ((queue) << 16)
127 -#define DEFAULT_RX_UDP_QUEUE(queue) ((queue) << 19)
128 -#define DEFAULT_RX_BPDU_QUEUE(queue) ((queue) << 22)
129 -
130 -#define PORT_CONFIG_DEFAULT_VALUE \
131 - UNICAST_NORMAL_MODE | \
132 - DEFAULT_RX_QUEUE(0) | \
133 - DEFAULT_RX_ARP_QUEUE(0) | \
134 - RECEIVE_BC_IF_NOT_IP_OR_ARP | \
135 - RECEIVE_BC_IF_IP | \
136 - RECEIVE_BC_IF_ARP | \
137 - CAPTURE_TCP_FRAMES_DIS | \
138 - CAPTURE_UDP_FRAMES_DIS | \
139 - DEFAULT_RX_TCP_QUEUE(0) | \
140 - DEFAULT_RX_UDP_QUEUE(0) | \
141 - DEFAULT_RX_BPDU_QUEUE(0)
142 -
143 -/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
144 -#define CLASSIFY_EN (1 << 0)
145 -#define SPAN_BPDU_PACKETS_AS_NORMAL (0 << 1)
146 -#define SPAN_BPDU_PACKETS_TO_RX_QUEUE_7 (1 << 1)
147 -#define PARTITION_DISABLE (0 << 2)
148 -#define PARTITION_ENABLE (1 << 2)
149 -
150 -#define PORT_CONFIG_EXTEND_DEFAULT_VALUE \
151 - SPAN_BPDU_PACKETS_AS_NORMAL | \
152 - PARTITION_DISABLE
153 -
154 -/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
155 -#define RIFB (1 << 0)
156 -#define RX_BURST_SIZE_1_64BIT (0 << 1)
157 -#define RX_BURST_SIZE_2_64BIT (1 << 1)
158 +#define PORT_CONFIG(p) (0x0400 + ((p) << 10))
159 +#define UNICAST_PROMISCUOUS_MODE 0x00000001
160 +#define PORT_CONFIG_EXT(p) (0x0404 + ((p) << 10))
161 +#define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10))
162 +#define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10))
163 +#define SDMA_CONFIG(p) (0x041c + ((p) << 10))
164 +#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10))
165 +#define PORT_STATUS(p) (0x0444 + ((p) << 10))
166 +#define TX_FIFO_EMPTY 0x00000400
167 +#define TXQ_COMMAND(p) (0x0448 + ((p) << 10))
168 +#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10))
169 +#define TX_BW_RATE(p) (0x0450 + ((p) << 10))
170 +#define TX_BW_MTU(p) (0x0458 + ((p) << 10))
171 +#define TX_BW_BURST(p) (0x045c + ((p) << 10))
172 +#define INT_CAUSE(p) (0x0460 + ((p) << 10))
173 +#define INT_TX_END 0x07f80000
174 +#define INT_RX 0x0007fbfc
175 +#define INT_EXT 0x00000002
176 +#define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10))
177 +#define INT_EXT_LINK 0x00100000
178 +#define INT_EXT_PHY 0x00010000
179 +#define INT_EXT_TX_ERROR_0 0x00000100
180 +#define INT_EXT_TX_0 0x00000001
181 +#define INT_EXT_TX 0x0000ffff
182 +#define INT_MASK(p) (0x0468 + ((p) << 10))
183 +#define INT_MASK_EXT(p) (0x046c + ((p) << 10))
184 +#define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10))
185 +#define TXQ_FIX_PRIO_CONF_MOVED(p) (0x04dc + ((p) << 10))
186 +#define TX_BW_RATE_MOVED(p) (0x04e0 + ((p) << 10))
187 +#define TX_BW_MTU_MOVED(p) (0x04e8 + ((p) << 10))
188 +#define TX_BW_BURST_MOVED(p) (0x04ec + ((p) << 10))
189 +#define RXQ_CURRENT_DESC_PTR(p, q) (0x060c + ((p) << 10) + ((q) << 4))
190 +#define RXQ_COMMAND(p) (0x0680 + ((p) << 10))
191 +#define TXQ_CURRENT_DESC_PTR(p, q) (0x06c0 + ((p) << 10) + ((q) << 2))
192 +#define TXQ_BW_TOKENS(p, q) (0x0700 + ((p) << 10) + ((q) << 4))
193 +#define TXQ_BW_CONF(p, q) (0x0704 + ((p) << 10) + ((q) << 4))
194 +#define TXQ_BW_WRR_CONF(p, q) (0x0708 + ((p) << 10) + ((q) << 4))
195 +#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
196 +#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
197 +#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
198 +#define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
199 +
200 +
201 +/*
202 + * SDMA configuration register.
203 + */
204 #define RX_BURST_SIZE_4_64BIT (2 << 1)
205 -#define RX_BURST_SIZE_8_64BIT (3 << 1)
206 -#define RX_BURST_SIZE_16_64BIT (4 << 1)
207 #define BLM_RX_NO_SWAP (1 << 4)
208 -#define BLM_RX_BYTE_SWAP (0 << 4)
209 #define BLM_TX_NO_SWAP (1 << 5)
210 -#define BLM_TX_BYTE_SWAP (0 << 5)
211 -#define DESCRIPTORS_BYTE_SWAP (1 << 6)
212 -#define DESCRIPTORS_NO_SWAP (0 << 6)
213 -#define IPG_INT_RX(value) (((value) & 0x3fff) << 8)
214 -#define TX_BURST_SIZE_1_64BIT (0 << 22)
215 -#define TX_BURST_SIZE_2_64BIT (1 << 22)
216 #define TX_BURST_SIZE_4_64BIT (2 << 22)
217 -#define TX_BURST_SIZE_8_64BIT (3 << 22)
218 -#define TX_BURST_SIZE_16_64BIT (4 << 22)
219
220 #if defined(__BIG_ENDIAN)
221 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \
222 RX_BURST_SIZE_4_64BIT | \
223 - IPG_INT_RX(0) | \
224 TX_BURST_SIZE_4_64BIT
225 #elif defined(__LITTLE_ENDIAN)
226 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \
227 RX_BURST_SIZE_4_64BIT | \
228 BLM_RX_NO_SWAP | \
229 BLM_TX_NO_SWAP | \
230 - IPG_INT_RX(0) | \
231 TX_BURST_SIZE_4_64BIT
232 #else
233 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
234 #endif
235
236 -/* These macros describe Ethernet Port serial control reg (PSCR) bits */
237 -#define SERIAL_PORT_DISABLE (0 << 0)
238 -#define SERIAL_PORT_ENABLE (1 << 0)
239 -#define DO_NOT_FORCE_LINK_PASS (0 << 1)
240 -#define FORCE_LINK_PASS (1 << 1)
241 -#define ENABLE_AUTO_NEG_FOR_DUPLX (0 << 2)
242 -#define DISABLE_AUTO_NEG_FOR_DUPLX (1 << 2)
243 -#define ENABLE_AUTO_NEG_FOR_FLOW_CTRL (0 << 3)
244 -#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
245 -#define ADV_NO_FLOW_CTRL (0 << 4)
246 -#define ADV_SYMMETRIC_FLOW_CTRL (1 << 4)
247 -#define FORCE_FC_MODE_NO_PAUSE_DIS_TX (0 << 5)
248 -#define FORCE_FC_MODE_TX_PAUSE_DIS (1 << 5)
249 -#define FORCE_BP_MODE_NO_JAM (0 << 7)
250 -#define FORCE_BP_MODE_JAM_TX (1 << 7)
251 -#define FORCE_BP_MODE_JAM_TX_ON_RX_ERR (2 << 7)
252 -#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
253 -#define FORCE_LINK_FAIL (0 << 10)
254 -#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
255 -#define RETRANSMIT_16_ATTEMPTS (0 << 11)
256 -#define RETRANSMIT_FOREVER (1 << 11)
257 -#define ENABLE_AUTO_NEG_SPEED_GMII (0 << 13)
258 -#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
259 -#define DTE_ADV_0 (0 << 14)
260 -#define DTE_ADV_1 (1 << 14)
261 -#define DISABLE_AUTO_NEG_BYPASS (0 << 15)
262 -#define ENABLE_AUTO_NEG_BYPASS (1 << 15)
263 -#define AUTO_NEG_NO_CHANGE (0 << 16)
264 -#define RESTART_AUTO_NEG (1 << 16)
265 -#define MAX_RX_PACKET_1518BYTE (0 << 17)
266 +
267 +/*
268 + * Port serial control register.
269 + */
270 +#define SET_MII_SPEED_TO_100 (1 << 24)
271 +#define SET_GMII_SPEED_TO_1000 (1 << 23)
272 +#define SET_FULL_DUPLEX_MODE (1 << 21)
273 #define MAX_RX_PACKET_1522BYTE (1 << 17)
274 -#define MAX_RX_PACKET_1552BYTE (2 << 17)
275 -#define MAX_RX_PACKET_9022BYTE (3 << 17)
276 -#define MAX_RX_PACKET_9192BYTE (4 << 17)
277 #define MAX_RX_PACKET_9700BYTE (5 << 17)
278 #define MAX_RX_PACKET_MASK (7 << 17)
279 -#define CLR_EXT_LOOPBACK (0 << 20)
280 -#define SET_EXT_LOOPBACK (1 << 20)
281 -#define SET_HALF_DUPLEX_MODE (0 << 21)
282 -#define SET_FULL_DUPLEX_MODE (1 << 21)
283 -#define DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (0 << 22)
284 -#define ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22)
285 -#define SET_GMII_SPEED_TO_10_100 (0 << 23)
286 -#define SET_GMII_SPEED_TO_1000 (1 << 23)
287 -#define SET_MII_SPEED_TO_10 (0 << 24)
288 -#define SET_MII_SPEED_TO_100 (1 << 24)
289 +#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
290 +#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
291 +#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
292 +#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
293 +#define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2)
294 +#define FORCE_LINK_PASS (1 << 1)
295 +#define SERIAL_PORT_ENABLE (1 << 0)
296 +
297 +#define DEFAULT_RX_QUEUE_SIZE 400
298 +#define DEFAULT_TX_QUEUE_SIZE 800
299
300 -#define PORT_SERIAL_CONTROL_DEFAULT_VALUE \
301 - DO_NOT_FORCE_LINK_PASS | \
302 - ENABLE_AUTO_NEG_FOR_DUPLX | \
303 - DISABLE_AUTO_NEG_FOR_FLOW_CTRL | \
304 - ADV_SYMMETRIC_FLOW_CTRL | \
305 - FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
306 - FORCE_BP_MODE_NO_JAM | \
307 - (1 << 9) /* reserved */ | \
308 - DO_NOT_FORCE_LINK_FAIL | \
309 - RETRANSMIT_16_ATTEMPTS | \
310 - ENABLE_AUTO_NEG_SPEED_GMII | \
311 - DTE_ADV_0 | \
312 - DISABLE_AUTO_NEG_BYPASS | \
313 - AUTO_NEG_NO_CHANGE | \
314 - MAX_RX_PACKET_9700BYTE | \
315 - CLR_EXT_LOOPBACK | \
316 - SET_FULL_DUPLEX_MODE | \
317 - ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX
318 -
319 -/* These macros describe Ethernet Serial Status reg (PSR) bits */
320 -#define PORT_STATUS_MODE_10_BIT (1 << 0)
321 -#define PORT_STATUS_LINK_UP (1 << 1)
322 -#define PORT_STATUS_FULL_DUPLEX (1 << 2)
323 -#define PORT_STATUS_FLOW_CONTROL (1 << 3)
324 -#define PORT_STATUS_GMII_1000 (1 << 4)
325 -#define PORT_STATUS_MII_100 (1 << 5)
326 -/* PSR bit 6 is undocumented */
327 -#define PORT_STATUS_TX_IN_PROGRESS (1 << 7)
328 -#define PORT_STATUS_AUTONEG_BYPASSED (1 << 8)
329 -#define PORT_STATUS_PARTITION (1 << 9)
330 -#define PORT_STATUS_TX_FIFO_EMPTY (1 << 10)
331 -/* PSR bits 11-31 are reserved */
332 -
333 -#define PORT_DEFAULT_TRANSMIT_QUEUE_SIZE 800
334 -#define PORT_DEFAULT_RECEIVE_QUEUE_SIZE 400
335 -
336 -#define DESC_SIZE 64
337 -
338 -#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
339 -#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
340 -
341 -#define ETH_INT_CAUSE_RX_DONE (ETH_RX_QUEUES_ENABLED << 2)
342 -#define ETH_INT_CAUSE_RX_ERROR (ETH_RX_QUEUES_ENABLED << 9)
343 -#define ETH_INT_CAUSE_RX (ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
344 -#define ETH_INT_CAUSE_EXT 0x00000002
345 -#define ETH_INT_UNMASK_ALL (ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)
346 -
347 -#define ETH_INT_CAUSE_TX_DONE (ETH_TX_QUEUES_ENABLED << 0)
348 -#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
349 -#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
350 -#define ETH_INT_CAUSE_PHY 0x00010000
351 -#define ETH_INT_CAUSE_STATE 0x00100000
352 -#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
353 - ETH_INT_CAUSE_STATE)
354 -
355 -#define ETH_INT_MASK_ALL 0x00000000
356 -#define ETH_INT_MASK_ALL_EXT 0x00000000
357 -
358 -#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
359 -#define PHY_WAIT_MICRO_SECONDS 10
360 -
361 -/* Buffer offset from buffer pointer */
362 -#define RX_BUF_OFFSET 0x2
363 -
364 -/* Gigabit Ethernet Unit Global Registers */
365 -
366 -/* MIB Counters register definitions */
367 -#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
368 -#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
369 -#define ETH_MIB_BAD_OCTETS_RECEIVED 0x8
370 -#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR 0xc
371 -#define ETH_MIB_GOOD_FRAMES_RECEIVED 0x10
372 -#define ETH_MIB_BAD_FRAMES_RECEIVED 0x14
373 -#define ETH_MIB_BROADCAST_FRAMES_RECEIVED 0x18
374 -#define ETH_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
375 -#define ETH_MIB_FRAMES_64_OCTETS 0x20
376 -#define ETH_MIB_FRAMES_65_TO_127_OCTETS 0x24
377 -#define ETH_MIB_FRAMES_128_TO_255_OCTETS 0x28
378 -#define ETH_MIB_FRAMES_256_TO_511_OCTETS 0x2c
379 -#define ETH_MIB_FRAMES_512_TO_1023_OCTETS 0x30
380 -#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
381 -#define ETH_MIB_GOOD_OCTETS_SENT_LOW 0x38
382 -#define ETH_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
383 -#define ETH_MIB_GOOD_FRAMES_SENT 0x40
384 -#define ETH_MIB_EXCESSIVE_COLLISION 0x44
385 -#define ETH_MIB_MULTICAST_FRAMES_SENT 0x48
386 -#define ETH_MIB_BROADCAST_FRAMES_SENT 0x4c
387 -#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED 0x50
388 -#define ETH_MIB_FC_SENT 0x54
389 -#define ETH_MIB_GOOD_FC_RECEIVED 0x58
390 -#define ETH_MIB_BAD_FC_RECEIVED 0x5c
391 -#define ETH_MIB_UNDERSIZE_RECEIVED 0x60
392 -#define ETH_MIB_FRAGMENTS_RECEIVED 0x64
393 -#define ETH_MIB_OVERSIZE_RECEIVED 0x68
394 -#define ETH_MIB_JABBER_RECEIVED 0x6c
395 -#define ETH_MIB_MAC_RECEIVE_ERROR 0x70
396 -#define ETH_MIB_BAD_CRC_EVENT 0x74
397 -#define ETH_MIB_COLLISION 0x78
398 -#define ETH_MIB_LATE_COLLISION 0x7c
399 -
400 -/* Port serial status reg (PSR) */
401 -#define ETH_INTERFACE_PCM 0x00000001
402 -#define ETH_LINK_IS_UP 0x00000002
403 -#define ETH_PORT_AT_FULL_DUPLEX 0x00000004
404 -#define ETH_RX_FLOW_CTRL_ENABLED 0x00000008
405 -#define ETH_GMII_SPEED_1000 0x00000010
406 -#define ETH_MII_SPEED_100 0x00000020
407 -#define ETH_TX_IN_PROGRESS 0x00000080
408 -#define ETH_BYPASS_ACTIVE 0x00000100
409 -#define ETH_PORT_AT_PARTITION_STATE 0x00000200
410 -#define ETH_PORT_TX_FIFO_EMPTY 0x00000400
411 -
412 -/* SMI reg */
413 -#define ETH_SMI_BUSY 0x10000000 /* 0 - Write, 1 - Read */
414 -#define ETH_SMI_READ_VALID 0x08000000 /* 0 - Write, 1 - Read */
415 -#define ETH_SMI_OPCODE_WRITE 0 /* Completion of Read */
416 -#define ETH_SMI_OPCODE_READ 0x04000000 /* Operation is in progress */
417 -
418 -/* Interrupt Cause Register Bit Definitions */
419 -
420 -/* SDMA command status fields macros */
421 -
422 -/* Tx & Rx descriptors status */
423 -#define ETH_ERROR_SUMMARY 0x00000001
424 -
425 -/* Tx & Rx descriptors command */
426 -#define ETH_BUFFER_OWNED_BY_DMA 0x80000000
427 -
428 -/* Tx descriptors status */
429 -#define ETH_LC_ERROR 0
430 -#define ETH_UR_ERROR 0x00000002
431 -#define ETH_RL_ERROR 0x00000004
432 -#define ETH_LLC_SNAP_FORMAT 0x00000200
433 -
434 -/* Rx descriptors status */
435 -#define ETH_OVERRUN_ERROR 0x00000002
436 -#define ETH_MAX_FRAME_LENGTH_ERROR 0x00000004
437 -#define ETH_RESOURCE_ERROR 0x00000006
438 -#define ETH_VLAN_TAGGED 0x00080000
439 -#define ETH_BPDU_FRAME 0x00100000
440 -#define ETH_UDP_FRAME_OVER_IP_V_4 0x00200000
441 -#define ETH_OTHER_FRAME_TYPE 0x00400000
442 -#define ETH_LAYER_2_IS_ETH_V_2 0x00800000
443 -#define ETH_FRAME_TYPE_IP_V_4 0x01000000
444 -#define ETH_FRAME_HEADER_OK 0x02000000
445 -#define ETH_RX_LAST_DESC 0x04000000
446 -#define ETH_RX_FIRST_DESC 0x08000000
447 -#define ETH_UNKNOWN_DESTINATION_ADDR 0x10000000
448 -#define ETH_RX_ENABLE_INTERRUPT 0x20000000
449 -#define ETH_LAYER_4_CHECKSUM_OK 0x40000000
450 -
451 -/* Rx descriptors byte count */
452 -#define ETH_FRAME_FRAGMENTED 0x00000004
453 -
454 -/* Tx descriptors command */
455 -#define ETH_LAYER_4_CHECKSUM_FIRST_DESC 0x00000400
456 -#define ETH_FRAME_SET_TO_VLAN 0x00008000
457 -#define ETH_UDP_FRAME 0x00010000
458 -#define ETH_GEN_TCP_UDP_CHECKSUM 0x00020000
459 -#define ETH_GEN_IP_V_4_CHECKSUM 0x00040000
460 -#define ETH_ZERO_PADDING 0x00080000
461 -#define ETH_TX_LAST_DESC 0x00100000
462 -#define ETH_TX_FIRST_DESC 0x00200000
463 -#define ETH_GEN_CRC 0x00400000
464 -#define ETH_TX_ENABLE_INTERRUPT 0x00800000
465 -#define ETH_AUTO_MODE 0x40000000
466 -
467 -#define ETH_TX_IHL_SHIFT 11
468 -
469 -/* typedefs */
470 -
471 -typedef enum _eth_func_ret_status {
472 - ETH_OK, /* Returned as expected. */
473 - ETH_ERROR, /* Fundamental error. */
474 - ETH_RETRY, /* Could not process request. Try later.*/
475 - ETH_END_OF_JOB, /* Ring has nothing to process. */
476 - ETH_QUEUE_FULL, /* Ring resource error. */
477 - ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
478 -} ETH_FUNC_RET_STATUS;
479
480 -/* These are for big-endian machines. Little endian needs different
481 - * definitions.
482 +/*
483 + * RX/TX descriptors.
484 */
485 #if defined(__BIG_ENDIAN)
486 -struct eth_rx_desc {
487 +struct rx_desc {
488 u16 byte_cnt; /* Descriptor buffer byte count */
489 u16 buf_size; /* Buffer size */
490 u32 cmd_sts; /* Descriptor command status */
491 @@ -441,7 +180,7 @@
492 u32 buf_ptr; /* Descriptor buffer pointer */
493 };
494
495 -struct eth_tx_desc {
496 +struct tx_desc {
497 u16 byte_cnt; /* buffer byte count */
498 u16 l4i_chk; /* CPU provided TCP checksum */
499 u32 cmd_sts; /* Command/status field */
500 @@ -449,7 +188,7 @@
501 u32 buf_ptr; /* pointer to buffer for this descriptor*/
502 };
503 #elif defined(__LITTLE_ENDIAN)
504 -struct eth_rx_desc {
505 +struct rx_desc {
506 u32 cmd_sts; /* Descriptor command status */
507 u16 buf_size; /* Buffer size */
508 u16 byte_cnt; /* Descriptor buffer byte count */
509 @@ -457,7 +196,7 @@
510 u32 next_desc_ptr; /* Next descriptor pointer */
511 };
512
513 -struct eth_tx_desc {
514 +struct tx_desc {
515 u32 cmd_sts; /* Command/status field */
516 u16 l4i_chk; /* CPU provided TCP checksum */
517 u16 byte_cnt; /* buffer byte count */
518 @@ -468,18 +207,59 @@
519 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
520 #endif
521
522 -/* Unified struct for Rx and Tx operations. The user is not required to */
523 -/* be familier with neither Tx nor Rx descriptors. */
524 -struct pkt_info {
525 - unsigned short byte_cnt; /* Descriptor buffer byte count */
526 - unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */
527 - unsigned int cmd_sts; /* Descriptor command status */
528 - dma_addr_t buf_ptr; /* Descriptor buffer pointer */
529 - struct sk_buff *return_info; /* User resource return information */
530 +/* RX & TX descriptor command */
531 +#define BUFFER_OWNED_BY_DMA 0x80000000
532 +
533 +/* RX & TX descriptor status */
534 +#define ERROR_SUMMARY 0x00000001
535 +
536 +/* RX descriptor status */
537 +#define LAYER_4_CHECKSUM_OK 0x40000000
538 +#define RX_ENABLE_INTERRUPT 0x20000000
539 +#define RX_FIRST_DESC 0x08000000
540 +#define RX_LAST_DESC 0x04000000
541 +
542 +/* TX descriptor command */
543 +#define TX_ENABLE_INTERRUPT 0x00800000
544 +#define GEN_CRC 0x00400000
545 +#define TX_FIRST_DESC 0x00200000
546 +#define TX_LAST_DESC 0x00100000
547 +#define ZERO_PADDING 0x00080000
548 +#define GEN_IP_V4_CHECKSUM 0x00040000
549 +#define GEN_TCP_UDP_CHECKSUM 0x00020000
550 +#define UDP_FRAME 0x00010000
551 +
552 +#define TX_IHL_SHIFT 11
553 +
554 +
555 +/* global *******************************************************************/
556 +struct mv643xx_eth_shared_private {
557 + /*
558 + * Ethernet controller base address.
559 + */
560 + void __iomem *base;
561 +
562 + /*
563 + * Protects access to SMI_REG, which is shared between ports.
564 + */
565 + spinlock_t phy_lock;
566 +
567 + /*
568 + * Per-port MBUS window access register value.
569 + */
570 + u32 win_protect;
571 +
572 + /*
573 + * Hardware-specific parameters.
574 + */
575 + unsigned int t_clk;
576 + int extended_rx_coal_limit;
577 + int tx_bw_control_moved;
578 };
579
580 -/* Ethernet port specific information */
581 -struct mv643xx_mib_counters {
582 +
583 +/* per-port *****************************************************************/
584 +struct mib_counters {
585 u64 good_octets_received;
586 u32 bad_octets_received;
587 u32 internal_mac_transmit_err;
588 @@ -512,461 +292,282 @@
589 u32 late_collision;
590 };
591
592 -struct mv643xx_shared_private {
593 - void __iomem *eth_base;
594 -
595 - /* used to protect SMI_REG, which is shared across ports */
596 - spinlock_t phy_lock;
597 -
598 - u32 win_protect;
599 -
600 - unsigned int t_clk;
601 -};
602 -
603 -struct mv643xx_private {
604 - struct mv643xx_shared_private *shared;
605 - int port_num; /* User Ethernet port number */
606 -
607 - struct mv643xx_shared_private *shared_smi;
608 +struct rx_queue {
609 + int index;
610
611 - u32 rx_sram_addr; /* Base address of rx sram area */
612 - u32 rx_sram_size; /* Size of rx sram area */
613 - u32 tx_sram_addr; /* Base address of tx sram area */
614 - u32 tx_sram_size; /* Size of tx sram area */
615 + int rx_ring_size;
616
617 - int rx_resource_err; /* Rx ring resource error flag */
618 + int rx_desc_count;
619 + int rx_curr_desc;
620 + int rx_used_desc;
621
622 - /* Tx/Rx rings managment indexes fields. For driver use */
623 + struct rx_desc *rx_desc_area;
624 + dma_addr_t rx_desc_dma;
625 + int rx_desc_area_size;
626 + struct sk_buff **rx_skb;
627
628 - /* Next available and first returning Rx resource */
629 - int rx_curr_desc_q, rx_used_desc_q;
630 + struct timer_list rx_oom;
631 +};
632
633 - /* Next available and first returning Tx resource */
634 - int tx_curr_desc_q, tx_used_desc_q;
635 +struct tx_queue {
636 + int index;
637
638 -#ifdef MV643XX_TX_FAST_REFILL
639 - u32 tx_clean_threshold;
640 -#endif
641 + int tx_ring_size;
642
643 - struct eth_rx_desc *p_rx_desc_area;
644 - dma_addr_t rx_desc_dma;
645 - int rx_desc_area_size;
646 - struct sk_buff **rx_skb;
647 + int tx_desc_count;
648 + int tx_curr_desc;
649 + int tx_used_desc;
650
651 - struct eth_tx_desc *p_tx_desc_area;
652 + struct tx_desc *tx_desc_area;
653 dma_addr_t tx_desc_dma;
654 int tx_desc_area_size;
655 struct sk_buff **tx_skb;
656 +};
657
658 - struct work_struct tx_timeout_task;
659 +struct mv643xx_eth_private {
660 + struct mv643xx_eth_shared_private *shared;
661 + int port_num;
662
663 struct net_device *dev;
664 - struct napi_struct napi;
665 - struct net_device_stats stats;
666 - struct mv643xx_mib_counters mib_counters;
667 +
668 + struct mv643xx_eth_shared_private *shared_smi;
669 + int phy_addr;
670 +
671 spinlock_t lock;
672 - /* Size of Tx Ring per queue */
673 - int tx_ring_size;
674 - /* Number of tx descriptors in use */
675 - int tx_desc_count;
676 - /* Size of Rx Ring per queue */
677 - int rx_ring_size;
678 - /* Number of rx descriptors in use */
679 - int rx_desc_count;
680 +
681 + struct mib_counters mib_counters;
682 + struct work_struct tx_timeout_task;
683 + struct mii_if_info mii;
684
685 /*
686 - * Used in case RX Ring is empty, which can be caused when
687 - * system does not have resources (skb's)
688 + * RX state.
689 */
690 - struct timer_list timeout;
691 -
692 - u32 rx_int_coal;
693 - u32 tx_int_coal;
694 - struct mii_if_info mii;
695 -};
696 + int default_rx_ring_size;
697 + unsigned long rx_desc_sram_addr;
698 + int rx_desc_sram_size;
699 + u8 rxq_mask;
700 + int rxq_primary;
701 + struct napi_struct napi;
702 + struct rx_queue rxq[8];
703
704 -/* Static function declarations */
705 -static void eth_port_init(struct mv643xx_private *mp);
706 -static void eth_port_reset(struct mv643xx_private *mp);
707 -static void eth_port_start(struct net_device *dev);
708 -
709 -static void ethernet_phy_reset(struct mv643xx_private *mp);
710 -
711 -static void eth_port_write_smi_reg(struct mv643xx_private *mp,
712 - unsigned int phy_reg, unsigned int value);
713 -
714 -static void eth_port_read_smi_reg(struct mv643xx_private *mp,
715 - unsigned int phy_reg, unsigned int *value);
716 -
717 -static void eth_clear_mib_counters(struct mv643xx_private *mp);
718 -
719 -static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
720 - struct pkt_info *p_pkt_info);
721 -static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
722 - struct pkt_info *p_pkt_info);
723 -
724 -static void eth_port_uc_addr_get(struct mv643xx_private *mp,
725 - unsigned char *p_addr);
726 -static void eth_port_uc_addr_set(struct mv643xx_private *mp,
727 - unsigned char *p_addr);
728 -static void eth_port_set_multicast_list(struct net_device *);
729 -static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
730 - unsigned int queues);
731 -static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
732 - unsigned int queues);
733 -static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp);
734 -static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp);
735 -static int mv643xx_eth_open(struct net_device *);
736 -static int mv643xx_eth_stop(struct net_device *);
737 -static void eth_port_init_mac_tables(struct mv643xx_private *mp);
738 -#ifdef MV643XX_NAPI
739 -static int mv643xx_poll(struct napi_struct *napi, int budget);
740 + /*
741 + * TX state.
742 + */
743 + int default_tx_ring_size;
744 + unsigned long tx_desc_sram_addr;
745 + int tx_desc_sram_size;
746 + u8 txq_mask;
747 + int txq_primary;
748 + struct tx_queue txq[8];
749 +#ifdef MV643XX_ETH_TX_FAST_REFILL
750 + int tx_clean_threshold;
751 #endif
752 -static int ethernet_phy_get(struct mv643xx_private *mp);
753 -static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr);
754 -static int ethernet_phy_detect(struct mv643xx_private *mp);
755 -static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
756 -static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
757 -static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
758 -static const struct ethtool_ops mv643xx_ethtool_ops;
759 +};
760
761 -static char mv643xx_driver_name[] = "mv643xx_eth";
762 -static char mv643xx_driver_version[] = "1.0";
763
764 -static inline u32 rdl(struct mv643xx_private *mp, int offset)
765 +/* port register accessors **************************************************/
766 +static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
767 {
768 - return readl(mp->shared->eth_base + offset);
769 + return readl(mp->shared->base + offset);
770 }
771
772 -static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
773 +static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
774 {
775 - writel(data, mp->shared->eth_base + offset);
776 + writel(data, mp->shared->base + offset);
777 }
778
779 -/*
780 - * Changes MTU (maximum transfer unit) of the gigabit ethenret port
781 - *
782 - * Input : pointer to ethernet interface network device structure
783 - * new mtu size
784 - * Output : 0 upon success, -EINVAL upon failure
785 - */
786 -static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
787 -{
788 - if ((new_mtu > 9500) || (new_mtu < 64))
789 - return -EINVAL;
790 -
791 - dev->mtu = new_mtu;
792 - if (!netif_running(dev))
793 - return 0;
794 -
795 - /*
796 - * Stop and then re-open the interface. This will allocate RX
797 - * skbs of the new MTU.
798 - * There is a possible danger that the open will not succeed,
799 - * due to memory being full, which might fail the open function.
800 - */
801 - mv643xx_eth_stop(dev);
802 - if (mv643xx_eth_open(dev)) {
803 - printk(KERN_ERR "%s: Fatal error on opening device\n",
804 - dev->name);
805 - }
806 -
807 - return 0;
808 -}
809
810 -/*
811 - * mv643xx_eth_rx_refill_descs
812 - *
813 - * Fills / refills RX queue on a certain gigabit ethernet port
814 - *
815 - * Input : pointer to ethernet interface network device structure
816 - * Output : N/A
817 - */
818 -static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
819 +/* rxq/txq helper functions *************************************************/
820 +static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
821 {
822 - struct mv643xx_private *mp = netdev_priv(dev);
823 - struct pkt_info pkt_info;
824 - struct sk_buff *skb;
825 - int unaligned;
826 -
827 - while (mp->rx_desc_count < mp->rx_ring_size) {
828 - skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
829 - if (!skb)
830 - break;
831 - mp->rx_desc_count++;
832 - unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
833 - if (unaligned)
834 - skb_reserve(skb, dma_get_cache_alignment() - unaligned);
835 - pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
836 - pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
837 - pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
838 - ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
839 - pkt_info.return_info = skb;
840 - if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
841 - printk(KERN_ERR
842 - "%s: Error allocating RX Ring\n", dev->name);
843 - break;
844 - }
845 - skb_reserve(skb, ETH_HW_IP_ALIGN);
846 - }
847 - /*
848 - * If RX ring is empty of SKB, set a timer to try allocating
849 - * again at a later time.
850 - */
851 - if (mp->rx_desc_count == 0) {
852 - printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
853 - mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */
854 - add_timer(&mp->timeout);
855 - }
856 + return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
857 }
858
859 -/*
860 - * mv643xx_eth_rx_refill_descs_timer_wrapper
861 - *
862 - * Timer routine to wake up RX queue filling task. This function is
863 - * used only in case the RX queue is empty, and all alloc_skb has
864 - * failed (due to out of memory event).
865 - *
866 - * Input : pointer to ethernet interface network device structure
867 - * Output : N/A
868 - */
869 -static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
870 +static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
871 {
872 - mv643xx_eth_rx_refill_descs((struct net_device *)data);
873 + return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
874 }
875
876 -/*
877 - * mv643xx_eth_update_mac_address
878 - *
879 - * Update the MAC address of the port in the address table
880 - *
881 - * Input : pointer to ethernet interface network device structure
882 - * Output : N/A
883 - */
884 -static void mv643xx_eth_update_mac_address(struct net_device *dev)
885 +static void rxq_enable(struct rx_queue *rxq)
886 {
887 - struct mv643xx_private *mp = netdev_priv(dev);
888 -
889 - eth_port_init_mac_tables(mp);
890 - eth_port_uc_addr_set(mp, dev->dev_addr);
891 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
892 + wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
893 }
894
895 -/*
896 - * mv643xx_eth_set_rx_mode
897 - *
898 - * Change from promiscuos to regular rx mode
899 - *
900 - * Input : pointer to ethernet interface network device structure
901 - * Output : N/A
902 - */
903 -static void mv643xx_eth_set_rx_mode(struct net_device *dev)
904 +static void rxq_disable(struct rx_queue *rxq)
905 {
906 - struct mv643xx_private *mp = netdev_priv(dev);
907 - u32 config_reg;
908 -
909 - config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
910 - if (dev->flags & IFF_PROMISC)
911 - config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
912 - else
913 - config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
914 - wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
915 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
916 + u8 mask = 1 << rxq->index;
917
918 - eth_port_set_multicast_list(dev);
919 + wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
920 + while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
921 + udelay(10);
922 }
923
924 -/*
925 - * mv643xx_eth_set_mac_address
926 - *
927 - * Change the interface's mac address.
928 - * No special hardware thing should be done because interface is always
929 - * put in promiscuous mode.
930 - *
931 - * Input : pointer to ethernet interface network device structure and
932 - * a pointer to the designated entry to be added to the cache.
933 - * Output : zero upon success, negative upon failure
934 - */
935 -static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
936 +static void txq_enable(struct tx_queue *txq)
937 {
938 - int i;
939 -
940 - for (i = 0; i < 6; i++)
941 - /* +2 is for the offset of the HW addr type */
942 - dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
943 - mv643xx_eth_update_mac_address(dev);
944 - return 0;
945 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
946 + wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
947 }
948
949 -/*
950 - * mv643xx_eth_tx_timeout
951 - *
952 - * Called upon a timeout on transmitting a packet
953 - *
954 - * Input : pointer to ethernet interface network device structure.
955 - * Output : N/A
956 - */
957 -static void mv643xx_eth_tx_timeout(struct net_device *dev)
958 +static void txq_disable(struct tx_queue *txq)
959 {
960 - struct mv643xx_private *mp = netdev_priv(dev);
961 -
962 - printk(KERN_INFO "%s: TX timeout ", dev->name);
963 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
964 + u8 mask = 1 << txq->index;
965
966 - /* Do the reset outside of interrupt context */
967 - schedule_work(&mp->tx_timeout_task);
968 + wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
969 + while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
970 + udelay(10);
971 }
972
973 -/*
974 - * mv643xx_eth_tx_timeout_task
975 - *
976 - * Actual routine to reset the adapter when a timeout on Tx has occurred
977 - */
978 -static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
979 +static void __txq_maybe_wake(struct tx_queue *txq)
980 {
981 - struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
982 - tx_timeout_task);
983 - struct net_device *dev = mp->dev;
984 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
985
986 - if (!netif_running(dev))
987 - return;
988 + /*
989 + * netif_{stop,wake}_queue() flow control only applies to
990 + * the primary queue.
991 + */
992 + BUG_ON(txq->index != mp->txq_primary);
993
994 - netif_stop_queue(dev);
995 + if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
996 + netif_wake_queue(mp->dev);
997 +}
998
999 - eth_port_reset(mp);
1000 - eth_port_start(dev);
1001
1002 - if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
1003 - netif_wake_queue(dev);
1004 -}
1005 +/* rx ***********************************************************************/
1006 +static void txq_reclaim(struct tx_queue *txq, int force);
1007
1008 -/**
1009 - * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
1010 - *
1011 - * If force is non-zero, frees uncompleted descriptors as well
1012 - */
1013 -static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
1014 +static void rxq_refill(struct rx_queue *rxq)
1015 {
1016 - struct mv643xx_private *mp = netdev_priv(dev);
1017 - struct eth_tx_desc *desc;
1018 - u32 cmd_sts;
1019 - struct sk_buff *skb;
1020 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1021 unsigned long flags;
1022 - int tx_index;
1023 - dma_addr_t addr;
1024 - int count;
1025 - int released = 0;
1026 -
1027 - while (mp->tx_desc_count > 0) {
1028 - spin_lock_irqsave(&mp->lock, flags);
1029 -
1030 - /* tx_desc_count might have changed before acquiring the lock */
1031 - if (mp->tx_desc_count <= 0) {
1032 - spin_unlock_irqrestore(&mp->lock, flags);
1033 - return released;
1034 - }
1035 -
1036 - tx_index = mp->tx_used_desc_q;
1037 - desc = &mp->p_tx_desc_area[tx_index];
1038 - cmd_sts = desc->cmd_sts;
1039
1040 - if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
1041 - spin_unlock_irqrestore(&mp->lock, flags);
1042 - return released;
1043 - }
1044 + spin_lock_irqsave(&mp->lock, flags);
1045
1046 - mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
1047 - mp->tx_desc_count--;
1048 + while (rxq->rx_desc_count < rxq->rx_ring_size) {
1049 + int skb_size;
1050 + struct sk_buff *skb;
1051 + int unaligned;
1052 + int rx;
1053
1054 - addr = desc->buf_ptr;
1055 - count = desc->byte_cnt;
1056 - skb = mp->tx_skb[tx_index];
1057 - if (skb)
1058 - mp->tx_skb[tx_index] = NULL;
1059 + /*
1060 + * Reserve 2+14 bytes for an ethernet header (the
1061 + * hardware automatically prepends 2 bytes of dummy
1062 + * data to each received packet), 4 bytes for a VLAN
1063 + * header, and 4 bytes for the trailing FCS -- 24
1064 + * bytes total.
1065 + */
1066 + skb_size = mp->dev->mtu + 24;
1067
1068 - if (cmd_sts & ETH_ERROR_SUMMARY) {
1069 - printk("%s: Error in TX\n", dev->name);
1070 - dev->stats.tx_errors++;
1071 - }
1072 + skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
1073 + if (skb == NULL)
1074 + break;
1075
1076 - spin_unlock_irqrestore(&mp->lock, flags);
1077 + unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
1078 + if (unaligned)
1079 + skb_reserve(skb, dma_get_cache_alignment() - unaligned);
1080
1081 - if (cmd_sts & ETH_TX_FIRST_DESC)
1082 - dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
1083 - else
1084 - dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
1085 + rxq->rx_desc_count++;
1086 + rx = rxq->rx_used_desc;
1087 + rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
1088 +
1089 + rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
1090 + skb_size, DMA_FROM_DEVICE);
1091 + rxq->rx_desc_area[rx].buf_size = skb_size;
1092 + rxq->rx_skb[rx] = skb;
1093 + wmb();
1094 + rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
1095 + RX_ENABLE_INTERRUPT;
1096 + wmb();
1097
1098 - if (skb)
1099 - dev_kfree_skb_irq(skb);
1100 + /*
1101 + * The hardware automatically prepends 2 bytes of
1102 + * dummy data to each received packet, so that the
1103 + * IP header ends up 16-byte aligned.
1104 + */
1105 + skb_reserve(skb, 2);
1106 + }
1107
1108 - released = 1;
1109 + if (rxq->rx_desc_count != rxq->rx_ring_size) {
1110 + rxq->rx_oom.expires = jiffies + (HZ / 10);
1111 + add_timer(&rxq->rx_oom);
1112 }
1113
1114 - return released;
1115 + spin_unlock_irqrestore(&mp->lock, flags);
1116 }
1117
1118 -static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
1119 +static inline void rxq_refill_timer_wrapper(unsigned long data)
1120 {
1121 - struct mv643xx_private *mp = netdev_priv(dev);
1122 -
1123 - if (mv643xx_eth_free_tx_descs(dev, 0) &&
1124 - mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
1125 - netif_wake_queue(dev);
1126 + rxq_refill((struct rx_queue *)data);
1127 }
1128
1129 -static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
1130 +static int rxq_process(struct rx_queue *rxq, int budget)
1131 {
1132 - mv643xx_eth_free_tx_descs(dev, 1);
1133 -}
1134 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1135 + struct net_device_stats *stats = &mp->dev->stats;
1136 + int rx;
1137
1138 -/*
1139 - * mv643xx_eth_receive
1140 - *
1141 - * This function is forward packets that are received from the port's
1142 - * queues toward kernel core or FastRoute them to another interface.
1143 - *
1144 - * Input : dev - a pointer to the required interface
1145 - * max - maximum number to receive (0 means unlimted)
1146 - *
1147 - * Output : number of served packets
1148 - */
1149 -static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
1150 -{
1151 - struct mv643xx_private *mp = netdev_priv(dev);
1152 - struct net_device_stats *stats = &dev->stats;
1153 - unsigned int received_packets = 0;
1154 - struct sk_buff *skb;
1155 - struct pkt_info pkt_info;
1156 -
1157 - while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
1158 - dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
1159 - DMA_FROM_DEVICE);
1160 - mp->rx_desc_count--;
1161 - received_packets++;
1162 + rx = 0;
1163 + while (rx < budget) {
1164 + struct rx_desc *rx_desc;
1165 + unsigned int cmd_sts;
1166 + struct sk_buff *skb;
1167 + unsigned long flags;
1168 +
1169 + spin_lock_irqsave(&mp->lock, flags);
1170 +
1171 + rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
1172 +
1173 + cmd_sts = rx_desc->cmd_sts;
1174 + if (cmd_sts & BUFFER_OWNED_BY_DMA) {
1175 + spin_unlock_irqrestore(&mp->lock, flags);
1176 + break;
1177 + }
1178 + rmb();
1179 +
1180 + skb = rxq->rx_skb[rxq->rx_curr_desc];
1181 + rxq->rx_skb[rxq->rx_curr_desc] = NULL;
1182 +
1183 + rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
1184 +
1185 + spin_unlock_irqrestore(&mp->lock, flags);
1186 +
1187 + dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
1188 + mp->dev->mtu + 24, DMA_FROM_DEVICE);
1189 + rxq->rx_desc_count--;
1190 + rx++;
1191
1192 /*
1193 * Update statistics.
1194 - * Note byte count includes 4 byte CRC count
1195 + *
1196 + * Note that the descriptor byte count includes 2 dummy
1197 + * bytes automatically inserted by the hardware at the
1198 + * start of the packet (which we don't count), and a 4
1199 + * byte CRC at the end of the packet (which we do count).
1200 */
1201 stats->rx_packets++;
1202 - stats->rx_bytes += pkt_info.byte_cnt;
1203 - skb = pkt_info.return_info;
1204 + stats->rx_bytes += rx_desc->byte_cnt - 2;
1205 +
1206 /*
1207 - * In case received a packet without first / last bits on OR
1208 - * the error summary bit is on, the packets needs to be dropeed.
1209 + * In case we received a packet without first / last bits
1210 + * on, or the error summary bit is set, the packet needs
1211 + * to be dropped.
1212 */
1213 - if (((pkt_info.cmd_sts
1214 - & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
1215 - (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
1216 - || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
1217 + if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
1218 + (RX_FIRST_DESC | RX_LAST_DESC))
1219 + || (cmd_sts & ERROR_SUMMARY)) {
1220 stats->rx_dropped++;
1221 - if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
1222 - ETH_RX_LAST_DESC)) !=
1223 - (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
1224 +
1225 + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
1226 + (RX_FIRST_DESC | RX_LAST_DESC)) {
1227 if (net_ratelimit())
1228 - printk(KERN_ERR
1229 - "%s: Received packet spread "
1230 - "on multiple descriptors\n",
1231 - dev->name);
1232 + dev_printk(KERN_ERR, &mp->dev->dev,
1233 + "received packet spanning "
1234 + "multiple descriptors\n");
1235 }
1236 - if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
1237 +
1238 + if (cmd_sts & ERROR_SUMMARY)
1239 stats->rx_errors++;
1240
1241 dev_kfree_skb_irq(skb);
1242 @@ -975,668 +576,120 @@
1243 * The -4 is for the CRC in the trailer of the
1244 * received packet
1245 */
1246 - skb_put(skb, pkt_info.byte_cnt - 4);
1247 + skb_put(skb, rx_desc->byte_cnt - 2 - 4);
1248
1249 - if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
1250 + if (cmd_sts & LAYER_4_CHECKSUM_OK) {
1251 skb->ip_summed = CHECKSUM_UNNECESSARY;
1252 skb->csum = htons(
1253 - (pkt_info.cmd_sts & 0x0007fff8) >> 3);
1254 + (cmd_sts & 0x0007fff8) >> 3);
1255 }
1256 - skb->protocol = eth_type_trans(skb, dev);
1257 -#ifdef MV643XX_NAPI
1258 + skb->protocol = eth_type_trans(skb, mp->dev);
1259 +#ifdef MV643XX_ETH_NAPI
1260 netif_receive_skb(skb);
1261 #else
1262 netif_rx(skb);
1263 #endif
1264 }
1265 - dev->last_rx = jiffies;
1266 +
1267 + mp->dev->last_rx = jiffies;
1268 }
1269 - mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
1270
1271 - return received_packets;
1272 + rxq_refill(rxq);
1273 +
1274 + return rx;
1275 }
1276
1277 -/* Set the mv643xx port configuration register for the speed/duplex mode. */
1278 -static void mv643xx_eth_update_pscr(struct net_device *dev,
1279 - struct ethtool_cmd *ecmd)
1280 +#ifdef MV643XX_ETH_NAPI
1281 +static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
1282 {
1283 - struct mv643xx_private *mp = netdev_priv(dev);
1284 - int port_num = mp->port_num;
1285 - u32 o_pscr, n_pscr;
1286 - unsigned int queues;
1287 + struct mv643xx_eth_private *mp;
1288 + int rx;
1289 + int i;
1290
1291 - o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
1292 - n_pscr = o_pscr;
1293 + mp = container_of(napi, struct mv643xx_eth_private, napi);
1294
1295 - /* clear speed, duplex and rx buffer size fields */
1296 - n_pscr &= ~(SET_MII_SPEED_TO_100 |
1297 - SET_GMII_SPEED_TO_1000 |
1298 - SET_FULL_DUPLEX_MODE |
1299 - MAX_RX_PACKET_MASK);
1300 -
1301 - if (ecmd->duplex == DUPLEX_FULL)
1302 - n_pscr |= SET_FULL_DUPLEX_MODE;
1303 -
1304 - if (ecmd->speed == SPEED_1000)
1305 - n_pscr |= SET_GMII_SPEED_TO_1000 |
1306 - MAX_RX_PACKET_9700BYTE;
1307 - else {
1308 - if (ecmd->speed == SPEED_100)
1309 - n_pscr |= SET_MII_SPEED_TO_100;
1310 - n_pscr |= MAX_RX_PACKET_1522BYTE;
1311 - }
1312 -
1313 - if (n_pscr != o_pscr) {
1314 - if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
1315 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1316 - else {
1317 - queues = mv643xx_eth_port_disable_tx(mp);
1318 +#ifdef MV643XX_ETH_TX_FAST_REFILL
1319 + if (++mp->tx_clean_threshold > 5) {
1320 + mp->tx_clean_threshold = 0;
1321 + for (i = 0; i < 8; i++)
1322 + if (mp->txq_mask & (1 << i))
1323 + txq_reclaim(mp->txq + i, 0);
1324 + }
1325 +#endif
1326
1327 - o_pscr &= ~SERIAL_PORT_ENABLE;
1328 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
1329 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1330 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1331 - if (queues)
1332 - mv643xx_eth_port_enable_tx(mp, queues);
1333 - }
1334 + rx = 0;
1335 + for (i = 7; rx < budget && i >= 0; i--)
1336 + if (mp->rxq_mask & (1 << i))
1337 + rx += rxq_process(mp->rxq + i, budget - rx);
1338 +
1339 + if (rx < budget) {
1340 + netif_rx_complete(mp->dev, napi);
1341 + wrl(mp, INT_CAUSE(mp->port_num), 0);
1342 + wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
1343 + wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
1344 }
1345 +
1346 + return rx;
1347 }
1348 +#endif
1349
1350 -/*
1351 - * mv643xx_eth_int_handler
1352 - *
1353 - * Main interrupt handler for the gigbit ethernet ports
1354 - *
1355 - * Input : irq - irq number (not used)
1356 - * dev_id - a pointer to the required interface's data structure
1357 - * regs - not used
1358 - * Output : N/A
1359 - */
1360
1361 -static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1362 +/* tx ***********************************************************************/
1363 +static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1364 {
1365 - struct net_device *dev = (struct net_device *)dev_id;
1366 - struct mv643xx_private *mp = netdev_priv(dev);
1367 - u32 eth_int_cause, eth_int_cause_ext = 0;
1368 - unsigned int port_num = mp->port_num;
1369 -
1370 - /* Read interrupt cause registers */
1371 - eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
1372 - ETH_INT_UNMASK_ALL;
1373 - if (eth_int_cause & ETH_INT_CAUSE_EXT) {
1374 - eth_int_cause_ext = rdl(mp,
1375 - INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
1376 - ETH_INT_UNMASK_ALL_EXT;
1377 - wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
1378 - ~eth_int_cause_ext);
1379 - }
1380 -
1381 - /* PHY status changed */
1382 - if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
1383 - struct ethtool_cmd cmd;
1384 + int frag;
1385
1386 - if (mii_link_ok(&mp->mii)) {
1387 - mii_ethtool_gset(&mp->mii, &cmd);
1388 - mv643xx_eth_update_pscr(dev, &cmd);
1389 - mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
1390 - if (!netif_carrier_ok(dev)) {
1391 - netif_carrier_on(dev);
1392 - if (mp->tx_ring_size - mp->tx_desc_count >=
1393 - MAX_DESCS_PER_SKB)
1394 - netif_wake_queue(dev);
1395 - }
1396 - } else if (netif_carrier_ok(dev)) {
1397 - netif_stop_queue(dev);
1398 - netif_carrier_off(dev);
1399 - }
1400 + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1401 + skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
1402 + if (fragp->size <= 8 && fragp->page_offset & 7)
1403 + return 1;
1404 }
1405
1406 -#ifdef MV643XX_NAPI
1407 - if (eth_int_cause & ETH_INT_CAUSE_RX) {
1408 - /* schedule the NAPI poll routine to maintain port */
1409 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1410 + return 0;
1411 +}
1412
1413 - /* wait for previous write to complete */
1414 - rdl(mp, INTERRUPT_MASK_REG(port_num));
1415 +static int txq_alloc_desc_index(struct tx_queue *txq)
1416 +{
1417 + int tx_desc_curr;
1418
1419 - netif_rx_schedule(dev, &mp->napi);
1420 - }
1421 -#else
1422 - if (eth_int_cause & ETH_INT_CAUSE_RX)
1423 - mv643xx_eth_receive_queue(dev, INT_MAX);
1424 -#endif
1425 - if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
1426 - mv643xx_eth_free_completed_tx_descs(dev);
1427 + BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
1428
1429 - /*
1430 - * If no real interrupt occured, exit.
1431 - * This can happen when using gigE interrupt coalescing mechanism.
1432 - */
1433 - if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
1434 - return IRQ_NONE;
1435 + tx_desc_curr = txq->tx_curr_desc;
1436 + txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
1437
1438 - return IRQ_HANDLED;
1439 -}
1440 -
1441 -#ifdef MV643XX_COAL
1442 -
1443 -/*
1444 - * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
1445 - *
1446 - * DESCRIPTION:
1447 - * This routine sets the RX coalescing interrupt mechanism parameter.
1448 - * This parameter is a timeout counter, that counts in 64 t_clk
1449 - * chunks ; that when timeout event occurs a maskable interrupt
1450 - * occurs.
1451 - * The parameter is calculated using the tClk of the MV-643xx chip
1452 - * , and the required delay of the interrupt in usec.
1453 - *
1454 - * INPUT:
1455 - * struct mv643xx_private *mp Ethernet port
1456 - * unsigned int delay Delay in usec
1457 - *
1458 - * OUTPUT:
1459 - * Interrupt coalescing mechanism value is set in MV-643xx chip.
1460 - *
1461 - * RETURN:
1462 - * The interrupt coalescing value set in the gigE port.
1463 - *
1464 - */
1465 -static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1466 - unsigned int delay)
1467 -{
1468 - unsigned int port_num = mp->port_num;
1469 - unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
1470 -
1471 - /* Set RX Coalescing mechanism */
1472 - wrl(mp, SDMA_CONFIG_REG(port_num),
1473 - ((coal & 0x3fff) << 8) |
1474 - (rdl(mp, SDMA_CONFIG_REG(port_num))
1475 - & 0xffc000ff));
1476 -
1477 - return coal;
1478 -}
1479 -#endif
1480 -
1481 -/*
1482 - * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
1483 - *
1484 - * DESCRIPTION:
1485 - * This routine sets the TX coalescing interrupt mechanism parameter.
1486 - * This parameter is a timeout counter, that counts in 64 t_clk
1487 - * chunks ; that when timeout event occurs a maskable interrupt
1488 - * occurs.
1489 - * The parameter is calculated using the t_cLK frequency of the
1490 - * MV-643xx chip and the required delay in the interrupt in uSec
1491 - *
1492 - * INPUT:
1493 - * struct mv643xx_private *mp Ethernet port
1494 - * unsigned int delay Delay in uSeconds
1495 - *
1496 - * OUTPUT:
1497 - * Interrupt coalescing mechanism value is set in MV-643xx chip.
1498 - *
1499 - * RETURN:
1500 - * The interrupt coalescing value set in the gigE port.
1501 - *
1502 - */
1503 -static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
1504 - unsigned int delay)
1505 -{
1506 - unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
1507 -
1508 - /* Set TX Coalescing mechanism */
1509 - wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
1510 -
1511 - return coal;
1512 -}
1513 -
1514 -/*
1515 - * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
1516 - *
1517 - * DESCRIPTION:
1518 - * This function prepares a Rx chained list of descriptors and packet
1519 - * buffers in a form of a ring. The routine must be called after port
1520 - * initialization routine and before port start routine.
1521 - * The Ethernet SDMA engine uses CPU bus addresses to access the various
1522 - * devices in the system (i.e. DRAM). This function uses the ethernet
1523 - * struct 'virtual to physical' routine (set by the user) to set the ring
1524 - * with physical addresses.
1525 - *
1526 - * INPUT:
1527 - * struct mv643xx_private *mp Ethernet Port Control srtuct.
1528 - *
1529 - * OUTPUT:
1530 - * The routine updates the Ethernet port control struct with information
1531 - * regarding the Rx descriptors and buffers.
1532 - *
1533 - * RETURN:
1534 - * None.
1535 - */
1536 -static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
1537 -{
1538 - volatile struct eth_rx_desc *p_rx_desc;
1539 - int rx_desc_num = mp->rx_ring_size;
1540 - int i;
1541 -
1542 - /* initialize the next_desc_ptr links in the Rx descriptors ring */
1543 - p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
1544 - for (i = 0; i < rx_desc_num; i++) {
1545 - p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
1546 - ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
1547 - }
1548 -
1549 - /* Save Rx desc pointer to driver struct. */
1550 - mp->rx_curr_desc_q = 0;
1551 - mp->rx_used_desc_q = 0;
1552 -
1553 - mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
1554 -}
1555 -
1556 -/*
1557 - * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
1558 - *
1559 - * DESCRIPTION:
1560 - * This function prepares a Tx chained list of descriptors and packet
1561 - * buffers in a form of a ring. The routine must be called after port
1562 - * initialization routine and before port start routine.
1563 - * The Ethernet SDMA engine uses CPU bus addresses to access the various
1564 - * devices in the system (i.e. DRAM). This function uses the ethernet
1565 - * struct 'virtual to physical' routine (set by the user) to set the ring
1566 - * with physical addresses.
1567 - *
1568 - * INPUT:
1569 - * struct mv643xx_private *mp Ethernet Port Control srtuct.
1570 - *
1571 - * OUTPUT:
1572 - * The routine updates the Ethernet port control struct with information
1573 - * regarding the Tx descriptors and buffers.
1574 - *
1575 - * RETURN:
1576 - * None.
1577 - */
1578 -static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
1579 -{
1580 - int tx_desc_num = mp->tx_ring_size;
1581 - struct eth_tx_desc *p_tx_desc;
1582 - int i;
1583 -
1584 - /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1585 - p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
1586 - for (i = 0; i < tx_desc_num; i++) {
1587 - p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
1588 - ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
1589 - }
1590 -
1591 - mp->tx_curr_desc_q = 0;
1592 - mp->tx_used_desc_q = 0;
1593 -
1594 - mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
1595 -}
1596 -
1597 -static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1598 -{
1599 - struct mv643xx_private *mp = netdev_priv(dev);
1600 - int err;
1601 -
1602 - spin_lock_irq(&mp->lock);
1603 - err = mii_ethtool_sset(&mp->mii, cmd);
1604 - spin_unlock_irq(&mp->lock);
1605 -
1606 - return err;
1607 -}
1608 -
1609 -static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1610 -{
1611 - struct mv643xx_private *mp = netdev_priv(dev);
1612 - int err;
1613 -
1614 - spin_lock_irq(&mp->lock);
1615 - err = mii_ethtool_gset(&mp->mii, cmd);
1616 - spin_unlock_irq(&mp->lock);
1617 -
1618 - /* The PHY may support 1000baseT_Half, but the mv643xx does not */
1619 - cmd->supported &= ~SUPPORTED_1000baseT_Half;
1620 - cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1621 -
1622 - return err;
1623 -}
1624 -
1625 -/*
1626 - * mv643xx_eth_open
1627 - *
1628 - * This function is called when openning the network device. The function
1629 - * should initialize all the hardware, initialize cyclic Rx/Tx
1630 - * descriptors chain and buffers and allocate an IRQ to the network
1631 - * device.
1632 - *
1633 - * Input : a pointer to the network device structure
1634 - *
1635 - * Output : zero of success , nonzero if fails.
1636 - */
1637 -
1638 -static int mv643xx_eth_open(struct net_device *dev)
1639 -{
1640 - struct mv643xx_private *mp = netdev_priv(dev);
1641 - unsigned int port_num = mp->port_num;
1642 - unsigned int size;
1643 - int err;
1644 -
1645 - /* Clear any pending ethernet port interrupts */
1646 - wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1647 - wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1648 - /* wait for previous write to complete */
1649 - rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
1650 -
1651 - err = request_irq(dev->irq, mv643xx_eth_int_handler,
1652 - IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
1653 - if (err) {
1654 - printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
1655 - return -EAGAIN;
1656 - }
1657 -
1658 - eth_port_init(mp);
1659 -
1660 - memset(&mp->timeout, 0, sizeof(struct timer_list));
1661 - mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
1662 - mp->timeout.data = (unsigned long)dev;
1663 -
1664 - /* Allocate RX and TX skb rings */
1665 - mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
1666 - GFP_KERNEL);
1667 - if (!mp->rx_skb) {
1668 - printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
1669 - err = -ENOMEM;
1670 - goto out_free_irq;
1671 - }
1672 - mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
1673 - GFP_KERNEL);
1674 - if (!mp->tx_skb) {
1675 - printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
1676 - err = -ENOMEM;
1677 - goto out_free_rx_skb;
1678 - }
1679 -
1680 - /* Allocate TX ring */
1681 - mp->tx_desc_count = 0;
1682 - size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
1683 - mp->tx_desc_area_size = size;
1684 -
1685 - if (mp->tx_sram_size) {
1686 - mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
1687 - mp->tx_sram_size);
1688 - mp->tx_desc_dma = mp->tx_sram_addr;
1689 - } else
1690 - mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
1691 - &mp->tx_desc_dma,
1692 - GFP_KERNEL);
1693 -
1694 - if (!mp->p_tx_desc_area) {
1695 - printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
1696 - dev->name, size);
1697 - err = -ENOMEM;
1698 - goto out_free_tx_skb;
1699 - }
1700 - BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
1701 - memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
1702 -
1703 - ether_init_tx_desc_ring(mp);
1704 -
1705 - /* Allocate RX ring */
1706 - mp->rx_desc_count = 0;
1707 - size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
1708 - mp->rx_desc_area_size = size;
1709 -
1710 - if (mp->rx_sram_size) {
1711 - mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
1712 - mp->rx_sram_size);
1713 - mp->rx_desc_dma = mp->rx_sram_addr;
1714 - } else
1715 - mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
1716 - &mp->rx_desc_dma,
1717 - GFP_KERNEL);
1718 -
1719 - if (!mp->p_rx_desc_area) {
1720 - printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
1721 - dev->name, size);
1722 - printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
1723 - dev->name);
1724 - if (mp->rx_sram_size)
1725 - iounmap(mp->p_tx_desc_area);
1726 - else
1727 - dma_free_coherent(NULL, mp->tx_desc_area_size,
1728 - mp->p_tx_desc_area, mp->tx_desc_dma);
1729 - err = -ENOMEM;
1730 - goto out_free_tx_skb;
1731 - }
1732 - memset((void *)mp->p_rx_desc_area, 0, size);
1733 -
1734 - ether_init_rx_desc_ring(mp);
1735 -
1736 - mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
1737 -
1738 -#ifdef MV643XX_NAPI
1739 - napi_enable(&mp->napi);
1740 -#endif
1741 -
1742 - eth_port_start(dev);
1743 -
1744 - /* Interrupt Coalescing */
1745 -
1746 -#ifdef MV643XX_COAL
1747 - mp->rx_int_coal =
1748 - eth_port_set_rx_coal(mp, MV643XX_RX_COAL);
1749 -#endif
1750 -
1751 - mp->tx_int_coal =
1752 - eth_port_set_tx_coal(mp, MV643XX_TX_COAL);
1753 -
1754 - /* Unmask phy and link status changes interrupts */
1755 - wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
1756 -
1757 - /* Unmask RX buffer and TX end interrupt */
1758 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1759 -
1760 - return 0;
1761 -
1762 -out_free_tx_skb:
1763 - kfree(mp->tx_skb);
1764 -out_free_rx_skb:
1765 - kfree(mp->rx_skb);
1766 -out_free_irq:
1767 - free_irq(dev->irq, dev);
1768 -
1769 - return err;
1770 -}
1771 -
1772 -static void mv643xx_eth_free_tx_rings(struct net_device *dev)
1773 -{
1774 - struct mv643xx_private *mp = netdev_priv(dev);
1775 -
1776 - /* Stop Tx Queues */
1777 - mv643xx_eth_port_disable_tx(mp);
1778 -
1779 - /* Free outstanding skb's on TX ring */
1780 - mv643xx_eth_free_all_tx_descs(dev);
1781 -
1782 - BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
1783 -
1784 - /* Free TX ring */
1785 - if (mp->tx_sram_size)
1786 - iounmap(mp->p_tx_desc_area);
1787 - else
1788 - dma_free_coherent(NULL, mp->tx_desc_area_size,
1789 - mp->p_tx_desc_area, mp->tx_desc_dma);
1790 -}
1791 -
1792 -static void mv643xx_eth_free_rx_rings(struct net_device *dev)
1793 -{
1794 - struct mv643xx_private *mp = netdev_priv(dev);
1795 - int curr;
1796 -
1797 - /* Stop RX Queues */
1798 - mv643xx_eth_port_disable_rx(mp);
1799 -
1800 - /* Free preallocated skb's on RX rings */
1801 - for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
1802 - if (mp->rx_skb[curr]) {
1803 - dev_kfree_skb(mp->rx_skb[curr]);
1804 - mp->rx_desc_count--;
1805 - }
1806 - }
1807 -
1808 - if (mp->rx_desc_count)
1809 - printk(KERN_ERR
1810 - "%s: Error in freeing Rx Ring. %d skb's still"
1811 - " stuck in RX Ring - ignoring them\n", dev->name,
1812 - mp->rx_desc_count);
1813 - /* Free RX ring */
1814 - if (mp->rx_sram_size)
1815 - iounmap(mp->p_rx_desc_area);
1816 - else
1817 - dma_free_coherent(NULL, mp->rx_desc_area_size,
1818 - mp->p_rx_desc_area, mp->rx_desc_dma);
1819 -}
1820 -
1821 -/*
1822 - * mv643xx_eth_stop
1823 - *
1824 - * This function is used when closing the network device.
1825 - * It updates the hardware,
1826 - * release all memory that holds buffers and descriptors and release the IRQ.
1827 - * Input : a pointer to the device structure
1828 - * Output : zero if success , nonzero if fails
1829 - */
1830 -
1831 -static int mv643xx_eth_stop(struct net_device *dev)
1832 -{
1833 - struct mv643xx_private *mp = netdev_priv(dev);
1834 - unsigned int port_num = mp->port_num;
1835 -
1836 - /* Mask all interrupts on ethernet port */
1837 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1838 - /* wait for previous write to complete */
1839 - rdl(mp, INTERRUPT_MASK_REG(port_num));
1840 -
1841 -#ifdef MV643XX_NAPI
1842 - napi_disable(&mp->napi);
1843 -#endif
1844 - netif_carrier_off(dev);
1845 - netif_stop_queue(dev);
1846 -
1847 - eth_port_reset(mp);
1848 -
1849 - mv643xx_eth_free_tx_rings(dev);
1850 - mv643xx_eth_free_rx_rings(dev);
1851 -
1852 - free_irq(dev->irq, dev);
1853 -
1854 - return 0;
1855 -}
1856 -
1857 -#ifdef MV643XX_NAPI
1858 -/*
1859 - * mv643xx_poll
1860 - *
1861 - * This function is used in case of NAPI
1862 - */
1863 -static int mv643xx_poll(struct napi_struct *napi, int budget)
1864 -{
1865 - struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi);
1866 - struct net_device *dev = mp->dev;
1867 - unsigned int port_num = mp->port_num;
1868 - int work_done;
1869 -
1870 -#ifdef MV643XX_TX_FAST_REFILL
1871 - if (++mp->tx_clean_threshold > 5) {
1872 - mv643xx_eth_free_completed_tx_descs(dev);
1873 - mp->tx_clean_threshold = 0;
1874 - }
1875 -#endif
1876 -
1877 - work_done = 0;
1878 - if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1879 - != (u32) mp->rx_used_desc_q)
1880 - work_done = mv643xx_eth_receive_queue(dev, budget);
1881 -
1882 - if (work_done < budget) {
1883 - netif_rx_complete(dev, napi);
1884 - wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1885 - wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1886 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1887 - }
1888 -
1889 - return work_done;
1890 -}
1891 -#endif
1892 -
1893 -/**
1894 - * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments
1895 - *
1896 - * Hardware can't handle unaligned fragments smaller than 9 bytes.
1897 - * This helper function detects that case.
1898 - */
1899 -
1900 -static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1901 -{
1902 - unsigned int frag;
1903 - skb_frag_t *fragp;
1904 -
1905 - for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1906 - fragp = &skb_shinfo(skb)->frags[frag];
1907 - if (fragp->size <= 8 && fragp->page_offset & 0x7)
1908 - return 1;
1909 - }
1910 - return 0;
1911 -}
1912 -
1913 -/**
1914 - * eth_alloc_tx_desc_index - return the index of the next available tx desc
1915 - */
1916 -static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
1917 -{
1918 - int tx_desc_curr;
1919 -
1920 - BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
1921 -
1922 - tx_desc_curr = mp->tx_curr_desc_q;
1923 - mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
1924 -
1925 - BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
1926 + BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
1927
1928 return tx_desc_curr;
1929 }
1930
1931 -/**
1932 - * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
1933 - *
1934 - * Ensure the data for each fragment to be transmitted is mapped properly,
1935 - * then fill in descriptors in the tx hw queue.
1936 - */
1937 -static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
1938 - struct sk_buff *skb)
1939 +static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
1940 {
1941 + int nr_frags = skb_shinfo(skb)->nr_frags;
1942 int frag;
1943 - int tx_index;
1944 - struct eth_tx_desc *desc;
1945 -
1946 - for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1947 - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1948
1949 - tx_index = eth_alloc_tx_desc_index(mp);
1950 - desc = &mp->p_tx_desc_area[tx_index];
1951 + for (frag = 0; frag < nr_frags; frag++) {
1952 + skb_frag_t *this_frag;
1953 + int tx_index;
1954 + struct tx_desc *desc;
1955 +
1956 + this_frag = &skb_shinfo(skb)->frags[frag];
1957 + tx_index = txq_alloc_desc_index(txq);
1958 + desc = &txq->tx_desc_area[tx_index];
1959
1960 - desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA;
1961 - /* Last Frag enables interrupt and frees the skb */
1962 - if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1963 - desc->cmd_sts |= ETH_ZERO_PADDING |
1964 - ETH_TX_LAST_DESC |
1965 - ETH_TX_ENABLE_INTERRUPT;
1966 - mp->tx_skb[tx_index] = skb;
1967 - } else
1968 - mp->tx_skb[tx_index] = NULL;
1969 + /*
1970 + * The last fragment will generate an interrupt
1971 + * which will free the skb on TX completion.
1972 + */
1973 + if (frag == nr_frags - 1) {
1974 + desc->cmd_sts = BUFFER_OWNED_BY_DMA |
1975 + ZERO_PADDING | TX_LAST_DESC |
1976 + TX_ENABLE_INTERRUPT;
1977 + txq->tx_skb[tx_index] = skb;
1978 + } else {
1979 + desc->cmd_sts = BUFFER_OWNED_BY_DMA;
1980 + txq->tx_skb[tx_index] = NULL;
1981 + }
1982
1983 - desc = &mp->p_tx_desc_area[tx_index];
1984 desc->l4i_chk = 0;
1985 desc->byte_cnt = this_frag->size;
1986 desc->buf_ptr = dma_map_page(NULL, this_frag->page,
1987 @@ -1651,37 +704,28 @@
1988 return (__force __be16)sum;
1989 }
1990
1991 -/**
1992 - * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
1993 - *
1994 - * Ensure the data for an skb to be transmitted is mapped properly,
1995 - * then fill in descriptors in the tx hw queue and start the hardware.
1996 - */
1997 -static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1998 - struct sk_buff *skb)
1999 +static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
2000 {
2001 + int nr_frags = skb_shinfo(skb)->nr_frags;
2002 int tx_index;
2003 - struct eth_tx_desc *desc;
2004 + struct tx_desc *desc;
2005 u32 cmd_sts;
2006 int length;
2007 - int nr_frags = skb_shinfo(skb)->nr_frags;
2008
2009 - cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
2010 + cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
2011
2012 - tx_index = eth_alloc_tx_desc_index(mp);
2013 - desc = &mp->p_tx_desc_area[tx_index];
2014 + tx_index = txq_alloc_desc_index(txq);
2015 + desc = &txq->tx_desc_area[tx_index];
2016
2017 if (nr_frags) {
2018 - eth_tx_fill_frag_descs(mp, skb);
2019 + txq_submit_frag_skb(txq, skb);
2020
2021 length = skb_headlen(skb);
2022 - mp->tx_skb[tx_index] = NULL;
2023 + txq->tx_skb[tx_index] = NULL;
2024 } else {
2025 - cmd_sts |= ETH_ZERO_PADDING |
2026 - ETH_TX_LAST_DESC |
2027 - ETH_TX_ENABLE_INTERRUPT;
2028 + cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
2029 length = skb->len;
2030 - mp->tx_skb[tx_index] = skb;
2031 + txq->tx_skb[tx_index] = skb;
2032 }
2033
2034 desc->byte_cnt = length;
2035 @@ -1690,13 +734,13 @@
2036 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2037 BUG_ON(skb->protocol != htons(ETH_P_IP));
2038
2039 - cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
2040 - ETH_GEN_IP_V_4_CHECKSUM |
2041 - ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT;
2042 + cmd_sts |= GEN_TCP_UDP_CHECKSUM |
2043 + GEN_IP_V4_CHECKSUM |
2044 + ip_hdr(skb)->ihl << TX_IHL_SHIFT;
2045
2046 switch (ip_hdr(skb)->protocol) {
2047 case IPPROTO_UDP:
2048 - cmd_sts |= ETH_UDP_FRAME;
2049 + cmd_sts |= UDP_FRAME;
2050 desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
2051 break;
2052 case IPPROTO_TCP:
2053 @@ -1707,7 +751,7 @@
2054 }
2055 } else {
2056 /* Errata BTS #50, IHL must be 5 if no HW checksum */
2057 - cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
2058 + cmd_sts |= 5 << TX_IHL_SHIFT;
2059 desc->l4i_chk = 0;
2060 }
2061
2062 @@ -1717,1649 +761,1818 @@
2063
2064 /* ensure all descriptors are written before poking hardware */
2065 wmb();
2066 - mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
2067 + txq_enable(txq);
2068
2069 - mp->tx_desc_count += nr_frags + 1;
2070 + txq->tx_desc_count += nr_frags + 1;
2071 }
2072
2073 -/**
2074 - * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission
2075 - *
2076 - */
2077 -static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
2078 +static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
2079 {
2080 - struct mv643xx_private *mp = netdev_priv(dev);
2081 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2082 struct net_device_stats *stats = &dev->stats;
2083 + struct tx_queue *txq;
2084 unsigned long flags;
2085
2086 - BUG_ON(netif_queue_stopped(dev));
2087 -
2088 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
2089 stats->tx_dropped++;
2090 - printk(KERN_DEBUG "%s: failed to linearize tiny "
2091 - "unaligned fragment\n", dev->name);
2092 + dev_printk(KERN_DEBUG, &dev->dev,
2093 + "failed to linearize skb with tiny "
2094 + "unaligned fragment\n");
2095 return NETDEV_TX_BUSY;
2096 }
2097
2098 spin_lock_irqsave(&mp->lock, flags);
2099
2100 - if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
2101 - printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
2102 - netif_stop_queue(dev);
2103 + txq = mp->txq + mp->txq_primary;
2104 +
2105 + if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
2106 spin_unlock_irqrestore(&mp->lock, flags);
2107 - return NETDEV_TX_BUSY;
2108 + if (txq->index == mp->txq_primary && net_ratelimit())
2109 + dev_printk(KERN_ERR, &dev->dev,
2110 + "primary tx queue full?!\n");
2111 + kfree_skb(skb);
2112 + return NETDEV_TX_OK;
2113 }
2114
2115 - eth_tx_submit_descs_for_skb(mp, skb);
2116 + txq_submit_skb(txq, skb);
2117 stats->tx_bytes += skb->len;
2118 stats->tx_packets++;
2119 dev->trans_start = jiffies;
2120
2121 - if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
2122 - netif_stop_queue(dev);
2123 + if (txq->index == mp->txq_primary) {
2124 + int entries_left;
2125 +
2126 + entries_left = txq->tx_ring_size - txq->tx_desc_count;
2127 + if (entries_left < MAX_DESCS_PER_SKB)
2128 + netif_stop_queue(dev);
2129 + }
2130
2131 spin_unlock_irqrestore(&mp->lock, flags);
2132
2133 return NETDEV_TX_OK;
2134 }
2135
2136 -#ifdef CONFIG_NET_POLL_CONTROLLER
2137 -static void mv643xx_netpoll(struct net_device *netdev)
2138 +
2139 +/* tx rate control **********************************************************/
2140 +/*
2141 + * Set total maximum TX rate (shared by all TX queues for this port)
2142 + * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
2143 + */
2144 +static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
2145 {
2146 - struct mv643xx_private *mp = netdev_priv(netdev);
2147 - int port_num = mp->port_num;
2148 + int token_rate;
2149 + int mtu;
2150 + int bucket_size;
2151 +
2152 + token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
2153 + if (token_rate > 1023)
2154 + token_rate = 1023;
2155
2156 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
2157 - /* wait for previous write to complete */
2158 - rdl(mp, INTERRUPT_MASK_REG(port_num));
2159 + mtu = (mp->dev->mtu + 255) >> 8;
2160 + if (mtu > 63)
2161 + mtu = 63;
2162
2163 - mv643xx_eth_int_handler(netdev->irq, netdev);
2164 + bucket_size = (burst + 255) >> 8;
2165 + if (bucket_size > 65535)
2166 + bucket_size = 65535;
2167
2168 - wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
2169 + if (mp->shared->tx_bw_control_moved) {
2170 + wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
2171 + wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
2172 + wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
2173 + } else {
2174 + wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
2175 + wrl(mp, TX_BW_MTU(mp->port_num), mtu);
2176 + wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
2177 + }
2178 }
2179 -#endif
2180
2181 -static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
2182 - int speed, int duplex,
2183 - struct ethtool_cmd *cmd)
2184 +static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
2185 {
2186 - struct mv643xx_private *mp = netdev_priv(dev);
2187 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
2188 + int token_rate;
2189 + int bucket_size;
2190
2191 - memset(cmd, 0, sizeof(*cmd));
2192 + token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
2193 + if (token_rate > 1023)
2194 + token_rate = 1023;
2195
2196 - cmd->port = PORT_MII;
2197 - cmd->transceiver = XCVR_INTERNAL;
2198 - cmd->phy_address = phy_address;
2199 + bucket_size = (burst + 255) >> 8;
2200 + if (bucket_size > 65535)
2201 + bucket_size = 65535;
2202
2203 - if (speed == 0) {
2204 - cmd->autoneg = AUTONEG_ENABLE;
2205 - /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
2206 - cmd->speed = SPEED_100;
2207 - cmd->advertising = ADVERTISED_10baseT_Half |
2208 - ADVERTISED_10baseT_Full |
2209 - ADVERTISED_100baseT_Half |
2210 - ADVERTISED_100baseT_Full;
2211 - if (mp->mii.supports_gmii)
2212 - cmd->advertising |= ADVERTISED_1000baseT_Full;
2213 - } else {
2214 - cmd->autoneg = AUTONEG_DISABLE;
2215 - cmd->speed = speed;
2216 - cmd->duplex = duplex;
2217 - }
2218 + wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
2219 + wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
2220 + (bucket_size << 10) | token_rate);
2221 }
2222
2223 -/*/
2224 - * mv643xx_eth_probe
2225 - *
2226 - * First function called after registering the network device.
2227 - * It's purpose is to initialize the device as an ethernet device,
2228 - * fill the ethernet device structure with pointers * to functions,
2229 - * and set the MAC address of the interface
2230 - *
2231 - * Input : struct device *
2232 - * Output : -ENOMEM if failed , 0 if success
2233 - */
2234 -static int mv643xx_eth_probe(struct platform_device *pdev)
2235 +static void txq_set_fixed_prio_mode(struct tx_queue *txq)
2236 {
2237 - struct mv643xx_eth_platform_data *pd;
2238 - int port_num;
2239 - struct mv643xx_private *mp;
2240 - struct net_device *dev;
2241 - u8 *p;
2242 - struct resource *res;
2243 - int err;
2244 - struct ethtool_cmd cmd;
2245 - int duplex = DUPLEX_HALF;
2246 - int speed = 0; /* default to auto-negotiation */
2247 - DECLARE_MAC_BUF(mac);
2248 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
2249 + int off;
2250 + u32 val;
2251
2252 - pd = pdev->dev.platform_data;
2253 - if (pd == NULL) {
2254 - printk(KERN_ERR "No mv643xx_eth_platform_data\n");
2255 - return -ENODEV;
2256 - }
2257 + /*
2258 + * Turn on fixed priority mode.
2259 + */
2260 + if (mp->shared->tx_bw_control_moved)
2261 + off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
2262 + else
2263 + off = TXQ_FIX_PRIO_CONF(mp->port_num);
2264
2265 - if (pd->shared == NULL) {
2266 - printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
2267 - return -ENODEV;
2268 - }
2269 + val = rdl(mp, off);
2270 + val |= 1 << txq->index;
2271 + wrl(mp, off, val);
2272 +}
2273
2274 - dev = alloc_etherdev(sizeof(struct mv643xx_private));
2275 - if (!dev)
2276 - return -ENOMEM;
2277 +static void txq_set_wrr(struct tx_queue *txq, int weight)
2278 +{
2279 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
2280 + int off;
2281 + u32 val;
2282
2283 - platform_set_drvdata(pdev, dev);
2284 + /*
2285 + * Turn off fixed priority mode.
2286 + */
2287 + if (mp->shared->tx_bw_control_moved)
2288 + off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
2289 + else
2290 + off = TXQ_FIX_PRIO_CONF(mp->port_num);
2291
2292 - mp = netdev_priv(dev);
2293 - mp->dev = dev;
2294 -#ifdef MV643XX_NAPI
2295 - netif_napi_add(dev, &mp->napi, mv643xx_poll, 64);
2296 -#endif
2297 + val = rdl(mp, off);
2298 + val &= ~(1 << txq->index);
2299 + wrl(mp, off, val);
2300
2301 - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2302 - BUG_ON(!res);
2303 - dev->irq = res->start;
2304 + /*
2305 + * Configure WRR weight for this queue.
2306 + */
2307 + off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
2308
2309 - dev->open = mv643xx_eth_open;
2310 - dev->stop = mv643xx_eth_stop;
2311 - dev->hard_start_xmit = mv643xx_eth_start_xmit;
2312 - dev->set_mac_address = mv643xx_eth_set_mac_address;
2313 - dev->set_multicast_list = mv643xx_eth_set_rx_mode;
2314 + val = rdl(mp, off);
2315 + val = (val & ~0xff) | (weight & 0xff);
2316 + wrl(mp, off, val);
2317 +}
2318
2319 - /* No need to Tx Timeout */
2320 - dev->tx_timeout = mv643xx_eth_tx_timeout;
2321
2322 -#ifdef CONFIG_NET_POLL_CONTROLLER
2323 - dev->poll_controller = mv643xx_netpoll;
2324 -#endif
2325 +/* mii management interface *************************************************/
2326 +#define SMI_BUSY 0x10000000
2327 +#define SMI_READ_VALID 0x08000000
2328 +#define SMI_OPCODE_READ 0x04000000
2329 +#define SMI_OPCODE_WRITE 0x00000000
2330
2331 - dev->watchdog_timeo = 2 * HZ;
2332 - dev->base_addr = 0;
2333 - dev->change_mtu = mv643xx_eth_change_mtu;
2334 - dev->do_ioctl = mv643xx_eth_do_ioctl;
2335 - SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);
2336 +static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
2337 + unsigned int reg, unsigned int *value)
2338 +{
2339 + void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
2340 + unsigned long flags;
2341 + int i;
2342 +
2343 + /* the SMI register is a shared resource */
2344 + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
2345 +
2346 + /* wait for the SMI register to become available */
2347 + for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
2348 + if (i == 1000) {
2349 + printk("%s: PHY busy timeout\n", mp->dev->name);
2350 + goto out;
2351 + }
2352 + udelay(10);
2353 + }
2354 +
2355 + writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
2356 +
2357 + /* now wait for the data to be valid */
2358 + for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
2359 + if (i == 1000) {
2360 + printk("%s: PHY read timeout\n", mp->dev->name);
2361 + goto out;
2362 + }
2363 + udelay(10);
2364 + }
2365 +
2366 + *value = readl(smi_reg) & 0xffff;
2367 +out:
2368 + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
2369 +}
2370 +
2371 +static void smi_reg_write(struct mv643xx_eth_private *mp,
2372 + unsigned int addr,
2373 + unsigned int reg, unsigned int value)
2374 +{
2375 + void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
2376 + unsigned long flags;
2377 + int i;
2378 +
2379 + /* the SMI register is a shared resource */
2380 + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
2381 +
2382 + /* wait for the SMI register to become available */
2383 + for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
2384 + if (i == 1000) {
2385 + printk("%s: PHY busy timeout\n", mp->dev->name);
2386 + goto out;
2387 + }
2388 + udelay(10);
2389 + }
2390 +
2391 + writel(SMI_OPCODE_WRITE | (reg << 21) |
2392 + (addr << 16) | (value & 0xffff), smi_reg);
2393 +out:
2394 + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
2395 +}
2396 +
2397 +
2398 +/* mib counters *************************************************************/
2399 +static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
2400 +{
2401 + return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
2402 +}
2403 +
2404 +static void mib_counters_clear(struct mv643xx_eth_private *mp)
2405 +{
2406 + int i;
2407 +
2408 + for (i = 0; i < 0x80; i += 4)
2409 + mib_read(mp, i);
2410 +}
2411 +
2412 +static void mib_counters_update(struct mv643xx_eth_private *mp)
2413 +{
2414 + struct mib_counters *p = &mp->mib_counters;
2415 +
2416 + p->good_octets_received += mib_read(mp, 0x00);
2417 + p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
2418 + p->bad_octets_received += mib_read(mp, 0x08);
2419 + p->internal_mac_transmit_err += mib_read(mp, 0x0c);
2420 + p->good_frames_received += mib_read(mp, 0x10);
2421 + p->bad_frames_received += mib_read(mp, 0x14);
2422 + p->broadcast_frames_received += mib_read(mp, 0x18);
2423 + p->multicast_frames_received += mib_read(mp, 0x1c);
2424 + p->frames_64_octets += mib_read(mp, 0x20);
2425 + p->frames_65_to_127_octets += mib_read(mp, 0x24);
2426 + p->frames_128_to_255_octets += mib_read(mp, 0x28);
2427 + p->frames_256_to_511_octets += mib_read(mp, 0x2c);
2428 + p->frames_512_to_1023_octets += mib_read(mp, 0x30);
2429 + p->frames_1024_to_max_octets += mib_read(mp, 0x34);
2430 + p->good_octets_sent += mib_read(mp, 0x38);
2431 + p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
2432 + p->good_frames_sent += mib_read(mp, 0x40);
2433 + p->excessive_collision += mib_read(mp, 0x44);
2434 + p->multicast_frames_sent += mib_read(mp, 0x48);
2435 + p->broadcast_frames_sent += mib_read(mp, 0x4c);
2436 + p->unrec_mac_control_received += mib_read(mp, 0x50);
2437 + p->fc_sent += mib_read(mp, 0x54);
2438 + p->good_fc_received += mib_read(mp, 0x58);
2439 + p->bad_fc_received += mib_read(mp, 0x5c);
2440 + p->undersize_received += mib_read(mp, 0x60);
2441 + p->fragments_received += mib_read(mp, 0x64);
2442 + p->oversize_received += mib_read(mp, 0x68);
2443 + p->jabber_received += mib_read(mp, 0x6c);
2444 + p->mac_receive_error += mib_read(mp, 0x70);
2445 + p->bad_crc_event += mib_read(mp, 0x74);
2446 + p->collision += mib_read(mp, 0x78);
2447 + p->late_collision += mib_read(mp, 0x7c);
2448 +}
2449 +
2450 +
2451 +/* ethtool ******************************************************************/
2452 +struct mv643xx_eth_stats {
2453 + char stat_string[ETH_GSTRING_LEN];
2454 + int sizeof_stat;
2455 + int netdev_off;
2456 + int mp_off;
2457 +};
2458 +
2459 +#define SSTAT(m) \
2460 + { #m, FIELD_SIZEOF(struct net_device_stats, m), \
2461 + offsetof(struct net_device, stats.m), -1 }
2462 +
2463 +#define MIBSTAT(m) \
2464 + { #m, FIELD_SIZEOF(struct mib_counters, m), \
2465 + -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
2466 +
2467 +static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
2468 + SSTAT(rx_packets),
2469 + SSTAT(tx_packets),
2470 + SSTAT(rx_bytes),
2471 + SSTAT(tx_bytes),
2472 + SSTAT(rx_errors),
2473 + SSTAT(tx_errors),
2474 + SSTAT(rx_dropped),
2475 + SSTAT(tx_dropped),
2476 + MIBSTAT(good_octets_received),
2477 + MIBSTAT(bad_octets_received),
2478 + MIBSTAT(internal_mac_transmit_err),
2479 + MIBSTAT(good_frames_received),
2480 + MIBSTAT(bad_frames_received),
2481 + MIBSTAT(broadcast_frames_received),
2482 + MIBSTAT(multicast_frames_received),
2483 + MIBSTAT(frames_64_octets),
2484 + MIBSTAT(frames_65_to_127_octets),
2485 + MIBSTAT(frames_128_to_255_octets),
2486 + MIBSTAT(frames_256_to_511_octets),
2487 + MIBSTAT(frames_512_to_1023_octets),
2488 + MIBSTAT(frames_1024_to_max_octets),
2489 + MIBSTAT(good_octets_sent),
2490 + MIBSTAT(good_frames_sent),
2491 + MIBSTAT(excessive_collision),
2492 + MIBSTAT(multicast_frames_sent),
2493 + MIBSTAT(broadcast_frames_sent),
2494 + MIBSTAT(unrec_mac_control_received),
2495 + MIBSTAT(fc_sent),
2496 + MIBSTAT(good_fc_received),
2497 + MIBSTAT(bad_fc_received),
2498 + MIBSTAT(undersize_received),
2499 + MIBSTAT(fragments_received),
2500 + MIBSTAT(oversize_received),
2501 + MIBSTAT(jabber_received),
2502 + MIBSTAT(mac_receive_error),
2503 + MIBSTAT(bad_crc_event),
2504 + MIBSTAT(collision),
2505 + MIBSTAT(late_collision),
2506 +};
2507 +
2508 +static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2509 +{
2510 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2511 + int err;
2512 +
2513 + spin_lock_irq(&mp->lock);
2514 + err = mii_ethtool_gset(&mp->mii, cmd);
2515 + spin_unlock_irq(&mp->lock);
2516
2517 -#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
2518 -#ifdef MAX_SKB_FRAGS
2519 /*
2520 - * Zero copy can only work if we use Discovery II memory. Else, we will
2521 - * have to map the buffers to ISA memory which is only 16 MB
2522 + * The MAC does not support 1000baseT_Half.
2523 */
2524 - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2525 -#endif
2526 -#endif
2527 + cmd->supported &= ~SUPPORTED_1000baseT_Half;
2528 + cmd->advertising &= ~ADVERTISED_1000baseT_Half;
2529
2530 - /* Configure the timeout task */
2531 - INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
2532 + return err;
2533 +}
2534
2535 - spin_lock_init(&mp->lock);
2536 +static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
2537 +{
2538 + cmd->supported = SUPPORTED_MII;
2539 + cmd->advertising = ADVERTISED_MII;
2540 + cmd->speed = SPEED_1000;
2541 + cmd->duplex = DUPLEX_FULL;
2542 + cmd->port = PORT_MII;
2543 + cmd->phy_address = 0;
2544 + cmd->transceiver = XCVR_INTERNAL;
2545 + cmd->autoneg = AUTONEG_DISABLE;
2546 + cmd->maxtxpkt = 1;
2547 + cmd->maxrxpkt = 1;
2548
2549 - mp->shared = platform_get_drvdata(pd->shared);
2550 - port_num = mp->port_num = pd->port_number;
2551 + return 0;
2552 +}
2553
2554 - if (mp->shared->win_protect)
2555 - wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
2556 +static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2557 +{
2558 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2559 + int err;
2560
2561 - mp->shared_smi = mp->shared;
2562 - if (pd->shared_smi != NULL)
2563 - mp->shared_smi = platform_get_drvdata(pd->shared_smi);
2564 -
2565 - /* set default config values */
2566 - eth_port_uc_addr_get(mp, dev->dev_addr);
2567 - mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
2568 - mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
2569 + /*
2570 + * The MAC does not support 1000baseT_Half.
2571 + */
2572 + cmd->advertising &= ~ADVERTISED_1000baseT_Half;
2573
2574 - if (is_valid_ether_addr(pd->mac_addr))
2575 - memcpy(dev->dev_addr, pd->mac_addr, 6);
2576 + spin_lock_irq(&mp->lock);
2577 + err = mii_ethtool_sset(&mp->mii, cmd);
2578 + spin_unlock_irq(&mp->lock);
2579
2580 - if (pd->phy_addr || pd->force_phy_addr)
2581 - ethernet_phy_set(mp, pd->phy_addr);
2582 + return err;
2583 +}
2584
2585 - if (pd->rx_queue_size)
2586 - mp->rx_ring_size = pd->rx_queue_size;
2587 +static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
2588 +{
2589 + return -EINVAL;
2590 +}
2591
2592 - if (pd->tx_queue_size)
2593 - mp->tx_ring_size = pd->tx_queue_size;
2594 +static void mv643xx_eth_get_drvinfo(struct net_device *dev,
2595 + struct ethtool_drvinfo *drvinfo)
2596 +{
2597 + strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
2598 + strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
2599 + strncpy(drvinfo->fw_version, "N/A", 32);
2600 + strncpy(drvinfo->bus_info, "platform", 32);
2601 + drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
2602 +}
2603 +
2604 +static int mv643xx_eth_nway_reset(struct net_device *dev)
2605 +{
2606 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2607
2608 - if (pd->tx_sram_size) {
2609 - mp->tx_sram_size = pd->tx_sram_size;
2610 - mp->tx_sram_addr = pd->tx_sram_addr;
2611 + return mii_nway_restart(&mp->mii);
2612 +}
2613 +
2614 +static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
2615 +{
2616 + return -EINVAL;
2617 +}
2618 +
2619 +static u32 mv643xx_eth_get_link(struct net_device *dev)
2620 +{
2621 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2622 +
2623 + return mii_link_ok(&mp->mii);
2624 +}
2625 +
2626 +static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
2627 +{
2628 + return 1;
2629 +}
2630 +
2631 +static void mv643xx_eth_get_strings(struct net_device *dev,
2632 + uint32_t stringset, uint8_t *data)
2633 +{
2634 + int i;
2635 +
2636 + if (stringset == ETH_SS_STATS) {
2637 + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
2638 + memcpy(data + i * ETH_GSTRING_LEN,
2639 + mv643xx_eth_stats[i].stat_string,
2640 + ETH_GSTRING_LEN);
2641 + }
2642 }
2643 +}
2644 +
2645 +static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
2646 + struct ethtool_stats *stats,
2647 + uint64_t *data)
2648 +{
2649 + struct mv643xx_eth_private *mp = dev->priv;
2650 + int i;
2651 +
2652 + mib_counters_update(mp);
2653 +
2654 + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
2655 + const struct mv643xx_eth_stats *stat;
2656 + void *p;
2657 +
2658 + stat = mv643xx_eth_stats + i;
2659 +
2660 + if (stat->netdev_off >= 0)
2661 + p = ((void *)mp->dev) + stat->netdev_off;
2662 + else
2663 + p = ((void *)mp) + stat->mp_off;
2664
2665 - if (pd->rx_sram_size) {
2666 - mp->rx_sram_size = pd->rx_sram_size;
2667 - mp->rx_sram_addr = pd->rx_sram_addr;
2668 + data[i] = (stat->sizeof_stat == 8) ?
2669 + *(uint64_t *)p : *(uint32_t *)p;
2670 }
2671 +}
2672
2673 - duplex = pd->duplex;
2674 - speed = pd->speed;
2675 +static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
2676 +{
2677 + if (sset == ETH_SS_STATS)
2678 + return ARRAY_SIZE(mv643xx_eth_stats);
2679
2680 - /* Hook up MII support for ethtool */
2681 - mp->mii.dev = dev;
2682 - mp->mii.mdio_read = mv643xx_mdio_read;
2683 - mp->mii.mdio_write = mv643xx_mdio_write;
2684 - mp->mii.phy_id = ethernet_phy_get(mp);
2685 - mp->mii.phy_id_mask = 0x3f;
2686 - mp->mii.reg_num_mask = 0x1f;
2687 + return -EOPNOTSUPP;
2688 +}
2689
2690 - err = ethernet_phy_detect(mp);
2691 - if (err) {
2692 - pr_debug("%s: No PHY detected at addr %d\n",
2693 - dev->name, ethernet_phy_get(mp));
2694 +static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
2695 + .get_settings = mv643xx_eth_get_settings,
2696 + .set_settings = mv643xx_eth_set_settings,
2697 + .get_drvinfo = mv643xx_eth_get_drvinfo,
2698 + .nway_reset = mv643xx_eth_nway_reset,
2699 + .get_link = mv643xx_eth_get_link,
2700 + .set_sg = ethtool_op_set_sg,
2701 + .get_strings = mv643xx_eth_get_strings,
2702 + .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
2703 + .get_sset_count = mv643xx_eth_get_sset_count,
2704 +};
2705 +
2706 +static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
2707 + .get_settings = mv643xx_eth_get_settings_phyless,
2708 + .set_settings = mv643xx_eth_set_settings_phyless,
2709 + .get_drvinfo = mv643xx_eth_get_drvinfo,
2710 + .nway_reset = mv643xx_eth_nway_reset_phyless,
2711 + .get_link = mv643xx_eth_get_link_phyless,
2712 + .set_sg = ethtool_op_set_sg,
2713 + .get_strings = mv643xx_eth_get_strings,
2714 + .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
2715 + .get_sset_count = mv643xx_eth_get_sset_count,
2716 +};
2717 +
2718 +
2719 +/* address handling *********************************************************/
2720 +static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
2721 +{
2722 + unsigned int mac_h;
2723 + unsigned int mac_l;
2724 +
2725 + mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
2726 + mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
2727 +
2728 + addr[0] = (mac_h >> 24) & 0xff;
2729 + addr[1] = (mac_h >> 16) & 0xff;
2730 + addr[2] = (mac_h >> 8) & 0xff;
2731 + addr[3] = mac_h & 0xff;
2732 + addr[4] = (mac_l >> 8) & 0xff;
2733 + addr[5] = mac_l & 0xff;
2734 +}
2735 +
2736 +static void init_mac_tables(struct mv643xx_eth_private *mp)
2737 +{
2738 + int i;
2739 +
2740 + for (i = 0; i < 0x100; i += 4) {
2741 + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
2742 + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
2743 + }
2744 +
2745 + for (i = 0; i < 0x10; i += 4)
2746 + wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
2747 +}
2748 +
2749 +static void set_filter_table_entry(struct mv643xx_eth_private *mp,
2750 + int table, unsigned char entry)
2751 +{
2752 + unsigned int table_reg;
2753 +
2754 + /* Set "accepts frame bit" at specified table entry */
2755 + table_reg = rdl(mp, table + (entry & 0xfc));
2756 + table_reg |= 0x01 << (8 * (entry & 3));
2757 + wrl(mp, table + (entry & 0xfc), table_reg);
2758 +}
2759 +
2760 +static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
2761 +{
2762 + unsigned int mac_h;
2763 + unsigned int mac_l;
2764 + int table;
2765 +
2766 + mac_l = (addr[4] << 8) | addr[5];
2767 + mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
2768 +
2769 + wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
2770 + wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
2771 +
2772 + table = UNICAST_TABLE(mp->port_num);
2773 + set_filter_table_entry(mp, table, addr[5] & 0x0f);
2774 +}
2775 +
2776 +static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
2777 +{
2778 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2779 +
2780 + /* +2 is for the offset of the HW addr type */
2781 + memcpy(dev->dev_addr, addr + 2, 6);
2782 +
2783 + init_mac_tables(mp);
2784 + uc_addr_set(mp, dev->dev_addr);
2785 +
2786 + return 0;
2787 +}
2788 +
2789 +static int addr_crc(unsigned char *addr)
2790 +{
2791 + int crc = 0;
2792 + int i;
2793 +
2794 + for (i = 0; i < 6; i++) {
2795 + int j;
2796 +
2797 + crc = (crc ^ addr[i]) << 8;
2798 + for (j = 7; j >= 0; j--) {
2799 + if (crc & (0x100 << j))
2800 + crc ^= 0x107 << j;
2801 + }
2802 + }
2803 +
2804 + return crc;
2805 +}
2806 +
2807 +static void mv643xx_eth_set_rx_mode(struct net_device *dev)
2808 +{
2809 + struct mv643xx_eth_private *mp = netdev_priv(dev);
2810 + u32 port_config;
2811 + struct dev_addr_list *addr;
2812 + int i;
2813 +
2814 + port_config = rdl(mp, PORT_CONFIG(mp->port_num));
2815 + if (dev->flags & IFF_PROMISC)
2816 + port_config |= UNICAST_PROMISCUOUS_MODE;
2817 + else
2818 + port_config &= ~UNICAST_PROMISCUOUS_MODE;
2819 + wrl(mp, PORT_CONFIG(mp->port_num), port_config);
2820 +
2821 + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2822 + int port_num = mp->port_num;
2823 + u32 accept = 0x01010101;
2824 +
2825 + for (i = 0; i < 0x100; i += 4) {
2826 + wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
2827 + wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
2828 + }
2829 + return;
2830 + }
2831 +
2832 + for (i = 0; i < 0x100; i += 4) {
2833 + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
2834 + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
2835 + }
2836 +
2837 + for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
2838 + u8 *a = addr->da_addr;
2839 + int table;
2840 +
2841 + if (addr->da_addrlen != 6)
2842 + continue;
2843 +
2844 + if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
2845 + table = SPECIAL_MCAST_TABLE(mp->port_num);
2846 + set_filter_table_entry(mp, table, a[5]);
2847 + } else {
2848 + int crc = addr_crc(a);
2849 +
2850 + table = OTHER_MCAST_TABLE(mp->port_num);
2851 + set_filter_table_entry(mp, table, crc);
2852 + }
2853 + }
2854 +}
2855 +
2856 +
2857 +/* rx/tx queue initialisation ***********************************************/
2858 +static int rxq_init(struct mv643xx_eth_private *mp, int index)
2859 +{
2860 + struct rx_queue *rxq = mp->rxq + index;
2861 + struct rx_desc *rx_desc;
2862 + int size;
2863 + int i;
2864 +
2865 + rxq->index = index;
2866 +
2867 + rxq->rx_ring_size = mp->default_rx_ring_size;
2868 +
2869 + rxq->rx_desc_count = 0;
2870 + rxq->rx_curr_desc = 0;
2871 + rxq->rx_used_desc = 0;
2872 +
2873 + size = rxq->rx_ring_size * sizeof(struct rx_desc);
2874 +
2875 + if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
2876 + rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
2877 + mp->rx_desc_sram_size);
2878 + rxq->rx_desc_dma = mp->rx_desc_sram_addr;
2879 + } else {
2880 + rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
2881 + &rxq->rx_desc_dma,
2882 + GFP_KERNEL);
2883 + }
2884 +
2885 + if (rxq->rx_desc_area == NULL) {
2886 + dev_printk(KERN_ERR, &mp->dev->dev,
2887 + "can't allocate rx ring (%d bytes)\n", size);
2888 goto out;
2889 }
2890 + memset(rxq->rx_desc_area, 0, size);
2891
2892 - ethernet_phy_reset(mp);
2893 - mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2894 - mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
2895 - mv643xx_eth_update_pscr(dev, &cmd);
2896 - mv643xx_set_settings(dev, &cmd);
2897 + rxq->rx_desc_area_size = size;
2898 + rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
2899 + GFP_KERNEL);
2900 + if (rxq->rx_skb == NULL) {
2901 + dev_printk(KERN_ERR, &mp->dev->dev,
2902 + "can't allocate rx skb ring\n");
2903 + goto out_free;
2904 + }
2905
2906 - SET_NETDEV_DEV(dev, &pdev->dev);
2907 - err = register_netdev(dev);
2908 - if (err)
2909 + rx_desc = (struct rx_desc *)rxq->rx_desc_area;
2910 + for (i = 0; i < rxq->rx_ring_size; i++) {
2911 + int nexti = (i + 1) % rxq->rx_ring_size;
2912 + rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
2913 + nexti * sizeof(struct rx_desc);
2914 + }
2915 +
2916 + init_timer(&rxq->rx_oom);
2917 + rxq->rx_oom.data = (unsigned long)rxq;
2918 + rxq->rx_oom.function = rxq_refill_timer_wrapper;
2919 +
2920 + return 0;
2921 +
2922 +
2923 +out_free:
2924 + if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
2925 + iounmap(rxq->rx_desc_area);
2926 + else
2927 + dma_free_coherent(NULL, size,
2928 + rxq->rx_desc_area,
2929 + rxq->rx_desc_dma);
2930 +
2931 +out:
2932 + return -ENOMEM;
2933 +}
2934 +
2935 +static void rxq_deinit(struct rx_queue *rxq)
2936 +{
2937 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
2938 + int i;
2939 +
2940 + rxq_disable(rxq);
2941 +
2942 + del_timer_sync(&rxq->rx_oom);
2943 +
2944 + for (i = 0; i < rxq->rx_ring_size; i++) {
2945 + if (rxq->rx_skb[i]) {
2946 + dev_kfree_skb(rxq->rx_skb[i]);
2947 + rxq->rx_desc_count--;
2948 + }
2949 + }
2950 +
2951 + if (rxq->rx_desc_count) {
2952 + dev_printk(KERN_ERR, &mp->dev->dev,
2953 + "error freeing rx ring -- %d skbs stuck\n",
2954 + rxq->rx_desc_count);
2955 + }
2956 +
2957 + if (rxq->index == mp->rxq_primary &&
2958 + rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
2959 + iounmap(rxq->rx_desc_area);
2960 + else
2961 + dma_free_coherent(NULL, rxq->rx_desc_area_size,
2962 + rxq->rx_desc_area, rxq->rx_desc_dma);
2963 +
2964 + kfree(rxq->rx_skb);
2965 +}
2966 +
2967 +static int txq_init(struct mv643xx_eth_private *mp, int index)
2968 +{
2969 + struct tx_queue *txq = mp->txq + index;
2970 + struct tx_desc *tx_desc;
2971 + int size;
2972 + int i;
2973 +
2974 + txq->index = index;
2975 +
2976 + txq->tx_ring_size = mp->default_tx_ring_size;
2977 +
2978 + txq->tx_desc_count = 0;
2979 + txq->tx_curr_desc = 0;
2980 + txq->tx_used_desc = 0;
2981 +
2982 + size = txq->tx_ring_size * sizeof(struct tx_desc);
2983 +
2984 + if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
2985 + txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2986 + mp->tx_desc_sram_size);
2987 + txq->tx_desc_dma = mp->tx_desc_sram_addr;
2988 + } else {
2989 + txq->tx_desc_area = dma_alloc_coherent(NULL, size,
2990 + &txq->tx_desc_dma,
2991 + GFP_KERNEL);
2992 + }
2993 +
2994 + if (txq->tx_desc_area == NULL) {
2995 + dev_printk(KERN_ERR, &mp->dev->dev,
2996 + "can't allocate tx ring (%d bytes)\n", size);
2997 goto out;
2998 + }
2999 + memset(txq->tx_desc_area, 0, size);
3000
3001 - p = dev->dev_addr;
3002 - printk(KERN_NOTICE
3003 - "%s: port %d with MAC address %s\n",
3004 - dev->name, port_num, print_mac(mac, p));
3005 + txq->tx_desc_area_size = size;
3006 + txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
3007 + GFP_KERNEL);
3008 + if (txq->tx_skb == NULL) {
3009 + dev_printk(KERN_ERR, &mp->dev->dev,
3010 + "can't allocate tx skb ring\n");
3011 + goto out_free;
3012 + }
3013
3014 - if (dev->features & NETIF_F_SG)
3015 - printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
3016 + tx_desc = (struct tx_desc *)txq->tx_desc_area;
3017 + for (i = 0; i < txq->tx_ring_size; i++) {
3018 + int nexti = (i + 1) % txq->tx_ring_size;
3019 + tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
3020 + nexti * sizeof(struct tx_desc);
3021 + }
3022
3023 - if (dev->features & NETIF_F_IP_CSUM)
3024 - printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
3025 - dev->name);
3026 + return 0;
3027 +
3028 +
3029 +out_free:
3030 + if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
3031 + iounmap(txq->tx_desc_area);
3032 + else
3033 + dma_free_coherent(NULL, size,
3034 + txq->tx_desc_area,
3035 + txq->tx_desc_dma);
3036 +
3037 +out:
3038 + return -ENOMEM;
3039 +}
3040 +
3041 +static void txq_reclaim(struct tx_queue *txq, int force)
3042 +{
3043 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
3044 + unsigned long flags;
3045 +
3046 + spin_lock_irqsave(&mp->lock, flags);
3047 + while (txq->tx_desc_count > 0) {
3048 + int tx_index;
3049 + struct tx_desc *desc;
3050 + u32 cmd_sts;
3051 + struct sk_buff *skb;
3052 + dma_addr_t addr;
3053 + int count;
3054 +
3055 + tx_index = txq->tx_used_desc;
3056 + desc = &txq->tx_desc_area[tx_index];
3057 + cmd_sts = desc->cmd_sts;
3058 +
3059 + if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
3060 + break;
3061 +
3062 + txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
3063 + txq->tx_desc_count--;
3064 +
3065 + addr = desc->buf_ptr;
3066 + count = desc->byte_cnt;
3067 + skb = txq->tx_skb[tx_index];
3068 + txq->tx_skb[tx_index] = NULL;
3069 +
3070 + if (cmd_sts & ERROR_SUMMARY) {
3071 + dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
3072 + mp->dev->stats.tx_errors++;
3073 + }
3074 +
3075 + /*
3076 + * Drop mp->lock while we free the skb.
3077 + */
3078 + spin_unlock_irqrestore(&mp->lock, flags);
3079
3080 -#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
3081 - printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
3082 + if (cmd_sts & TX_FIRST_DESC)
3083 + dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
3084 + else
3085 + dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
3086 +
3087 + if (skb)
3088 + dev_kfree_skb_irq(skb);
3089 +
3090 + spin_lock_irqsave(&mp->lock, flags);
3091 + }
3092 + spin_unlock_irqrestore(&mp->lock, flags);
3093 +}
3094 +
3095 +static void txq_deinit(struct tx_queue *txq)
3096 +{
3097 + struct mv643xx_eth_private *mp = txq_to_mp(txq);
3098 +
3099 + txq_disable(txq);
3100 + txq_reclaim(txq, 1);
3101 +
3102 + BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
3103 +
3104 + if (txq->index == mp->txq_primary &&
3105 + txq->tx_desc_area_size <= mp->tx_desc_sram_size)
3106 + iounmap(txq->tx_desc_area);
3107 + else
3108 + dma_free_coherent(NULL, txq->tx_desc_area_size,
3109 + txq->tx_desc_area, txq->tx_desc_dma);
3110 +
3111 + kfree(txq->tx_skb);
3112 +}
3113 +
3114 +
3115 +/* netdev ops and related ***************************************************/
3116 +static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
3117 +{
3118 + u32 pscr_o;
3119 + u32 pscr_n;
3120 +
3121 + pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
3122 +
3123 + /* clear speed, duplex and rx buffer size fields */
3124 + pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100 |
3125 + SET_GMII_SPEED_TO_1000 |
3126 + SET_FULL_DUPLEX_MODE |
3127 + MAX_RX_PACKET_MASK);
3128 +
3129 + if (speed == SPEED_1000) {
3130 + pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
3131 + } else {
3132 + if (speed == SPEED_100)
3133 + pscr_n |= SET_MII_SPEED_TO_100;
3134 + pscr_n |= MAX_RX_PACKET_1522BYTE;
3135 + }
3136 +
3137 + if (duplex == DUPLEX_FULL)
3138 + pscr_n |= SET_FULL_DUPLEX_MODE;
3139 +
3140 + if (pscr_n != pscr_o) {
3141 + if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
3142 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
3143 + else {
3144 + int i;
3145 +
3146 + for (i = 0; i < 8; i++)
3147 + if (mp->txq_mask & (1 << i))
3148 + txq_disable(mp->txq + i);
3149 +
3150 + pscr_o &= ~SERIAL_PORT_ENABLE;
3151 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
3152 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
3153 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
3154 +
3155 + for (i = 0; i < 8; i++)
3156 + if (mp->txq_mask & (1 << i))
3157 + txq_enable(mp->txq + i);
3158 + }
3159 + }
3160 +}
3161 +
3162 +static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
3163 +{
3164 + struct net_device *dev = (struct net_device *)dev_id;
3165 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3166 + u32 int_cause;
3167 + u32 int_cause_ext;
3168 + u32 txq_active;
3169 +
3170 + int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
3171 + (INT_TX_END | INT_RX | INT_EXT);
3172 + if (int_cause == 0)
3173 + return IRQ_NONE;
3174 +
3175 + int_cause_ext = 0;
3176 + if (int_cause & INT_EXT) {
3177 + int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
3178 + & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
3179 + wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
3180 + }
3181 +
3182 + if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
3183 + if (mp->phy_addr == -1 || mii_link_ok(&mp->mii)) {
3184 + int i;
3185 +
3186 + if (mp->phy_addr != -1) {
3187 + struct ethtool_cmd cmd;
3188 +
3189 + mii_ethtool_gset(&mp->mii, &cmd);
3190 + update_pscr(mp, cmd.speed, cmd.duplex);
3191 + }
3192 +
3193 + for (i = 0; i < 8; i++)
3194 + if (mp->txq_mask & (1 << i))
3195 + txq_enable(mp->txq + i);
3196 +
3197 + if (!netif_carrier_ok(dev)) {
3198 + netif_carrier_on(dev);
3199 + __txq_maybe_wake(mp->txq + mp->txq_primary);
3200 + }
3201 + } else if (netif_carrier_ok(dev)) {
3202 + netif_stop_queue(dev);
3203 + netif_carrier_off(dev);
3204 + }
3205 + }
3206 +
3207 + /*
3208 + * RxBuffer or RxError set for any of the 8 queues?
3209 + */
3210 +#ifdef MV643XX_ETH_NAPI
3211 + if (int_cause & INT_RX) {
3212 + wrl(mp, INT_MASK(mp->port_num), 0x00000000);
3213 + rdl(mp, INT_MASK(mp->port_num));
3214 +
3215 + netif_rx_schedule(dev, &mp->napi);
3216 + }
3217 +#else
3218 + if (int_cause & INT_RX) {
3219 + int i;
3220 +
3221 + for (i = 7; i >= 0; i--)
3222 + if (mp->rxq_mask & (1 << i))
3223 + rxq_process(mp->rxq + i, INT_MAX);
3224 + }
3225 #endif
3226
3227 -#ifdef MV643XX_COAL
3228 - printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
3229 - dev->name);
3230 -#endif
3231 + txq_active = rdl(mp, TXQ_COMMAND(mp->port_num));
3232 +
3233 + /*
3234 + * TxBuffer or TxError set for any of the 8 queues?
3235 + */
3236 + if (int_cause_ext & INT_EXT_TX) {
3237 + int i;
3238 +
3239 + for (i = 0; i < 8; i++)
3240 + if (mp->txq_mask & (1 << i))
3241 + txq_reclaim(mp->txq + i, 0);
3242 + }
3243 +
3244 + /*
3245 + * Any TxEnd interrupts?
3246 + */
3247 + if (int_cause & INT_TX_END) {
3248 + int i;
3249 +
3250 + wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
3251 + for (i = 0; i < 8; i++) {
3252 + struct tx_queue *txq = mp->txq + i;
3253 + if (txq->tx_desc_count && !((txq_active >> i) & 1))
3254 + txq_enable(txq);
3255 + }
3256 + }
3257 +
3258 + /*
3259 + * Enough space again in the primary TX queue for a full packet?
3260 + */
3261 + if (int_cause_ext & INT_EXT_TX) {
3262 + struct tx_queue *txq = mp->txq + mp->txq_primary;
3263 + __txq_maybe_wake(txq);
3264 + }
3265 +
3266 + return IRQ_HANDLED;
3267 +}
3268 +
3269 +static void phy_reset(struct mv643xx_eth_private *mp)
3270 +{
3271 + unsigned int data;
3272 +
3273 + smi_reg_read(mp, mp->phy_addr, 0, &data);
3274 + data |= 0x8000;
3275 + smi_reg_write(mp, mp->phy_addr, 0, data);
3276 +
3277 + do {
3278 + udelay(1);
3279 + smi_reg_read(mp, mp->phy_addr, 0, &data);
3280 + } while (data & 0x8000);
3281 +}
3282 +
3283 +static void port_start(struct mv643xx_eth_private *mp)
3284 +{
3285 + u32 pscr;
3286 + int i;
3287 +
3288 + /*
3289 + * Configure basic link parameters.
3290 + */
3291 + pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
3292 + pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
3293 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
3294 + pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
3295 + DISABLE_AUTO_NEG_SPEED_GMII |
3296 + DISABLE_AUTO_NEG_FOR_DUPLEX |
3297 + DO_NOT_FORCE_LINK_FAIL |
3298 + SERIAL_PORT_CONTROL_RESERVED;
3299 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
3300 + pscr |= SERIAL_PORT_ENABLE;
3301 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
3302 +
3303 + wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
3304 +
3305 + /*
3306 + * Perform PHY reset, if there is a PHY.
3307 + */
3308 + if (mp->phy_addr != -1) {
3309 + struct ethtool_cmd cmd;
3310 +
3311 + mv643xx_eth_get_settings(mp->dev, &cmd);
3312 + phy_reset(mp);
3313 + mv643xx_eth_set_settings(mp->dev, &cmd);
3314 + }
3315 +
3316 + /*
3317 + * Configure TX path and queues.
3318 + */
3319 + tx_set_rate(mp, 1000000000, 16777216);
3320 + for (i = 0; i < 8; i++) {
3321 + struct tx_queue *txq = mp->txq + i;
3322 + int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
3323 + u32 addr;
3324 +
3325 + if ((mp->txq_mask & (1 << i)) == 0)
3326 + continue;
3327 +
3328 + addr = (u32)txq->tx_desc_dma;
3329 + addr += txq->tx_curr_desc * sizeof(struct tx_desc);
3330 + wrl(mp, off, addr);
3331 +
3332 + txq_set_rate(txq, 1000000000, 16777216);
3333 + txq_set_fixed_prio_mode(txq);
3334 + }
3335 +
3336 + /*
3337 + * Add configured unicast address to address filter table.
3338 + */
3339 + uc_addr_set(mp, mp->dev->dev_addr);
3340 +
3341 + /*
3342 + * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
3343 + * frames to RX queue #0.
3344 + */
3345 + wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
3346
3347 -#ifdef MV643XX_NAPI
3348 - printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
3349 -#endif
3350 + /*
3351 + * Treat BPDUs as normal multicasts, and disable partition mode.
3352 + */
3353 + wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
3354
3355 - if (mp->tx_sram_size > 0)
3356 - printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
3357 + /*
3358 + * Enable the receive queues.
3359 + */
3360 + for (i = 0; i < 8; i++) {
3361 + struct rx_queue *rxq = mp->rxq + i;
3362 + int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
3363 + u32 addr;
3364
3365 - return 0;
3366 + if ((mp->rxq_mask & (1 << i)) == 0)
3367 + continue;
3368
3369 -out:
3370 - free_netdev(dev);
3371 + addr = (u32)rxq->rx_desc_dma;
3372 + addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
3373 + wrl(mp, off, addr);
3374
3375 - return err;
3376 + rxq_enable(rxq);
3377 + }
3378 }
3379
3380 -static int mv643xx_eth_remove(struct platform_device *pdev)
3381 +static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
3382 {
3383 - struct net_device *dev = platform_get_drvdata(pdev);
3384 + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
3385 + u32 val;
3386
3387 - unregister_netdev(dev);
3388 - flush_scheduled_work();
3389 + val = rdl(mp, SDMA_CONFIG(mp->port_num));
3390 + if (mp->shared->extended_rx_coal_limit) {
3391 + if (coal > 0xffff)
3392 + coal = 0xffff;
3393 + val &= ~0x023fff80;
3394 + val |= (coal & 0x8000) << 10;
3395 + val |= (coal & 0x7fff) << 7;
3396 + } else {
3397 + if (coal > 0x3fff)
3398 + coal = 0x3fff;
3399 + val &= ~0x003fff00;
3400 + val |= (coal & 0x3fff) << 8;
3401 + }
3402 + wrl(mp, SDMA_CONFIG(mp->port_num), val);
3403 +}
3404
3405 - free_netdev(dev);
3406 - platform_set_drvdata(pdev, NULL);
3407 - return 0;
3408 +static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
3409 +{
3410 + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
3411 +
3412 + if (coal > 0x3fff)
3413 + coal = 0x3fff;
3414 + wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
3415 }
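/*
 * Illustrative, user-space sketch of the delay-to-register arithmetic in
 * set_rx_coal()/set_tx_coal() above.  The 133 MHz t_clk matches the
 * driver's fallback default; the 100 usec delay is a hypothetical example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int t_clk = 133000000;
	unsigned int usec = 100;

	/* Same formula as the driver: ticks per usec, times the requested
	 * delay, divided by the hardware granularity of 64. */
	unsigned int coal = ((t_clk / 1000000) * usec) / 64;

	/* Classic layout: 14-bit field in SDMA_CONFIG bits [21:8]. */
	unsigned int rx_classic = ((coal > 0x3fff ? 0x3fff : coal) & 0x3fff) << 8;

	/* Extended layout: 16-bit field split over bits [25] and [21:7]. */
	unsigned int c16 = coal > 0xffff ? 0xffff : coal;
	unsigned int rx_extended = ((c16 & 0x8000) << 10) | ((c16 & 0x7fff) << 7);

	/* TX threshold: 14-bit field in TX_FIFO_URGENT_THRESHOLD bits [17:4]. */
	unsigned int tx = ((coal > 0x3fff ? 0x3fff : coal) & 0x3fff) << 4;

	printf("coal=%u rx_classic=0x%08x rx_extended=0x%08x tx=0x%08x\n",
	       coal, rx_classic, rx_extended, tx);
	return 0;
}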
3416
3417 -static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp,
3418 - struct mbus_dram_target_info *dram)
3419 +static int mv643xx_eth_open(struct net_device *dev)
3420 {
3421 - void __iomem *base = msp->eth_base;
3422 - u32 win_enable;
3423 - u32 win_protect;
3424 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3425 + int err;
3426 int i;
3427
3428 - for (i = 0; i < 6; i++) {
3429 - writel(0, base + WINDOW_BASE(i));
3430 - writel(0, base + WINDOW_SIZE(i));
3431 - if (i < 4)
3432 - writel(0, base + WINDOW_REMAP_HIGH(i));
3433 + wrl(mp, INT_CAUSE(mp->port_num), 0);
3434 + wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
3435 + rdl(mp, INT_CAUSE_EXT(mp->port_num));
3436 +
3437 + err = request_irq(dev->irq, mv643xx_eth_irq,
3438 + IRQF_SHARED | IRQF_SAMPLE_RANDOM,
3439 + dev->name, dev);
3440 + if (err) {
3441 + dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
3442 + return -EAGAIN;
3443 }
3444
3445 - win_enable = 0x3f;
3446 - win_protect = 0;
3447 -
3448 - for (i = 0; i < dram->num_cs; i++) {
3449 - struct mbus_dram_window *cs = dram->cs + i;
3450 + init_mac_tables(mp);
3451
3452 - writel((cs->base & 0xffff0000) |
3453 - (cs->mbus_attr << 8) |
3454 - dram->mbus_dram_target_id, base + WINDOW_BASE(i));
3455 - writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
3456 + for (i = 0; i < 8; i++) {
3457 + if ((mp->rxq_mask & (1 << i)) == 0)
3458 + continue;
3459 +
3460 + err = rxq_init(mp, i);
3461 + if (err) {
3462 + while (--i >= 0)
3463 + if (mp->rxq_mask & (1 << i))
3464 + rxq_deinit(mp->rxq + i);
3465 + goto out;
3466 + }
3467
3468 - win_enable &= ~(1 << i);
3469 - win_protect |= 3 << (2 * i);
3470 + rxq_refill(mp->rxq + i);
3471 }
3472
3473 - writel(win_enable, base + WINDOW_BAR_ENABLE);
3474 - msp->win_protect = win_protect;
3475 -}
3476 -
3477 -static int mv643xx_eth_shared_probe(struct platform_device *pdev)
3478 -{
3479 - static int mv643xx_version_printed = 0;
3480 - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
3481 - struct mv643xx_shared_private *msp;
3482 - struct resource *res;
3483 - int ret;
3484 -
3485 - if (!mv643xx_version_printed++)
3486 - printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
3487 -
3488 - ret = -EINVAL;
3489 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3490 - if (res == NULL)
3491 - goto out;
3492 + for (i = 0; i < 8; i++) {
3493 + if ((mp->txq_mask & (1 << i)) == 0)
3494 + continue;
3495 +
3496 + err = txq_init(mp, i);
3497 + if (err) {
3498 + while (--i >= 0)
3499 + if (mp->txq_mask & (1 << i))
3500 + txq_deinit(mp->txq + i);
3501 + goto out_free;
3502 + }
3503 + }
3504
3505 - ret = -ENOMEM;
3506 - msp = kmalloc(sizeof(*msp), GFP_KERNEL);
3507 - if (msp == NULL)
3508 - goto out;
3509 - memset(msp, 0, sizeof(*msp));
3510 +#ifdef MV643XX_ETH_NAPI
3511 + napi_enable(&mp->napi);
3512 +#endif
3513
3514 - msp->eth_base = ioremap(res->start, res->end - res->start + 1);
3515 - if (msp->eth_base == NULL)
3516 - goto out_free;
3517 + port_start(mp);
3518
3519 - spin_lock_init(&msp->phy_lock);
3520 - msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
3521 + set_rx_coal(mp, 0);
3522 + set_tx_coal(mp, 0);
3523
3524 - platform_set_drvdata(pdev, msp);
3525 + wrl(mp, INT_MASK_EXT(mp->port_num),
3526 + INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
3527
3528 - /*
3529 - * (Re-)program MBUS remapping windows if we are asked to.
3530 - */
3531 - if (pd != NULL && pd->dram != NULL)
3532 - mv643xx_eth_conf_mbus_windows(msp, pd->dram);
3533 + wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
3534
3535 return 0;
3536
3537 +
3538 out_free:
3539 - kfree(msp);
3540 + for (i = 0; i < 8; i++)
3541 + if (mp->rxq_mask & (1 << i))
3542 + rxq_deinit(mp->rxq + i);
3543 out:
3544 - return ret;
3545 + free_irq(dev->irq, dev);
3546 +
3547 + return err;
3548 }
3549
3550 -static int mv643xx_eth_shared_remove(struct platform_device *pdev)
3551 +static void port_reset(struct mv643xx_eth_private *mp)
3552 {
3553 - struct mv643xx_shared_private *msp = platform_get_drvdata(pdev);
3554 + unsigned int data;
3555 + int i;
3556
3557 - iounmap(msp->eth_base);
3558 - kfree(msp);
3559 + for (i = 0; i < 8; i++) {
3560 + if (mp->rxq_mask & (1 << i))
3561 + rxq_disable(mp->rxq + i);
3562 + if (mp->txq_mask & (1 << i))
3563 + txq_disable(mp->txq + i);
3564 + }
3565 + while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
3566 + udelay(10);
3567
3568 - return 0;
3569 + /* Reset the Enable bit in the Configuration Register */
3570 + data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
3571 + data &= ~(SERIAL_PORT_ENABLE |
3572 + DO_NOT_FORCE_LINK_FAIL |
3573 + FORCE_LINK_PASS);
3574 + wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
3575 }
3576
3577 -static void mv643xx_eth_shutdown(struct platform_device *pdev)
3578 +static int mv643xx_eth_stop(struct net_device *dev)
3579 {
3580 - struct net_device *dev = platform_get_drvdata(pdev);
3581 - struct mv643xx_private *mp = netdev_priv(dev);
3582 - unsigned int port_num = mp->port_num;
3583 -
3584 - /* Mask all interrupts on ethernet port */
3585 - wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
3586 - rdl(mp, INTERRUPT_MASK_REG(port_num));
3587 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3588 + int i;
3589
3590 - eth_port_reset(mp);
3591 -}
3592 + wrl(mp, INT_MASK(mp->port_num), 0x00000000);
3593 + rdl(mp, INT_MASK(mp->port_num));
3594
3595 -static struct platform_driver mv643xx_eth_driver = {
3596 - .probe = mv643xx_eth_probe,
3597 - .remove = mv643xx_eth_remove,
3598 - .shutdown = mv643xx_eth_shutdown,
3599 - .driver = {
3600 - .name = MV643XX_ETH_NAME,
3601 - .owner = THIS_MODULE,
3602 - },
3603 -};
3604 +#ifdef MV643XX_ETH_NAPI
3605 + napi_disable(&mp->napi);
3606 +#endif
3607 + netif_carrier_off(dev);
3608 + netif_stop_queue(dev);
3609
3610 -static struct platform_driver mv643xx_eth_shared_driver = {
3611 - .probe = mv643xx_eth_shared_probe,
3612 - .remove = mv643xx_eth_shared_remove,
3613 - .driver = {
3614 - .name = MV643XX_ETH_SHARED_NAME,
3615 - .owner = THIS_MODULE,
3616 - },
3617 -};
3618 + free_irq(dev->irq, dev);
3619
3620 -/*
3621 - * mv643xx_init_module
3622 - *
3623 - * Registers the network drivers into the Linux kernel
3624 - *
3625 - * Input : N/A
3626 - *
3627 - * Output : N/A
3628 - */
3629 -static int __init mv643xx_init_module(void)
3630 -{
3631 - int rc;
3632 + port_reset(mp);
3633 + mib_counters_update(mp);
3634
3635 - rc = platform_driver_register(&mv643xx_eth_shared_driver);
3636 - if (!rc) {
3637 - rc = platform_driver_register(&mv643xx_eth_driver);
3638 - if (rc)
3639 - platform_driver_unregister(&mv643xx_eth_shared_driver);
3640 + for (i = 0; i < 8; i++) {
3641 + if (mp->rxq_mask & (1 << i))
3642 + rxq_deinit(mp->rxq + i);
3643 + if (mp->txq_mask & (1 << i))
3644 + txq_deinit(mp->txq + i);
3645 }
3646 - return rc;
3647 -}
3648
3649 -/*
3650 - * mv643xx_cleanup_module
3651 - *
3652 - * Registers the network drivers into the Linux kernel
3653 - *
3654 - * Input : N/A
3655 - *
3656 - * Output : N/A
3657 - */
3658 -static void __exit mv643xx_cleanup_module(void)
3659 -{
3660 - platform_driver_unregister(&mv643xx_eth_driver);
3661 - platform_driver_unregister(&mv643xx_eth_shared_driver);
3662 + return 0;
3663 }
3664
3665 -module_init(mv643xx_init_module);
3666 -module_exit(mv643xx_cleanup_module);
3667 -
3668 -MODULE_LICENSE("GPL");
3669 -MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
3670 - " and Dale Farnsworth");
3671 -MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
3672 -MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
3673 -MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
3674 -
3675 -/*
3676 - * The second part is the low level driver of the gigE ethernet ports.
3677 - */
3678 -
3679 -/*
3680 - * Marvell's Gigabit Ethernet controller low level driver
3681 - *
3682 - * DESCRIPTION:
3683 - * This file introduce low level API to Marvell's Gigabit Ethernet
3684 - * controller. This Gigabit Ethernet Controller driver API controls
3685 - * 1) Operations (i.e. port init, start, reset etc').
3686 - * 2) Data flow (i.e. port send, receive etc').
3687 - * Each Gigabit Ethernet port is controlled via
3688 - * struct mv643xx_private.
3689 - * This struct includes user configuration information as well as
3690 - * driver internal data needed for its operations.
3691 - *
3692 - * Supported Features:
3693 - * - This low level driver is OS independent. Allocating memory for
3694 - * the descriptor rings and buffers are not within the scope of
3695 - * this driver.
3696 - * - The user is free from Rx/Tx queue managing.
3697 - * - This low level driver introduce functionality API that enable
3698 - * the to operate Marvell's Gigabit Ethernet Controller in a
3699 - * convenient way.
3700 - * - Simple Gigabit Ethernet port operation API.
3701 - * - Simple Gigabit Ethernet port data flow API.
3702 - * - Data flow and operation API support per queue functionality.
3703 - * - Support cached descriptors for better performance.
3704 - * - Enable access to all four DRAM banks and internal SRAM memory
3705 - * spaces.
3706 - * - PHY access and control API.
3707 - * - Port control register configuration API.
3708 - * - Full control over Unicast and Multicast MAC configurations.
3709 - *
3710 - * Operation flow:
3711 - *
3712 - * Initialization phase
3713 - * This phase complete the initialization of the the
3714 - * mv643xx_private struct.
3715 - * User information regarding port configuration has to be set
3716 - * prior to calling the port initialization routine.
3717 - *
3718 - * In this phase any port Tx/Rx activity is halted, MIB counters
3719 - * are cleared, PHY address is set according to user parameter and
3720 - * access to DRAM and internal SRAM memory spaces.
3721 - *
3722 - * Driver ring initialization
3723 - * Allocating memory for the descriptor rings and buffers is not
3724 - * within the scope of this driver. Thus, the user is required to
3725 - * allocate memory for the descriptors ring and buffers. Those
3726 - * memory parameters are used by the Rx and Tx ring initialization
3727 - * routines in order to curve the descriptor linked list in a form
3728 - * of a ring.
3729 - * Note: Pay special attention to alignment issues when using
3730 - * cached descriptors/buffers. In this phase the driver store
3731 - * information in the mv643xx_private struct regarding each queue
3732 - * ring.
3733 - *
3734 - * Driver start
3735 - * This phase prepares the Ethernet port for Rx and Tx activity.
3736 - * It uses the information stored in the mv643xx_private struct to
3737 - * initialize the various port registers.
3738 - *
3739 - * Data flow:
3740 - * All packet references to/from the driver are done using
3741 - * struct pkt_info.
3742 - * This struct is a unified struct used with Rx and Tx operations.
3743 - * This way the user is not required to be familiar with neither
3744 - * Tx nor Rx descriptors structures.
3745 - * The driver's descriptors rings are management by indexes.
3746 - * Those indexes controls the ring resources and used to indicate
3747 - * a SW resource error:
3748 - * 'current'
3749 - * This index points to the current available resource for use. For
3750 - * example in Rx process this index will point to the descriptor
3751 - * that will be passed to the user upon calling the receive
3752 - * routine. In Tx process, this index will point to the descriptor
3753 - * that will be assigned with the user packet info and transmitted.
3754 - * 'used'
3755 - * This index points to the descriptor that need to restore its
3756 - * resources. For example in Rx process, using the Rx buffer return
3757 - * API will attach the buffer returned in packet info to the
3758 - * descriptor pointed by 'used'. In Tx process, using the Tx
3759 - * descriptor return will merely return the user packet info with
3760 - * the command status of the transmitted buffer pointed by the
3761 - * 'used' index. Nevertheless, it is essential to use this routine
3762 - * to update the 'used' index.
3763 - * 'first'
3764 - * This index supports Tx Scatter-Gather. It points to the first
3765 - * descriptor of a packet assembled of multiple buffers. For
3766 - * example when in middle of Such packet we have a Tx resource
3767 - * error the 'curr' index get the value of 'first' to indicate
3768 - * that the ring returned to its state before trying to transmit
3769 - * this packet.
3770 - *
3771 - * Receive operation:
3772 - * The eth_port_receive API set the packet information struct,
3773 - * passed by the caller, with received information from the
3774 - * 'current' SDMA descriptor.
3775 - * It is the user responsibility to return this resource back
3776 - * to the Rx descriptor ring to enable the reuse of this source.
3777 - * Return Rx resource is done using the eth_rx_return_buff API.
3778 - *
3779 - * Prior to calling the initialization routine eth_port_init() the user
3780 - * must set the following fields under mv643xx_private struct:
3781 - * port_num User Ethernet port number.
3782 - * port_config User port configuration value.
3783 - * port_config_extend User port config extend value.
3784 - * port_sdma_config User port SDMA config value.
3785 - * port_serial_control User port serial control value.
3786 - *
3787 - * This driver data flow is done using the struct pkt_info which
3788 - * is a unified struct for Rx and Tx operations:
3789 - *
3790 - * byte_cnt Tx/Rx descriptor buffer byte count.
3791 - * l4i_chk CPU provided TCP Checksum. For Tx operation
3792 - * only.
3793 - * cmd_sts Tx/Rx descriptor command status.
3794 - * buf_ptr Tx/Rx descriptor buffer pointer.
3795 - * return_info Tx/Rx user resource return information.
3796 - */
3797 -
3798 -/* Ethernet Port routines */
3799 -static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
3800 - int table, unsigned char entry);
3801 -
3802 -/*
3803 - * eth_port_init - Initialize the Ethernet port driver
3804 - *
3805 - * DESCRIPTION:
3806 - * This function prepares the ethernet port to start its activity:
3807 - * 1) Completes the ethernet port driver struct initialization toward port
3808 - * start routine.
3809 - * 2) Resets the device to a quiescent state in case of warm reboot.
3810 - * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
3811 - * 4) Clean MAC tables. The reset status of those tables is unknown.
3812 - * 5) Set PHY address.
3813 - * Note: Call this routine prior to eth_port_start routine and after
3814 - * setting user values in the user fields of Ethernet port control
3815 - * struct.
3816 - *
3817 - * INPUT:
3818 - * struct mv643xx_private *mp Ethernet port control struct
3819 - *
3820 - * OUTPUT:
3821 - * See description.
3822 - *
3823 - * RETURN:
3824 - * None.
3825 - */
3826 -static void eth_port_init(struct mv643xx_private *mp)
3827 +static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3828 {
3829 - mp->rx_resource_err = 0;
3830 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3831
3832 - eth_port_reset(mp);
3833 + if (mp->phy_addr != -1)
3834 + return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
3835
3836 - eth_port_init_mac_tables(mp);
3837 + return -EOPNOTSUPP;
3838 }
3839
3840 -/*
3841 - * eth_port_start - Start the Ethernet port activity.
3842 - *
3843 - * DESCRIPTION:
3844 - * This routine prepares the Ethernet port for Rx and Tx activity:
3845 - * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that
3846 - * has been initialized a descriptor's ring (using
3847 - * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
3848 - * 2. Initialize and enable the Ethernet configuration port by writing to
3849 - * the port's configuration and command registers.
3850 - * 3. Initialize and enable the SDMA by writing to the SDMA's
3851 - * configuration and command registers. After completing these steps,
3852 - * the ethernet port SDMA can starts to perform Rx and Tx activities.
3853 - *
3854 - * Note: Each Rx and Tx queue descriptor's list must be initialized prior
3855 - * to calling this function (use ether_init_tx_desc_ring for Tx queues
3856 - * and ether_init_rx_desc_ring for Rx queues).
3857 - *
3858 - * INPUT:
3859 - * dev - a pointer to the required interface
3860 - *
3861 - * OUTPUT:
3862 - * Ethernet port is ready to receive and transmit.
3863 - *
3864 - * RETURN:
3865 - * None.
3866 - */
3867 -static void eth_port_start(struct net_device *dev)
3868 +static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
3869 {
3870 - struct mv643xx_private *mp = netdev_priv(dev);
3871 - unsigned int port_num = mp->port_num;
3872 - int tx_curr_desc, rx_curr_desc;
3873 - u32 pscr;
3874 - struct ethtool_cmd ethtool_cmd;
3875 -
3876 - /* Assignment of Tx CTRP of given queue */
3877 - tx_curr_desc = mp->tx_curr_desc_q;
3878 - wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
3879 - (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
3880 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3881
3882 - /* Assignment of Rx CRDP of given queue */
3883 - rx_curr_desc = mp->rx_curr_desc_q;
3884 - wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
3885 - (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
3886 -
3887 - /* Add the assigned Ethernet address to the port's address table */
3888 - eth_port_uc_addr_set(mp, dev->dev_addr);
3889 + if (new_mtu < 64 || new_mtu > 9500)
3890 + return -EINVAL;
3891
3892 - /* Assign port configuration and command. */
3893 - wrl(mp, PORT_CONFIG_REG(port_num),
3894 - PORT_CONFIG_DEFAULT_VALUE);
3895 + dev->mtu = new_mtu;
3896 + tx_set_rate(mp, 1000000000, 16777216);
3897
3898 - wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
3899 - PORT_CONFIG_EXTEND_DEFAULT_VALUE);
3900 + if (!netif_running(dev))
3901 + return 0;
3902
3903 - pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
3904 + /*
3905 + * Stop and then re-open the interface. This will allocate RX
3906 + * skbs of the new MTU.
3907 + * There is a possible danger that the open will not succeed,
3908 + * due to memory being full.
3909 + */
3910 + mv643xx_eth_stop(dev);
3911 + if (mv643xx_eth_open(dev)) {
3912 + dev_printk(KERN_ERR, &dev->dev,
3913 + "fatal error on re-opening device after "
3914 + "MTU change\n");
3915 + }
3916
3917 - pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
3918 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
3919 + return 0;
3920 +}
3921
3922 - pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
3923 - DISABLE_AUTO_NEG_SPEED_GMII |
3924 - DISABLE_AUTO_NEG_FOR_DUPLX |
3925 - DO_NOT_FORCE_LINK_FAIL |
3926 - SERIAL_PORT_CONTROL_RESERVED;
3927 +static void tx_timeout_task(struct work_struct *ugly)
3928 +{
3929 + struct mv643xx_eth_private *mp;
3930
3931 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
3932 + mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
3933 + if (netif_running(mp->dev)) {
3934 + netif_stop_queue(mp->dev);
3935
3936 - pscr |= SERIAL_PORT_ENABLE;
3937 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
3938 + port_reset(mp);
3939 + port_start(mp);
3940
3941 - /* Assign port SDMA configuration */
3942 - wrl(mp, SDMA_CONFIG_REG(port_num),
3943 - PORT_SDMA_CONFIG_DEFAULT_VALUE);
3944 -
3945 - /* Enable port Rx. */
3946 - mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
3947 -
3948 - /* Disable port bandwidth limits by clearing MTU register */
3949 - wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
3950 -
3951 - /* save phy settings across reset */
3952 - mv643xx_get_settings(dev, &ethtool_cmd);
3953 - ethernet_phy_reset(mp);
3954 - mv643xx_set_settings(dev, &ethtool_cmd);
3955 + __txq_maybe_wake(mp->txq + mp->txq_primary);
3956 + }
3957 }
3958
3959 -/*
3960 - * eth_port_uc_addr_set - Write a MAC address into the port's hw registers
3961 - */
3962 -static void eth_port_uc_addr_set(struct mv643xx_private *mp,
3963 - unsigned char *p_addr)
3964 +static void mv643xx_eth_tx_timeout(struct net_device *dev)
3965 {
3966 - unsigned int port_num = mp->port_num;
3967 - unsigned int mac_h;
3968 - unsigned int mac_l;
3969 - int table;
3970 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3971
3972 - mac_l = (p_addr[4] << 8) | (p_addr[5]);
3973 - mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
3974 - (p_addr[3] << 0);
3975 -
3976 - wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
3977 - wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
3978 -
3979 - /* Accept frames with this address */
3980 - table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
3981 - eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
3982 + dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
3983 +
3984 + schedule_work(&mp->tx_timeout_task);
3985 }
3986
3987 -/*
3988 - * eth_port_uc_addr_get - Read the MAC address from the port's hw registers
3989 - */
3990 -static void eth_port_uc_addr_get(struct mv643xx_private *mp,
3991 - unsigned char *p_addr)
3992 +#ifdef CONFIG_NET_POLL_CONTROLLER
3993 +static void mv643xx_eth_netpoll(struct net_device *dev)
3994 {
3995 - unsigned int port_num = mp->port_num;
3996 - unsigned int mac_h;
3997 - unsigned int mac_l;
3998 + struct mv643xx_eth_private *mp = netdev_priv(dev);
3999 +
4000 + wrl(mp, INT_MASK(mp->port_num), 0x00000000);
4001 + rdl(mp, INT_MASK(mp->port_num));
4002
4003 - mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
4004 - mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
4005 + mv643xx_eth_irq(dev->irq, dev);
4006
4007 - p_addr[0] = (mac_h >> 24) & 0xff;
4008 - p_addr[1] = (mac_h >> 16) & 0xff;
4009 - p_addr[2] = (mac_h >> 8) & 0xff;
4010 - p_addr[3] = mac_h & 0xff;
4011 - p_addr[4] = (mac_l >> 8) & 0xff;
4012 - p_addr[5] = mac_l & 0xff;
4013 + wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_CAUSE_EXT);
4014 }
4015 +#endif
4016
4017 -/*
4018 - * The entries in each table are indexed by a hash of a packet's MAC
4019 - * address. One bit in each entry determines whether the packet is
4020 - * accepted. There are 4 entries (each 8 bits wide) in each register
4021 - * of the table. The bits in each entry are defined as follows:
4022 - * 0 Accept=1, Drop=0
4023 - * 3-1 Queue (ETH_Q0=0)
4024 - * 7-4 Reserved = 0;
4025 - */
4026 -static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
4027 - int table, unsigned char entry)
4028 +static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
4029 {
4030 - unsigned int table_reg;
4031 - unsigned int tbl_offset;
4032 - unsigned int reg_offset;
4033 + struct mv643xx_eth_private *mp = netdev_priv(dev);
4034 + int val;
4035
4036 - tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
4037 - reg_offset = entry % 4; /* Entry offset within the register */
4038 + smi_reg_read(mp, addr, reg, &val);
4039
4040 - /* Set "accepts frame bit" at specified table entry */
4041 - table_reg = rdl(mp, table + tbl_offset);
4042 - table_reg |= 0x01 << (8 * reg_offset);
4043 - wrl(mp, table + tbl_offset, table_reg);
4044 + return val;
4045 }
4046
4047 -/*
4048 - * eth_port_mc_addr - Multicast address settings.
4049 - *
4050 - * The MV device supports multicast using two tables:
4051 - * 1) Special Multicast Table for MAC addresses of the form
4052 - * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF).
4053 - * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
4054 - * Table entries in the DA-Filter table.
4055 - * 2) Other Multicast Table for multicast of another type. A CRC-8bit
4056 - * is used as an index to the Other Multicast Table entries in the
4057 - * DA-Filter table. This function calculates the CRC-8bit value.
4058 - * In either case, eth_port_set_filter_table_entry() is then called
4059 - * to set to set the actual table entry.
4060 - */
4061 -static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
4062 +static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
4063 {
4064 - unsigned int port_num = mp->port_num;
4065 - unsigned int mac_h;
4066 - unsigned int mac_l;
4067 - unsigned char crc_result = 0;
4068 - int table;
4069 - int mac_array[48];
4070 - int crc[8];
4071 - int i;
4072 + struct mv643xx_eth_private *mp = netdev_priv(dev);
4073 + smi_reg_write(mp, addr, reg, val);
4074 +}
4075
4076 - if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
4077 - (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
4078 - table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
4079 - eth_port_set_filter_table_entry(mp, table, p_addr[5]);
4080 - return;
4081 - }
4082
4083 - /* Calculate CRC-8 out of the given address */
4084 - mac_h = (p_addr[0] << 8) | (p_addr[1]);
4085 - mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
4086 - (p_addr[4] << 8) | (p_addr[5] << 0);
4087 -
4088 - for (i = 0; i < 32; i++)
4089 - mac_array[i] = (mac_l >> i) & 0x1;
4090 - for (i = 32; i < 48; i++)
4091 - mac_array[i] = (mac_h >> (i - 32)) & 0x1;
4092 -
4093 - crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
4094 - mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
4095 - mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
4096 - mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
4097 - mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
4098 -
4099 - crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
4100 - mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
4101 - mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
4102 - mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
4103 - mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
4104 - mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
4105 - mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
4106 -
4107 - crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
4108 - mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
4109 - mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
4110 - mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
4111 - mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
4112 - mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
4113 -
4114 - crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
4115 - mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
4116 - mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
4117 - mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
4118 - mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
4119 - mac_array[3] ^ mac_array[2] ^ mac_array[1];
4120 -
4121 - crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
4122 - mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
4123 - mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
4124 - mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
4125 - mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
4126 - mac_array[3] ^ mac_array[2];
4127 -
4128 - crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
4129 - mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
4130 - mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
4131 - mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
4132 - mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
4133 - mac_array[4] ^ mac_array[3];
4134 -
4135 - crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
4136 - mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
4137 - mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
4138 - mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
4139 - mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
4140 - mac_array[4];
4141 -
4142 - crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
4143 - mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
4144 - mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
4145 - mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
4146 - mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
4147 +/* platform glue ************************************************************/
4148 +static void
4149 +mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
4150 + struct mbus_dram_target_info *dram)
4151 +{
4152 + void __iomem *base = msp->base;
4153 + u32 win_enable;
4154 + u32 win_protect;
4155 + int i;
4156
4157 - for (i = 0; i < 8; i++)
4158 - crc_result = crc_result | (crc[i] << i);
4159 + for (i = 0; i < 6; i++) {
4160 + writel(0, base + WINDOW_BASE(i));
4161 + writel(0, base + WINDOW_SIZE(i));
4162 + if (i < 4)
4163 + writel(0, base + WINDOW_REMAP_HIGH(i));
4164 + }
4165
4166 - table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
4167 - eth_port_set_filter_table_entry(mp, table, crc_result);
4168 -}
4169 + win_enable = 0x3f;
4170 + win_protect = 0;
4171
4172 -/*
4173 - * Set the entire multicast list based on dev->mc_list.
4174 - */
4175 -static void eth_port_set_multicast_list(struct net_device *dev)
4176 -{
4177 + for (i = 0; i < dram->num_cs; i++) {
4178 + struct mbus_dram_window *cs = dram->cs + i;
4179
4180 - struct dev_mc_list *mc_list;
4181 - int i;
4182 - int table_index;
4183 - struct mv643xx_private *mp = netdev_priv(dev);
4184 - unsigned int eth_port_num = mp->port_num;
4185 -
4186 - /* If the device is in promiscuous mode or in all multicast mode,
4187 - * we will fully populate both multicast tables with accept.
4188 - * This is guaranteed to yield a match on all multicast addresses...
4189 - */
4190 - if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
4191 - for (table_index = 0; table_index <= 0xFC; table_index += 4) {
4192 - /* Set all entries in DA filter special multicast
4193 - * table (Ex_dFSMT)
4194 - * Set for ETH_Q0 for now
4195 - * Bits
4196 - * 0 Accept=1, Drop=0
4197 - * 3-1 Queue ETH_Q0=0
4198 - * 7-4 Reserved = 0;
4199 - */
4200 - wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
4201 + writel((cs->base & 0xffff0000) |
4202 + (cs->mbus_attr << 8) |
4203 + dram->mbus_dram_target_id, base + WINDOW_BASE(i));
4204 + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
4205
4206 - /* Set all entries in DA filter other multicast
4207 - * table (Ex_dFOMT)
4208 - * Set for ETH_Q0 for now
4209 - * Bits
4210 - * 0 Accept=1, Drop=0
4211 - * 3-1 Queue ETH_Q0=0
4212 - * 7-4 Reserved = 0;
4213 - */
4214 - wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
4215 - }
4216 - return;
4217 + win_enable &= ~(1 << i);
4218 + win_protect |= 3 << (2 * i);
4219 }
4220
4221 - /* We will clear out multicast tables every time we get the list.
4222 - * Then add the entire new list...
4223 - */
4224 - for (table_index = 0; table_index <= 0xFC; table_index += 4) {
4225 - /* Clear DA filter special multicast table (Ex_dFSMT) */
4226 - wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
4227 - (eth_port_num) + table_index, 0);
4228 -
4229 - /* Clear DA filter other multicast table (Ex_dFOMT) */
4230 - wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
4231 - (eth_port_num) + table_index, 0);
4232 - }
4233 -
4234 - /* Get pointer to net_device multicast list and add each one... */
4235 - for (i = 0, mc_list = dev->mc_list;
4236 - (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
4237 - i++, mc_list = mc_list->next)
4238 - if (mc_list->dmi_addrlen == 6)
4239 - eth_port_mc_addr(mp, mc_list->dmi_addr);
4240 + writel(win_enable, base + WINDOW_BAR_ENABLE);
4241 + msp->win_protect = win_protect;
4242 }
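/*
 * Illustrative, user-space sketch of how mv643xx_eth_conf_mbus_windows()
 * above encodes one DRAM chip-select window.  The base/size/attribute/
 * target-id values below are hypothetical examples, not real board data.
 */
#include <stdio.h>

int main(void)
{
	unsigned int base = 0x00000000;		/* hypothetical CS0 base */
	unsigned int size = 0x10000000;		/* hypothetical 256 MB window */
	unsigned int attr = 0x0e;		/* hypothetical mbus attribute */
	unsigned int target = 0x0;		/* hypothetical DRAM target id */
	unsigned int win_enable = 0x3f;		/* starting value used by the driver */
	unsigned int win_protect = 0;
	int i = 0;				/* programming window 0 */

	unsigned int base_reg = (base & 0xffff0000) | (attr << 8) | target;
	unsigned int size_reg = (size - 1) & 0xffff0000;

	win_enable &= ~(1 << i);		/* clear the bit for a programmed window */
	win_protect |= 3 << (2 * i);		/* two protect bits per window */

	printf("WINDOW_BASE(0) = 0x%08x\n", base_reg);
	printf("WINDOW_SIZE(0) = 0x%08x\n", size_reg);
	printf("BAR_ENABLE     = 0x%02x, protect = 0x%02x\n",
	       win_enable, win_protect);
	return 0;
}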
4243
4244 -/*
4245 - * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
4246 - *
4247 - * DESCRIPTION:
4248 - * Go through all the DA filter tables (Unicast, Special Multicast &
4249 - * Other Multicast) and set each entry to 0.
4250 - *
4251 - * INPUT:
4252 - * struct mv643xx_private *mp Ethernet Port.
4253 - *
4254 - * OUTPUT:
4255 - * Multicast and Unicast packets are rejected.
4256 - *
4257 - * RETURN:
4258 - * None.
4259 - */
4260 -static void eth_port_init_mac_tables(struct mv643xx_private *mp)
4261 +static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
4262 {
4263 - unsigned int port_num = mp->port_num;
4264 - int table_index;
4265 + /*
4266 + * Check whether we have a 14-bit coal limit field in bits
4267 + * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
4268 + * SDMA config register.
4269 + */
4270 + writel(0x02000000, msp->base + SDMA_CONFIG(0));
4271 + if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000)
4272 + msp->extended_rx_coal_limit = 1;
4273 + else
4274 + msp->extended_rx_coal_limit = 0;
4275
4276 - /* Clear DA filter unicast table (Ex_dFUT) */
4277 - for (table_index = 0; table_index <= 0xC; table_index += 4)
4278 - wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
4279 - table_index, 0);
4280 -
4281 - for (table_index = 0; table_index <= 0xFC; table_index += 4) {
4282 - /* Clear DA filter special multicast table (Ex_dFSMT) */
4283 - wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
4284 - table_index, 0);
4285 - /* Clear DA filter other multicast table (Ex_dFOMT) */
4286 - wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
4287 - table_index, 0);
4288 - }
4289 + /*
4290 + * Check whether the TX rate control registers are in the
4291 + * old or the new place.
4292 + */
4293 + writel(1, msp->base + TX_BW_MTU_MOVED(0));
4294 + if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1)
4295 + msp->tx_bw_control_moved = 1;
4296 + else
4297 + msp->tx_bw_control_moved = 0;
4298 }
4299
4300 -/*
4301 - * eth_clear_mib_counters - Clear all MIB counters
4302 - *
4303 - * DESCRIPTION:
4304 - * This function clears all MIB counters of a specific ethernet port.
4305 - * A read from the MIB counter will reset the counter.
4306 - *
4307 - * INPUT:
4308 - * struct mv643xx_private *mp Ethernet Port.
4309 - *
4310 - * OUTPUT:
4311 - * After reading all MIB counters, the counters resets.
4312 - *
4313 - * RETURN:
4314 - * MIB counter value.
4315 - *
4316 - */
4317 -static void eth_clear_mib_counters(struct mv643xx_private *mp)
4318 +static int mv643xx_eth_shared_probe(struct platform_device *pdev)
4319 {
4320 - unsigned int port_num = mp->port_num;
4321 - int i;
4322 -
4323 - /* Perform dummy reads from MIB counters */
4324 - for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
4325 - i += 4)
4326 - rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
4327 -}
4328 + static int mv643xx_eth_version_printed = 0;
4329 + struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
4330 + struct mv643xx_eth_shared_private *msp;
4331 + struct resource *res;
4332 + int ret;
4333
4334 -static inline u32 read_mib(struct mv643xx_private *mp, int offset)
4335 -{
4336 - return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
4337 -}
4338 + if (!mv643xx_eth_version_printed++)
4339 + printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
4340
4341 -static void eth_update_mib_counters(struct mv643xx_private *mp)
4342 -{
4343 - struct mv643xx_mib_counters *p = &mp->mib_counters;
4344 - int offset;
4345 + ret = -EINVAL;
4346 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4347 + if (res == NULL)
4348 + goto out;
4349
4350 - p->good_octets_received +=
4351 - read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
4352 - p->good_octets_received +=
4353 - (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;
4354 + ret = -ENOMEM;
4355 + msp = kmalloc(sizeof(*msp), GFP_KERNEL);
4356 + if (msp == NULL)
4357 + goto out;
4358 + memset(msp, 0, sizeof(*msp));
4359
4360 - for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
4361 - offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
4362 - offset += 4)
4363 - *(u32 *)((char *)p + offset) += read_mib(mp, offset);
4364 + msp->base = ioremap(res->start, res->end - res->start + 1);
4365 + if (msp->base == NULL)
4366 + goto out_free;
4367
4368 - p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
4369 - p->good_octets_sent +=
4370 - (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;
4371 + spin_lock_init(&msp->phy_lock);
4372
4373 - for (offset = ETH_MIB_GOOD_FRAMES_SENT;
4374 - offset <= ETH_MIB_LATE_COLLISION;
4375 - offset += 4)
4376 - *(u32 *)((char *)p + offset) += read_mib(mp, offset);
4377 -}
4378 + /*
4379 + * (Re-)program MBUS remapping windows if we are asked to.
4380 + */
4381 + if (pd != NULL && pd->dram != NULL)
4382 + mv643xx_eth_conf_mbus_windows(msp, pd->dram);
4383
4384 -/*
4385 - * ethernet_phy_detect - Detect whether a phy is present
4386 - *
4387 - * DESCRIPTION:
4388 - * This function tests whether there is a PHY present on
4389 - * the specified port.
4390 - *
4391 - * INPUT:
4392 - * struct mv643xx_private *mp Ethernet Port.
4393 - *
4394 - * OUTPUT:
4395 - * None
4396 - *
4397 - * RETURN:
4398 - * 0 on success
4399 - * -ENODEV on failure
4400 - *
4401 - */
4402 -static int ethernet_phy_detect(struct mv643xx_private *mp)
4403 -{
4404 - unsigned int phy_reg_data0;
4405 - int auto_neg;
4406 + /*
4407 + * Detect hardware parameters.
4408 + */
4409 + msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
4410 + infer_hw_params(msp);
4411
4412 - eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
4413 - auto_neg = phy_reg_data0 & 0x1000;
4414 - phy_reg_data0 ^= 0x1000; /* invert auto_neg */
4415 - eth_port_write_smi_reg(mp, 0, phy_reg_data0);
4416 -
4417 - eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
4418 - if ((phy_reg_data0 & 0x1000) == auto_neg)
4419 - return -ENODEV; /* change didn't take */
4420 + platform_set_drvdata(pdev, msp);
4421
4422 - phy_reg_data0 ^= 0x1000;
4423 - eth_port_write_smi_reg(mp, 0, phy_reg_data0);
4424 return 0;
4425 +
4426 +out_free:
4427 + kfree(msp);
4428 +out:
4429 + return ret;
4430 }
4431
4432 -/*
4433 - * ethernet_phy_get - Get the ethernet port PHY address.
4434 - *
4435 - * DESCRIPTION:
4436 - * This routine returns the given ethernet port PHY address.
4437 - *
4438 - * INPUT:
4439 - * struct mv643xx_private *mp Ethernet Port.
4440 - *
4441 - * OUTPUT:
4442 - * None.
4443 - *
4444 - * RETURN:
4445 - * PHY address.
4446 - *
4447 - */
4448 -static int ethernet_phy_get(struct mv643xx_private *mp)
4449 +static int mv643xx_eth_shared_remove(struct platform_device *pdev)
4450 {
4451 - unsigned int reg_data;
4452 + struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
4453
4454 - reg_data = rdl(mp, PHY_ADDR_REG);
4455 + iounmap(msp->base);
4456 + kfree(msp);
4457
4458 - return ((reg_data >> (5 * mp->port_num)) & 0x1f);
4459 + return 0;
4460 }
4461
4462 -/*
4463 - * ethernet_phy_set - Set the ethernet port PHY address.
4464 - *
4465 - * DESCRIPTION:
4466 - * This routine sets the given ethernet port PHY address.
4467 - *
4468 - * INPUT:
4469 - * struct mv643xx_private *mp Ethernet Port.
4470 - * int phy_addr PHY address.
4471 - *
4472 - * OUTPUT:
4473 - * None.
4474 - *
4475 - * RETURN:
4476 - * None.
4477 - *
4478 - */
4479 -static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
4480 +static struct platform_driver mv643xx_eth_shared_driver = {
4481 + .probe = mv643xx_eth_shared_probe,
4482 + .remove = mv643xx_eth_shared_remove,
4483 + .driver = {
4484 + .name = MV643XX_ETH_SHARED_NAME,
4485 + .owner = THIS_MODULE,
4486 + },
4487 +};
4488 +
4489 +static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
4490 {
4491 - u32 reg_data;
4492 int addr_shift = 5 * mp->port_num;
4493 + u32 data;
4494
4495 - reg_data = rdl(mp, PHY_ADDR_REG);
4496 - reg_data &= ~(0x1f << addr_shift);
4497 - reg_data |= (phy_addr & 0x1f) << addr_shift;
4498 - wrl(mp, PHY_ADDR_REG, reg_data);
4499 + data = rdl(mp, PHY_ADDR);
4500 + data &= ~(0x1f << addr_shift);
4501 + data |= (phy_addr & 0x1f) << addr_shift;
4502 + wrl(mp, PHY_ADDR, data);
4503 }
4504
4505 -/*
4506 - * ethernet_phy_reset - Reset Ethernet port PHY.
4507 - *
4508 - * DESCRIPTION:
4509 - * This routine utilizes the SMI interface to reset the ethernet port PHY.
4510 - *
4511 - * INPUT:
4512 - * struct mv643xx_private *mp Ethernet Port.
4513 - *
4514 - * OUTPUT:
4515 - * The PHY is reset.
4516 - *
4517 - * RETURN:
4518 - * None.
4519 - *
4520 - */
4521 -static void ethernet_phy_reset(struct mv643xx_private *mp)
4522 +static int phy_addr_get(struct mv643xx_eth_private *mp)
4523 {
4524 - unsigned int phy_reg_data;
4525 + unsigned int data;
4526
4527 - /* Reset the PHY */
4528 - eth_port_read_smi_reg(mp, 0, &phy_reg_data);
4529 - phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
4530 - eth_port_write_smi_reg(mp, 0, phy_reg_data);
4531 + data = rdl(mp, PHY_ADDR);
4532
4533 - /* wait for PHY to come out of reset */
4534 - do {
4535 - udelay(1);
4536 - eth_port_read_smi_reg(mp, 0, &phy_reg_data);
4537 - } while (phy_reg_data & 0x8000);
4538 + return (data >> (5 * mp->port_num)) & 0x1f;
4539 }
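/*
 * Illustrative, user-space sketch of the PHY_ADDR register packing used by
 * phy_addr_set()/phy_addr_get() above: each port owns a 5-bit field at bit
 * offset 5 * port_num.  The port numbers and PHY addresses below are
 * hypothetical.
 */
#include <stdio.h>

static unsigned int phy_addr_field_set(unsigned int reg, int port, int addr)
{
	int shift = 5 * port;

	reg &= ~(0x1f << shift);
	reg |= (addr & 0x1f) << shift;
	return reg;
}

static int phy_addr_field_get(unsigned int reg, int port)
{
	return (reg >> (5 * port)) & 0x1f;
}

int main(void)
{
	unsigned int reg = 0;

	reg = phy_addr_field_set(reg, 0, 8);	/* hypothetical: port 0, PHY 8 */
	reg = phy_addr_field_set(reg, 1, 9);	/* hypothetical: port 1, PHY 9 */

	printf("PHY_ADDR = 0x%08x, port1 -> %d\n",
	       reg, phy_addr_field_get(reg, 1));
	return 0;
}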
4540
4541 -static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
4542 - unsigned int queues)
4543 +static void set_params(struct mv643xx_eth_private *mp,
4544 + struct mv643xx_eth_platform_data *pd)
4545 {
4546 - wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
4547 -}
4548 + struct net_device *dev = mp->dev;
4549
4550 -static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
4551 - unsigned int queues)
4552 -{
4553 - wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
4554 -}
4555 + if (is_valid_ether_addr(pd->mac_addr))
4556 + memcpy(dev->dev_addr, pd->mac_addr, 6);
4557 + else
4558 + uc_addr_get(mp, dev->dev_addr);
4559
4560 -static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
4561 -{
4562 - unsigned int port_num = mp->port_num;
4563 - u32 queues;
4564 + if (pd->phy_addr == -1) {
4565 + mp->shared_smi = NULL;
4566 + mp->phy_addr = -1;
4567 + } else {
4568 + mp->shared_smi = mp->shared;
4569 + if (pd->shared_smi != NULL)
4570 + mp->shared_smi = platform_get_drvdata(pd->shared_smi);
4571 +
4572 + if (pd->force_phy_addr || pd->phy_addr) {
4573 + mp->phy_addr = pd->phy_addr & 0x3f;
4574 + phy_addr_set(mp, mp->phy_addr);
4575 + } else {
4576 + mp->phy_addr = phy_addr_get(mp);
4577 + }
4578 + }
4579
4580 - /* Stop Tx port activity. Check port Tx activity. */
4581 - queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
4582 - if (queues) {
4583 - /* Issue stop command for active queues only */
4584 - wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
4585 + mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
4586 + if (pd->rx_queue_size)
4587 + mp->default_rx_ring_size = pd->rx_queue_size;
4588 + mp->rx_desc_sram_addr = pd->rx_sram_addr;
4589 + mp->rx_desc_sram_size = pd->rx_sram_size;
4590
4591 - /* Wait for all Tx activity to terminate. */
4592 - /* Check port cause register that all Tx queues are stopped */
4593 - while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
4594 - udelay(PHY_WAIT_MICRO_SECONDS);
4595 + if (pd->rx_queue_mask)
4596 + mp->rxq_mask = pd->rx_queue_mask;
4597 + else
4598 + mp->rxq_mask = 0x01;
4599 + mp->rxq_primary = fls(mp->rxq_mask) - 1;
4600
4601 - /* Wait for Tx FIFO to empty */
4602 - while (rdl(mp, PORT_STATUS_REG(port_num)) &
4603 - ETH_PORT_TX_FIFO_EMPTY)
4604 - udelay(PHY_WAIT_MICRO_SECONDS);
4605 - }
4606 + mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
4607 + if (pd->tx_queue_size)
4608 + mp->default_tx_ring_size = pd->tx_queue_size;
4609 + mp->tx_desc_sram_addr = pd->tx_sram_addr;
4610 + mp->tx_desc_sram_size = pd->tx_sram_size;
4611
4612 - return queues;
4613 + if (pd->tx_queue_mask)
4614 + mp->txq_mask = pd->tx_queue_mask;
4615 + else
4616 + mp->txq_mask = 0x01;
4617 + mp->txq_primary = fls(mp->txq_mask) - 1;
4618 }
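/*
 * Illustrative, user-space sketch of how set_params() above derives the
 * primary queue index from a queue enable mask: fls(mask) - 1, i.e. the
 * highest-numbered enabled queue.  fls_sketch() is a minimal stand-in for
 * the kernel's fls(); the masks below are hypothetical.
 */
#include <stdio.h>

static int fls_sketch(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int masks[] = { 0x01, 0x03, 0x05 };	/* hypothetical */
	int i;

	for (i = 0; i < 3; i++)
		printf("mask 0x%02x -> primary queue %d\n",
		       masks[i], fls_sketch(masks[i]) - 1);
	return 0;
}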
4619
4620 -static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
4621 +static int phy_detect(struct mv643xx_eth_private *mp)
4622 {
4623 - unsigned int port_num = mp->port_num;
4624 - u32 queues;
4625 + unsigned int data;
4626 + unsigned int data2;
4627
4628 - /* Stop Rx port activity. Check port Rx activity. */
4629 - queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
4630 - if (queues) {
4631 - /* Issue stop command for active queues only */
4632 - wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
4633 + smi_reg_read(mp, mp->phy_addr, 0, &data);
4634 + smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);
4635
4636 - /* Wait for all Rx activity to terminate. */
4637 - /* Check port cause register that all Rx queues are stopped */
4638 - while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
4639 - udelay(PHY_WAIT_MICRO_SECONDS);
4640 - }
4641 + smi_reg_read(mp, mp->phy_addr, 0, &data2);
4642 + if (((data ^ data2) & 0x1000) == 0)
4643 + return -ENODEV;
4644 +
4645 + smi_reg_write(mp, mp->phy_addr, 0, data);
4646
4647 - return queues;
4648 + return 0;
4649 }
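/*
 * Illustrative, user-space sketch of the presence test in phy_detect()
 * above: flip the auto-negotiation bit (0x1000) in PHY register 0, read it
 * back, and report "no PHY" if the flip did not stick.  smi_read()/
 * smi_write() are hypothetical stand-ins backed by a plain array.
 */
#include <stdio.h>

static unsigned int phy_regs[32];	/* fake PHY register file */

static void smi_write(int reg, unsigned int val) { phy_regs[reg] = val; }
static unsigned int smi_read(int reg) { return phy_regs[reg]; }

static int detect(void)
{
	unsigned int data = smi_read(0);
	unsigned int data2;

	smi_write(0, data ^ 0x1000);	/* try to flip the autoneg bit */
	data2 = smi_read(0);

	if (((data ^ data2) & 0x1000) == 0)
		return -1;		/* flip didn't stick: no PHY there */

	smi_write(0, data);		/* put the original value back */
	return 0;
}

int main(void)
{
	phy_regs[0] = 0x1140;		/* hypothetical PHY control value */
	printf("phy %s\n", detect() == 0 ? "present" : "absent");
	return 0;
}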
4650
4651 -/*
4652 - * eth_port_reset - Reset Ethernet port
4653 - *
4654 - * DESCRIPTION:
4655 - * This routine resets the chip by aborting any SDMA engine activity and
4656 - * clearing the MIB counters. The Receiver and the Transmit unit are in
4657 - * idle state after this command is performed and the port is disabled.
4658 - *
4659 - * INPUT:
4660 - * struct mv643xx_private *mp Ethernet Port.
4661 - *
4662 - * OUTPUT:
4663 - * Channel activity is halted.
4664 - *
4665 - * RETURN:
4666 - * None.
4667 - *
4668 - */
4669 -static void eth_port_reset(struct mv643xx_private *mp)
4670 +static int phy_init(struct mv643xx_eth_private *mp,
4671 + struct mv643xx_eth_platform_data *pd)
4672 {
4673 - unsigned int port_num = mp->port_num;
4674 - unsigned int reg_data;
4675 -
4676 - mv643xx_eth_port_disable_tx(mp);
4677 - mv643xx_eth_port_disable_rx(mp);
4678 -
4679 - /* Clear all MIB counters */
4680 - eth_clear_mib_counters(mp);
4681 + struct ethtool_cmd cmd;
4682 + int err;
4683
4684 - /* Reset the Enable bit in the Configuration Register */
4685 - reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
4686 - reg_data &= ~(SERIAL_PORT_ENABLE |
4687 - DO_NOT_FORCE_LINK_FAIL |
4688 - FORCE_LINK_PASS);
4689 - wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
4690 -}
4691 + err = phy_detect(mp);
4692 + if (err) {
4693 + dev_printk(KERN_INFO, &mp->dev->dev,
4694 + "no PHY detected at addr %d\n", mp->phy_addr);
4695 + return err;
4696 + }
4697 + phy_reset(mp);
4698
4699 + mp->mii.phy_id = mp->phy_addr;
4700 + mp->mii.phy_id_mask = 0x3f;
4701 + mp->mii.reg_num_mask = 0x1f;
4702 + mp->mii.dev = mp->dev;
4703 + mp->mii.mdio_read = mv643xx_eth_mdio_read;
4704 + mp->mii.mdio_write = mv643xx_eth_mdio_write;
4705
4706 -/*
4707 - * eth_port_read_smi_reg - Read PHY registers
4708 - *
4709 - * DESCRIPTION:
4710 - * This routine utilize the SMI interface to interact with the PHY in
4711 - * order to perform PHY register read.
4712 - *
4713 - * INPUT:
4714 - * struct mv643xx_private *mp Ethernet Port.
4715 - * unsigned int phy_reg PHY register address offset.
4716 - * unsigned int *value Register value buffer.
4717 - *
4718 - * OUTPUT:
4719 - * Write the value of a specified PHY register into given buffer.
4720 - *
4721 - * RETURN:
4722 - * false if the PHY is busy or read data is not in valid state.
4723 - * true otherwise.
4724 - *
4725 - */
4726 -static void eth_port_read_smi_reg(struct mv643xx_private *mp,
4727 - unsigned int phy_reg, unsigned int *value)
4728 -{
4729 - void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
4730 - int phy_addr = ethernet_phy_get(mp);
4731 - unsigned long flags;
4732 - int i;
4733 + mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
4734
4735 - /* the SMI register is a shared resource */
4736 - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
4737 + memset(&cmd, 0, sizeof(cmd));
4738
4739 - /* wait for the SMI register to become available */
4740 - for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
4741 - if (i == PHY_WAIT_ITERATIONS) {
4742 - printk("%s: PHY busy timeout\n", mp->dev->name);
4743 - goto out;
4744 - }
4745 - udelay(PHY_WAIT_MICRO_SECONDS);
4746 + cmd.port = PORT_MII;
4747 + cmd.transceiver = XCVR_INTERNAL;
4748 + cmd.phy_address = mp->phy_addr;
4749 + if (pd->speed == 0) {
4750 + cmd.autoneg = AUTONEG_ENABLE;
4751 + cmd.speed = SPEED_100;
4752 + cmd.advertising = ADVERTISED_10baseT_Half |
4753 + ADVERTISED_10baseT_Full |
4754 + ADVERTISED_100baseT_Half |
4755 + ADVERTISED_100baseT_Full;
4756 + if (mp->mii.supports_gmii)
4757 + cmd.advertising |= ADVERTISED_1000baseT_Full;
4758 + } else {
4759 + cmd.autoneg = AUTONEG_DISABLE;
4760 + cmd.speed = pd->speed;
4761 + cmd.duplex = pd->duplex;
4762 }
4763
4764 - writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ,
4765 - smi_reg);
4766 -
4767 - /* now wait for the data to be valid */
4768 - for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) {
4769 - if (i == PHY_WAIT_ITERATIONS) {
4770 - printk("%s: PHY read timeout\n", mp->dev->name);
4771 - goto out;
4772 - }
4773 - udelay(PHY_WAIT_MICRO_SECONDS);
4774 - }
4775 + update_pscr(mp, cmd.speed, cmd.duplex);
4776 + mv643xx_eth_set_settings(mp->dev, &cmd);
4777
4778 - *value = readl(smi_reg) & 0xffff;
4779 -out:
4780 - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
4781 + return 0;
4782 }
4783
4784 -/*
4785 - * eth_port_write_smi_reg - Write to PHY registers
4786 - *
4787 - * DESCRIPTION:
4788 - * This routine utilize the SMI interface to interact with the PHY in
4789 - * order to perform writes to PHY registers.
4790 - *
4791 - * INPUT:
4792 - * struct mv643xx_private *mp Ethernet Port.
4793 - * unsigned int phy_reg PHY register address offset.
4794 - * unsigned int value Register value.
4795 - *
4796 - * OUTPUT:
4797 - * Write the given value to the specified PHY register.
4798 - *
4799 - * RETURN:
4800 - * false if the PHY is busy.
4801 - * true otherwise.
4802 - *
4803 - */
4804 -static void eth_port_write_smi_reg(struct mv643xx_private *mp,
4805 - unsigned int phy_reg, unsigned int value)
4806 +static int mv643xx_eth_probe(struct platform_device *pdev)
4807 {
4808 - void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
4809 - int phy_addr = ethernet_phy_get(mp);
4810 - unsigned long flags;
4811 - int i;
4812 + struct mv643xx_eth_platform_data *pd;
4813 + struct mv643xx_eth_private *mp;
4814 + struct net_device *dev;
4815 + struct resource *res;
4816 + DECLARE_MAC_BUF(mac);
4817 + int err;
4818
4819 - /* the SMI register is a shared resource */
4820 - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
4821 + pd = pdev->dev.platform_data;
4822 + if (pd == NULL) {
4823 + dev_printk(KERN_ERR, &pdev->dev,
4824 + "no mv643xx_eth_platform_data\n");
4825 + return -ENODEV;
4826 + }
4827
4828 - /* wait for the SMI register to become available */
4829 - for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
4830 - if (i == PHY_WAIT_ITERATIONS) {
4831 - printk("%s: PHY busy timeout\n", mp->dev->name);
4832 - goto out;
4833 - }
4834 - udelay(PHY_WAIT_MICRO_SECONDS);
4835 + if (pd->shared == NULL) {
4836 + dev_printk(KERN_ERR, &pdev->dev,
4837 + "no mv643xx_eth_platform_data->shared\n");
4838 + return -ENODEV;
4839 }
4840
4841 - writel((phy_addr << 16) | (phy_reg << 21) |
4842 - ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
4843 -out:
4844 - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
4845 -}
4846 + dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
4847 + if (!dev)
4848 + return -ENOMEM;
4849
4850 -/*
4851 - * Wrappers for MII support library.
4852 - */
4853 -static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
4854 -{
4855 - struct mv643xx_private *mp = netdev_priv(dev);
4856 - int val;
4857 + mp = netdev_priv(dev);
4858 + platform_set_drvdata(pdev, mp);
4859
4860 - eth_port_read_smi_reg(mp, location, &val);
4861 - return val;
4862 -}
4863 + mp->shared = platform_get_drvdata(pd->shared);
4864 + mp->port_num = pd->port_number;
4865
4866 -static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
4867 -{
4868 - struct mv643xx_private *mp = netdev_priv(dev);
4869 - eth_port_write_smi_reg(mp, location, val);
4870 -}
4871 + mp->dev = dev;
4872 +#ifdef MV643XX_ETH_NAPI
4873 + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
4874 +#endif
4875
4876 -/*
4877 - * eth_port_receive - Get received information from Rx ring.
4878 - *
4879 - * DESCRIPTION:
4880 - * This routine returns the received data to the caller. There is no
4881 - * data copying during routine operation. All information is returned
4882 - * using pointer to packet information struct passed from the caller.
4883 - * If the routine exhausts Rx ring resources then the resource error flag
4884 - * is set.
4885 - *
4886 - * INPUT:
4887 - * struct mv643xx_private *mp Ethernet Port Control srtuct.
4888 - * struct pkt_info *p_pkt_info User packet buffer.
4889 - *
4890 - * OUTPUT:
4891 - * Rx ring current and used indexes are updated.
4892 - *
4893 - * RETURN:
4894 - * ETH_ERROR in case the routine can not access Rx desc ring.
4895 - * ETH_QUEUE_FULL if Rx ring resources are exhausted.
4896 - * ETH_END_OF_JOB if there is no received data.
4897 - * ETH_OK otherwise.
4898 - */
4899 -static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
4900 - struct pkt_info *p_pkt_info)
4901 -{
4902 - int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
4903 - volatile struct eth_rx_desc *p_rx_desc;
4904 - unsigned int command_status;
4905 - unsigned long flags;
4906 + set_params(mp, pd);
4907
4908 - /* Do not process Rx ring in case of Rx ring resource error */
4909 - if (mp->rx_resource_err)
4910 - return ETH_QUEUE_FULL;
4911 + spin_lock_init(&mp->lock);
4912
4913 - spin_lock_irqsave(&mp->lock, flags);
4914 + mib_counters_clear(mp);
4915 + INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
4916
4917 - /* Get the Rx Desc ring 'curr and 'used' indexes */
4918 - rx_curr_desc = mp->rx_curr_desc_q;
4919 - rx_used_desc = mp->rx_used_desc_q;
4920 -
4921 - p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
4922 -
4923 - /* The following parameters are used to save readings from memory */
4924 - command_status = p_rx_desc->cmd_sts;
4925 - rmb();
4926 + if (mp->phy_addr != -1) {
4927 + err = phy_init(mp, pd);
4928 + if (err)
4929 + goto out;
4930
4931 - /* Nothing to receive... */
4932 - if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
4933 - spin_unlock_irqrestore(&mp->lock, flags);
4934 - return ETH_END_OF_JOB;
4935 + SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
4936 + } else {
4937 + SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
4938 }
4939
4940 - p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
4941 - p_pkt_info->cmd_sts = command_status;
4942 - p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
4943 - p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
4944 - p_pkt_info->l4i_chk = p_rx_desc->buf_size;
4945 -
4946 - /*
4947 - * Clean the return info field to indicate that the
4948 - * packet has been moved to the upper layers
4949 - */
4950 - mp->rx_skb[rx_curr_desc] = NULL;
4951
4952 - /* Update current index in data structure */
4953 - rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
4954 - mp->rx_curr_desc_q = rx_next_curr_desc;
4955 + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
4956 + BUG_ON(!res);
4957 + dev->irq = res->start;
4958
4959 - /* Rx descriptors exhausted. Set the Rx ring resource error flag */
4960 - if (rx_next_curr_desc == rx_used_desc)
4961 - mp->rx_resource_err = 1;
4962 + dev->hard_start_xmit = mv643xx_eth_xmit;
4963 + dev->open = mv643xx_eth_open;
4964 + dev->stop = mv643xx_eth_stop;
4965 + dev->set_multicast_list = mv643xx_eth_set_rx_mode;
4966 + dev->set_mac_address = mv643xx_eth_set_mac_address;
4967 + dev->do_ioctl = mv643xx_eth_ioctl;
4968 + dev->change_mtu = mv643xx_eth_change_mtu;
4969 + dev->tx_timeout = mv643xx_eth_tx_timeout;
4970 +#ifdef CONFIG_NET_POLL_CONTROLLER
4971 + dev->poll_controller = mv643xx_eth_netpoll;
4972 +#endif
4973 + dev->watchdog_timeo = 2 * HZ;
4974 + dev->base_addr = 0;
4975
4976 - spin_unlock_irqrestore(&mp->lock, flags);
4977 +#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
4978 + /*
4979 + * Zero copy can only work if we use Discovery II memory. Else, we will
4980 + * have to map the buffers to ISA memory which is only 16 MB
4981 + */
4982 + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
4983 +#endif
4984
4985 - return ETH_OK;
4986 -}
4987 + SET_NETDEV_DEV(dev, &pdev->dev);
4988
4989 -/*
4990 - * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
4991 - *
4992 - * DESCRIPTION:
4993 - * This routine returns a Rx buffer back to the Rx ring. It retrieves the
4994 - * next 'used' descriptor and attached the returned buffer to it.
4995 - * In case the Rx ring was in "resource error" condition, where there are
4996 - * no available Rx resources, the function resets the resource error flag.
4997 - *
4998 - * INPUT:
4999 - * struct mv643xx_private *mp Ethernet Port Control srtuct.
5000 - * struct pkt_info *p_pkt_info Information on returned buffer.
5001 - *
5002 - * OUTPUT:
5003 - * New available Rx resource in Rx descriptor ring.
5004 - *
5005 - * RETURN:
5006 - * ETH_ERROR in case the routine can not access Rx desc ring.
5007 - * ETH_OK otherwise.
5008 - */
5009 -static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
5010 - struct pkt_info *p_pkt_info)
5011 -{
5012 - int used_rx_desc; /* Where to return Rx resource */
5013 - volatile struct eth_rx_desc *p_used_rx_desc;
5014 - unsigned long flags;
5015 + if (mp->shared->win_protect)
5016 + wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
5017
5018 - spin_lock_irqsave(&mp->lock, flags);
5019 + err = register_netdev(dev);
5020 + if (err)
5021 + goto out;
5022
5023 - /* Get 'used' Rx descriptor */
5024 - used_rx_desc = mp->rx_used_desc_q;
5025 - p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
5026 + dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
5027 + mp->port_num, print_mac(mac, dev->dev_addr));
5028
5029 - p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
5030 - p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
5031 - mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
5032 + if (dev->features & NETIF_F_SG)
5033 + dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
5034
5035 - /* Flush the write pipe */
5036 + if (dev->features & NETIF_F_IP_CSUM)
5037 + dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
5038
5039 - /* Return the descriptor to DMA ownership */
5040 - wmb();
5041 - p_used_rx_desc->cmd_sts =
5042 - ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
5043 - wmb();
5044 +#ifdef MV643XX_ETH_NAPI
5045 + dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
5046 +#endif
5047
5048 - /* Move the used descriptor pointer to the next descriptor */
5049 - mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;
5050 + if (mp->tx_desc_sram_size > 0)
5051 + dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
5052
5053 - /* Any Rx return cancels the Rx resource error status */
5054 - mp->rx_resource_err = 0;
5055 + return 0;
5056
5057 - spin_unlock_irqrestore(&mp->lock, flags);
5058 +out:
5059 + free_netdev(dev);
5060
5061 - return ETH_OK;
5062 + return err;
5063 }
5064
5065 -/************* Begin ethtool support *************************/
5066 -
5067 -struct mv643xx_stats {
5068 - char stat_string[ETH_GSTRING_LEN];
5069 - int sizeof_stat;
5070 - int stat_offset;
5071 -};
5072 -
5073 -#define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \
5074 - offsetof(struct mv643xx_private, m)
5075 -
5076 -static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
5077 - { "rx_packets", MV643XX_STAT(stats.rx_packets) },
5078 - { "tx_packets", MV643XX_STAT(stats.tx_packets) },
5079 - { "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
5080 - { "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
5081 - { "rx_errors", MV643XX_STAT(stats.rx_errors) },
5082 - { "tx_errors", MV643XX_STAT(stats.tx_errors) },
5083 - { "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
5084 - { "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
5085 - { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
5086 - { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
5087 - { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
5088 - { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
5089 - { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
5090 - { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
5091 - { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
5092 - { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
5093 - { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
5094 - { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
5095 - { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
5096 - { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
5097 - { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
5098 - { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
5099 - { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
5100 - { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
5101 - { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
5102 - { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
5103 - { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
5104 - { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
5105 - { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
5106 - { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
5107 - { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
5108 - { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
5109 - { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
5110 - { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
5111 - { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
5112 - { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
5113 - { "collision", MV643XX_STAT(mib_counters.collision) },
5114 - { "late_collision", MV643XX_STAT(mib_counters.late_collision) },
5115 -};
5116 +static int mv643xx_eth_remove(struct platform_device *pdev)
5117 +{
5118 + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
5119
5120 -#define MV643XX_STATS_LEN ARRAY_SIZE(mv643xx_gstrings_stats)
5121 + unregister_netdev(mp->dev);
5122 + flush_scheduled_work();
5123 + free_netdev(mp->dev);
5124
5125 -static void mv643xx_get_drvinfo(struct net_device *netdev,
5126 - struct ethtool_drvinfo *drvinfo)
5127 -{
5128 - strncpy(drvinfo->driver, mv643xx_driver_name, 32);
5129 - strncpy(drvinfo->version, mv643xx_driver_version, 32);
5130 - strncpy(drvinfo->fw_version, "N/A", 32);
5131 - strncpy(drvinfo->bus_info, "mv643xx", 32);
5132 - drvinfo->n_stats = MV643XX_STATS_LEN;
5133 -}
5134 + platform_set_drvdata(pdev, NULL);
5135
5136 -static int mv643xx_get_sset_count(struct net_device *netdev, int sset)
5137 -{
5138 - switch (sset) {
5139 - case ETH_SS_STATS:
5140 - return MV643XX_STATS_LEN;
5141 - default:
5142 - return -EOPNOTSUPP;
5143 - }
5144 + return 0;
5145 }
5146
5147 -static void mv643xx_get_ethtool_stats(struct net_device *netdev,
5148 - struct ethtool_stats *stats, uint64_t *data)
5149 +static void mv643xx_eth_shutdown(struct platform_device *pdev)
5150 {
5151 - struct mv643xx_private *mp = netdev->priv;
5152 - int i;
5153 + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
5154
5155 - eth_update_mib_counters(mp);
5156 + /* Mask all interrupts on ethernet port */
5157 + wrl(mp, INT_MASK(mp->port_num), 0);
5158 + rdl(mp, INT_MASK(mp->port_num));
5159
5160 - for (i = 0; i < MV643XX_STATS_LEN; i++) {
5161 - char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
5162 - data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
5163 - sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
5164 - }
5165 + if (netif_running(mp->dev))
5166 + port_reset(mp);
5167 }
5168
5169 -static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
5170 - uint8_t *data)
5171 -{
5172 - int i;
5173 -
5174 - switch(stringset) {
5175 - case ETH_SS_STATS:
5176 - for (i=0; i < MV643XX_STATS_LEN; i++) {
5177 - memcpy(data + i * ETH_GSTRING_LEN,
5178 - mv643xx_gstrings_stats[i].stat_string,
5179 - ETH_GSTRING_LEN);
5180 - }
5181 - break;
5182 - }
5183 -}
5184 +static struct platform_driver mv643xx_eth_driver = {
5185 + .probe = mv643xx_eth_probe,
5186 + .remove = mv643xx_eth_remove,
5187 + .shutdown = mv643xx_eth_shutdown,
5188 + .driver = {
5189 + .name = MV643XX_ETH_NAME,
5190 + .owner = THIS_MODULE,
5191 + },
5192 +};
5193
5194 -static u32 mv643xx_eth_get_link(struct net_device *dev)
5195 +static int __init mv643xx_eth_init_module(void)
5196 {
5197 - struct mv643xx_private *mp = netdev_priv(dev);
5198 -
5199 - return mii_link_ok(&mp->mii);
5200 -}
5201 + int rc;
5202
5203 -static int mv643xx_eth_nway_restart(struct net_device *dev)
5204 -{
5205 - struct mv643xx_private *mp = netdev_priv(dev);
5206 + rc = platform_driver_register(&mv643xx_eth_shared_driver);
5207 + if (!rc) {
5208 + rc = platform_driver_register(&mv643xx_eth_driver);
5209 + if (rc)
5210 + platform_driver_unregister(&mv643xx_eth_shared_driver);
5211 + }
5212
5213 - return mii_nway_restart(&mp->mii);
5214 + return rc;
5215 }
5216 +module_init(mv643xx_eth_init_module);
5217
5218 -static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5219 +static void __exit mv643xx_eth_cleanup_module(void)
5220 {
5221 - struct mv643xx_private *mp = netdev_priv(dev);
5222 -
5223 - return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
5224 + platform_driver_unregister(&mv643xx_eth_driver);
5225 + platform_driver_unregister(&mv643xx_eth_shared_driver);
5226 }
5227 +module_exit(mv643xx_eth_cleanup_module);
5228
5229 -static const struct ethtool_ops mv643xx_ethtool_ops = {
5230 - .get_settings = mv643xx_get_settings,
5231 - .set_settings = mv643xx_set_settings,
5232 - .get_drvinfo = mv643xx_get_drvinfo,
5233 - .get_link = mv643xx_eth_get_link,
5234 - .set_sg = ethtool_op_set_sg,
5235 - .get_sset_count = mv643xx_get_sset_count,
5236 - .get_ethtool_stats = mv643xx_get_ethtool_stats,
5237 - .get_strings = mv643xx_get_strings,
5238 - .nway_reset = mv643xx_eth_nway_restart,
5239 -};
5240 -
5241 -/************* End ethtool support *************************/
5242 +MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
5243 + "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
5244 +MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
5245 +MODULE_LICENSE("GPL");
5246 +MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
5247 +MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
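
For context, a minimal hypothetical sketch (not part of the patch): the two platform drivers registered above bind purely by name, so a board provides one device named MV643XX_ETH_SHARED_NAME for the common register block and one device named MV643XX_ETH_NAME per port, and the MODULE_ALIAS lines let udev autoload this module for either device name. In the sketch below the register base, window size and t_clk value are placeholders, not values from any real board.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>

/* Hypothetical shared controller device; binds to mv643xx_eth_shared_driver. */
static struct resource mv643xx_eth_shared_resources[] = {
	{
		.start	= 0xf1072000,		/* placeholder register base */
		.end	= 0xf1072000 + 0x0fff,	/* placeholder window size */
		.flags	= IORESOURCE_MEM,
	},
};

static struct mv643xx_eth_shared_platform_data mv643xx_eth_shared_data = {
	.t_clk	= 166666667,		/* placeholder core clock, in Hz */
};

static struct platform_device mv643xx_eth_shared_device = {
	.name		= MV643XX_ETH_SHARED_NAME,
	.id		= 0,
	.num_resources	= ARRAY_SIZE(mv643xx_eth_shared_resources),
	.resource	= mv643xx_eth_shared_resources,
	.dev		= {
		.platform_data	= &mv643xx_eth_shared_data,
	},
};
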
5248 --- a/include/linux/mv643xx_eth.h
5249 +++ b/include/linux/mv643xx_eth.h
5250 @@ -17,30 +17,59 @@
5251
5252 struct mv643xx_eth_shared_platform_data {
5253 struct mbus_dram_target_info *dram;
5254 - unsigned int t_clk;
5255 + unsigned int t_clk;
5256 };
5257
5258 struct mv643xx_eth_platform_data {
5259 + /*
5260 + * Pointer back to our parent instance, and our port number.
5261 + */
5262 struct platform_device *shared;
5263 - int port_number;
5264 + int port_number;
5265
5266 + /*
5267 + * Whether a PHY is present, and if yes, at which address.
5268 + */
5269 struct platform_device *shared_smi;
5270 + int force_phy_addr;
5271 + int phy_addr;
5272
5273 - u16 force_phy_addr; /* force override if phy_addr == 0 */
5274 - u16 phy_addr;
5275 -
5276 - /* If speed is 0, then speed and duplex are autonegotiated. */
5277 - int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
5278 - int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
5279 -
5280 - /* non-zero values of the following fields override defaults */
5281 - u32 tx_queue_size;
5282 - u32 rx_queue_size;
5283 - u32 tx_sram_addr;
5284 - u32 tx_sram_size;
5285 - u32 rx_sram_addr;
5286 - u32 rx_sram_size;
5287 - u8 mac_addr[6]; /* mac address if non-zero*/
5288 + /*
5289 + * Use this MAC address if it is valid, overriding the
5290 + * address that is already in the hardware.
5291 + */
5292 + u8 mac_addr[6];
5293 +
5294 + /*
5295 + * If speed is 0, autonegotiation is enabled.
5296 + * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
5297 + * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
5298 + */
5299 + int speed;
5300 + int duplex;
5301 +
5302 + /*
5303 + * Which RX/TX queues to use.
5304 + */
5305 + int rx_queue_mask;
5306 + int tx_queue_mask;
5307 +
5308 + /*
5309 + * Override default RX/TX queue sizes if nonzero.
5310 + */
5311 + int rx_queue_size;
5312 + int tx_queue_size;
5313 +
5314 + /*
5315 + * Use on-chip SRAM for RX/TX descriptors if size is nonzero
5316 + * and sufficient to contain all descriptors for the requested
5317 + * ring sizes.
5318 + */
5319 + unsigned long rx_sram_addr;
5320 + int rx_sram_size;
5321 + unsigned long tx_sram_addr;
5322 + int tx_sram_size;
5323 };
5324
5325 -#endif /* __LINUX_MV643XX_ETH_H */
5326 +
5327 +#endif
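
The reworked platform data above is what a board file attaches to each per-port device. A hypothetical sketch, continuing the shared-device example shown after the driver diff; the IRQ number, PHY address and queue sizes are illustrative only, not values taken from any real board.

/* Hypothetical per-port device; binds to mv643xx_eth_driver by name. */
static struct resource mv643xx_eth_port0_resources[] = {
	{
		.start	= 29,		/* placeholder IRQ; the probe BUG()s without one */
		.end	= 29,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct mv643xx_eth_platform_data mv643xx_eth_port0_data = {
	.shared		= &mv643xx_eth_shared_device,
	.port_number	= 0,
	.phy_addr	= 8,		/* placeholder SMI address of the port's PHY */
	.speed		= 0,		/* 0 => autonegotiate speed and duplex */
	.rx_queue_size	= 400,		/* nonzero values override the driver defaults */
	.tx_queue_size	= 800,
};

static struct platform_device mv643xx_eth_port0_device = {
	.name			= MV643XX_ETH_NAME,
	.id			= 0,
	.num_resources		= ARRAY_SIZE(mv643xx_eth_port0_resources),
	.resource		= mv643xx_eth_port0_resources,
	.dev			= {
		.platform_data	= &mv643xx_eth_port0_data,
	},
};

Both devices would then be registered from the board's init code with platform_device_register(), shared device first, so the common register block is set up before the port probes run.
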
5328 --- a/MAINTAINERS
5329 +++ b/MAINTAINERS
5330 @@ -2689,12 +2689,10 @@
5331 S: Maintained
5332
5333 MARVELL MV643XX ETHERNET DRIVER
5334 -P: Dale Farnsworth
5335 -M: dale@farnsworth.org
5336 -P: Manish Lachwani
5337 -M: mlachwani@mvista.com
5338 +P: Lennert Buytenhek
5339 +M: buytenh@marvell.com
5340 L: netdev@vger.kernel.org
5341 -S: Odd Fixes for 2.4; Maintained for 2.6.
5342 +S: Supported
5343
5344 MATROX FRAMEBUFFER DRIVER
5345 P: Petr Vandrovec