kernel: bump kernel 4.4 to 4.4.129 for 17.01
target/linux/mvebu/patches-4.4/035-net-mvneta-Configure-XPS-support.patch
From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Wed, 9 Dec 2015 18:23:51 +0100
Subject: [PATCH] net: mvneta: Configure XPS support

With this patch, each CPU is associated with its own set of TX queues.

It also sets up XPS with an initial configuration whose affinity
matches the hardware configuration.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

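The queue-to-CPU distribution described above is easiest to see with a
worked example. The following standalone sketch (an illustration, not
part of the patch; the CPU and queue counts are assumed values)
reproduces the per-CPU access masks that the driver writes into the
MVNETA_CPU_MAP register, whose low byte carries RX-queue access bits
and whose second byte carries TX-queue access bits:

/* Illustration only: userspace re-implementation of the mask logic
 * the patch adds to mvneta_defaults_set(). CPU/queue counts are
 * made-up example values.
 */
#include <stdio.h>

#define BIT(n)              (1u << (n))
#define CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define CPU_TXQ_ACCESS(txq) BIT((txq) + 8)

int main(void)
{
        const int num_cpus = 2, rxq_number = 8, txq_number = 8;
        const int rxq_def = 0;  /* plays the role of pp->rxq_def */

        for (int cpu = 0; cpu < num_cpus; cpu++) {
                unsigned int rxq_map = 0, txq_map = 0;

                for (int rxq = 0; rxq < rxq_number; rxq++)
                        if ((rxq % num_cpus) == cpu)
                                rxq_map |= CPU_RXQ_ACCESS(rxq);

                for (int txq = 0; txq < txq_number; txq++)
                        if ((txq % num_cpus) == cpu)
                                txq_map |= CPU_TXQ_ACCESS(txq);

                /* single-TX-queue special case from the patch */
                if (txq_number == 1)
                        txq_map = (cpu == rxq_def) ? CPU_TXQ_ACCESS(1) : 0;

                printf("CPU%d: MVNETA_CPU_MAP = 0x%04x\n",
                       cpu, rxq_map | txq_map);
        }
        return 0;
}

With two CPUs and eight queues of each kind this prints 0x5555 for
CPU 0 and 0xaaaa for CPU 1: even-numbered RX and TX queues belong to
CPU 0, odd-numbered ones to CPU 1.
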
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -111,6 +111,7 @@
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 #define MVNETA_CPU_RXQ_ACCESS(rxq)          BIT(rxq)
+#define MVNETA_CPU_TXQ_ACCESS(txq)          BIT(txq + 8)
 #define MVNETA_RXQ_TIME_COAL_REG(q)         (0x2580 + ((q) << 2))
 
 /* Exception Interrupt Port/Queue Cause register
@@ -514,6 +515,9 @@ struct mvneta_tx_queue {
 
         /* DMA address of TSO headers */
         dma_addr_t tso_hdrs_phys;
+
+        /* Affinity mask for CPUs */
+        cpumask_t affinity_mask;
 };
 
 struct mvneta_rx_queue {
@@ -1066,20 +1070,30 @@ static void mvneta_defaults_set(struct m
         /* Enable MBUS Retry bit16 */
         mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
 
-        /* Set CPU queue access map. CPUs are assigned to the RX
-         * queues modulo their number and all the TX queues are
-         * assigned to the CPU associated to the default RX queue.
+        /* Set CPU queue access map. CPUs are assigned to the RX and
+         * TX queues modulo their number. If there is only one TX
+         * queue then it is assigned to the CPU associated with the
+         * default RX queue.
          */
         for_each_present_cpu(cpu) {
                 int rxq_map = 0, txq_map = 0;
-                int rxq;
+                int rxq, txq;
 
                 for (rxq = 0; rxq < rxq_number; rxq++)
                         if ((rxq % max_cpu) == cpu)
                                 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-                if (cpu == pp->rxq_def)
-                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+                for (txq = 0; txq < txq_number; txq++)
+                        if ((txq % max_cpu) == cpu)
+                                txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+                /* With only one TX queue we configure a special case
+                 * which allows all the irqs to be handled on a single
+                 * CPU
+                 */
+                if (txq_number == 1)
+                        txq_map = (cpu == pp->rxq_def) ?
+                                MVNETA_CPU_TXQ_ACCESS(1) : 0;
 
                 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
         }
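
A subtlety in the hunk above: when txq_number == 1 the modulo mapping
is thrown away and only the CPU owning the default RX queue keeps TX
access, so all TX interrupts are raised on that one CPU, and the bit
granted is TXQ 1's access bit (bit 9). A minimal check of that
arithmetic (illustration only; rxq_def and the CPU count are assumed
values):

/* Illustration only: the single-TX-queue special case above. */
#include <assert.h>

#define BIT(n)              (1u << (n))
#define CPU_TXQ_ACCESS(txq) BIT((txq) + 8)

int main(void)
{
        const int rxq_def = 0;  /* assumed default RX queue */

        for (int cpu = 0; cpu < 2; cpu++) {
                unsigned int txq_map = (cpu == rxq_def) ?
                        CPU_TXQ_ACCESS(1) : 0;

                /* only CPU 0 gets a TX access bit, namely BIT(9) */
                assert(txq_map == (cpu == 0 ? 0x200u : 0u));
        }
        return 0;
}
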
@@ -2366,6 +2380,8 @@ static void mvneta_rxq_deinit(struct mvn
 static int mvneta_txq_init(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
 {
+        int cpu;
+
         txq->size = pp->tx_ring_size;
 
         /* A queue must always have room for at least one skb.
@@ -2418,6 +2434,14 @@ static int mvneta_txq_init(struct mvneta
         }
         mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
+        /* Setup XPS mapping */
+        if (txq_number > 1)
+                cpu = txq->id % num_present_cpus();
+        else
+                cpu = pp->rxq_def % num_present_cpus();
+        cpumask_set_cpu(cpu, &txq->affinity_mask);
+        netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
         return 0;
 }
 
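The second hunk of mvneta_txq_init() is where the XPS part of the
commit message happens: netif_set_xps_queue() tells the stack which
CPUs should select this hardware queue on transmit, matching the IRQ
affinity programmed into MVNETA_CPU_MAP. A sketch of the generic
pattern for a hypothetical multiqueue driver (example_setup_xps and
num_txq are illustrative names, not mvneta code):

/* Illustration only: pin TX queue i to CPU (i % present CPUs), the
 * same modulo rule the patch uses, via the in-kernel XPS API.
 */
#include <linux/netdevice.h>
#include <linux/cpumask.h>

static void example_setup_xps(struct net_device *dev, int num_txq)
{
        int i;

        for (i = 0; i < num_txq; i++) {
                cpumask_t mask;

                cpumask_clear(&mask);
                cpumask_set_cpu(i % num_present_cpus(), &mask);
                netif_set_xps_queue(dev, &mask, i);
        }
}

The resulting mapping can be inspected (and overridden) from userspace
through /sys/class/net/<dev>/queues/tx-<n>/xps_cpus.
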
@@ -2840,13 +2864,23 @@ static void mvneta_percpu_elect(struct m
                         if ((rxq % max_cpu) == cpu)
                                 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-                if (i == online_cpu_idx) {
-                        /* Map the default receive queue and transmit
-                         * queue to the elected CPU
+                if (i == online_cpu_idx)
+                        /* Map the default receive queue to the
+                         * elected CPU
                          */
                         rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
-                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
-                }
+
+                /* We update the TX queue map only if we have one
+                 * queue. In this case we associate the TX queue with
+                 * the CPU bound to the default RX queue
+                 */
+                if (txq_number == 1)
+                        txq_map = (i == online_cpu_idx) ?
+                                MVNETA_CPU_TXQ_ACCESS(1) : 0;
+                else
+                        txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+                                MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
                 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 
                 /* Update the interrupt mask on each CPU according the
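
Worth noting in the mvneta_percpu_elect() hunk above: with more than
one TX queue the election path no longer hands all TX queues to the
elected CPU; it preserves the mapping programmed at init time by
reading MVNETA_CPU_MAP back and keeping only the TX byte. A minimal
check of that read-modify-write (illustration only; the register value
is an assumed example):

/* Illustration only: TX half of MVNETA_CPU_MAP survives re-election. */
#include <assert.h>

#define CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00u

int main(void)
{
        unsigned int cpu_map = 0x5555; /* assumed current MVNETA_CPU_MAP */
        unsigned int rxq_map = 0x0001; /* freshly computed RX ownership */
        unsigned int txq_map = cpu_map & CPU_TXQ_ACCESS_ALL_MASK;

        assert(txq_map == 0x5500);              /* TX bits kept as-is */
        assert((rxq_map | txq_map) == 0x5501);  /* value written back */
        return 0;
}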