From 72c050acbc425ef99313d5c2e4c866e25567e569 Mon Sep 17 00:00:00 2001
From: Rakesh Nair <ranair@codeaurora.org>
Date: Thu, 8 Jun 2017 14:29:20 +0530
Subject: [PATCH] CHROMIUM: net: qualcomm: Add fix for memory allocation issues

Added ethtool counters for memory allocation failures accounting.
Added support to track number of allocation failures that could
not be fulfilled in the current iteration in the rx descriptor
field and use the info to allocate in the subsequent iteration.

Change-Id: Ie4fd3b6cf25304e5db2c9247a498791e7e9bb4aa
Signed-off-by: Rakesh Nair <ranair@codeaurora.org>
Signed-off-by: Kan Yan <kyan@google.com>
Reviewed-on: https://chromium-review.googlesource.com/535419
Reviewed-by: Grant Grundler <grundler@chromium.org>
---
 drivers/net/ethernet/qualcomm/essedma/edma.c | 54 ++++++++++++++-----
 drivers/net/ethernet/qualcomm/essedma/edma.h |  2 +
 .../ethernet/qualcomm/essedma/edma_ethtool.c |  1 +
 3 files changed, 43 insertions(+), 14 deletions(-)

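Note: the following is a minimal, standalone C sketch of the bookkeeping this
patch introduces, for readers skimming the diff: an allocation shortfall is
remembered in the RX ring ("pending_fill"), folded into the next poll
iteration's refill request, and polling is kept scheduled until the deficit
clears. The names demo_ring, demo_refill and demo_poll (and the fake
allocation budget) are illustrative only and do not exist in the driver.

/*
 * Sketch of the refill-deficit pattern: a shortfall is remembered per ring
 * and folded into the next iteration's refill request until it reaches zero.
 */
#include <stdio.h>

struct demo_ring {
	unsigned int pending_fill;	/* buffers still owed from earlier passes */
};

/* Pretend refill: only 'budget' allocations succeed; return the shortfall. */
static unsigned int demo_refill(unsigned int wanted, unsigned int budget)
{
	return wanted > budget ? wanted - budget : 0;
}

/* One poll pass: fold in the old deficit, record the new one. */
static unsigned int demo_poll(struct demo_ring *ring, unsigned int cleaned,
			      unsigned int alloc_budget)
{
	unsigned int want = cleaned + ring->pending_fill;

	ring->pending_fill = demo_refill(want, alloc_budget);
	return ring->pending_fill;	/* nonzero => caller keeps polling */
}

int main(void)
{
	struct demo_ring ring = { .pending_fill = 0 };
	unsigned int cleaned[] = { 16, 16, 0 };	/* descriptors cleaned per pass */
	unsigned int budget[]  = { 10, 16, 16 };	/* allocator successes per pass */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int left = demo_poll(&ring, cleaned[i], budget[i]);

		printf("pass %u: pending_fill=%u%s\n",
		       i, left, left ? " -> keep polling" : "");
	}
	return 0;
}

Running it prints a nonzero pending_fill for the first two passes and zero
once the allocator catches up, which mirrors why edma_rx_complete() now
returns erdr->pending_fill and why edma_poll() forces work_done = budget
whenever that value is nonzero.
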
--- a/drivers/net/ethernet/qualcomm/essedma/edma.c
+++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
@@ -103,6 +103,9 @@ static int edma_alloc_rx_ring(struct edm
 		return -ENOMEM;
 	}
 
+	/* Initialize pending_fill */
+	erxd->pending_fill = 0;
+
 	return 0;
 }
 
@@ -185,11 +188,8 @@ static int edma_alloc_rx_buf(struct edma
 	u16 prod_idx, length;
 	u32 reg_data;
 
-	if (cleaned_count > erdr->count) {
-		dev_err(&pdev->dev, "Incorrect cleaned_count %d",
-			cleaned_count);
-		return -1;
-	}
+	if (cleaned_count > erdr->count)
+		cleaned_count = erdr->count - 1;
 
 	i = erdr->sw_next_to_fill;
 
@@ -199,6 +199,9 @@ static int edma_alloc_rx_buf(struct edma
 
 		if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
 			skb = sw_desc->skb;
+
+			/* Clear REUSE Flag */
+			sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
 		} else {
 			/* alloc skb */
 			skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
@@ -264,6 +267,13 @@ static int edma_alloc_rx_buf(struct edma
 	reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
 	reg_data |= prod_idx;
 	edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
+
+	/* If we couldn't allocate all the buffers
+	 * we increment the alloc failure counters
+	 */
+	if (cleaned_count)
+		edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
+
 	return cleaned_count;
 }
 
@@ -534,7 +544,7 @@ static int edma_rx_complete_paged(struct
  * edma_rx_complete()
  * Main api called from the poll function to process rx packets.
  */
-static void edma_rx_complete(struct edma_common_info *edma_cinfo,
+static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
 		int *work_done, int work_to_do, int queue_id,
 		struct napi_struct *napi)
 {
@@ -554,6 +564,7 @@ static void edma_rx_complete(struct edma
 	u16 count = erdr->count, rfd_avail;
 	u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
 
+	cleaned_count = erdr->pending_fill;
 	sw_next_to_clean = erdr->sw_next_to_clean;
 
 	edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
@@ -652,12 +663,13 @@ static void edma_rx_complete(struct edma
 				(*work_done)++;
 				drop_count = 0;
 			}
-			if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+			if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
 				/* If buffer clean count reaches 16, we replenish HW buffers. */
 				ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
 				edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
 						sw_next_to_clean);
 				cleaned_count = ret_count;
+				erdr->pending_fill = ret_count;
 			}
 			continue;
 		}
@@ -730,11 +742,12 @@ static void edma_rx_complete(struct edma
 		adapter->stats.rx_bytes += length;
 
 		/* Check if we reached refill threshold */
-		if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+		if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
 			ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
 			edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
 					sw_next_to_clean);
 			cleaned_count = ret_count;
+			erdr->pending_fill = ret_count;
 		}
 
 		/* At this point skb should go to stack */
@@ -756,11 +769,17 @@ static void edma_rx_complete(struct edma
 	/* Refill here in case refill threshold wasn't reached */
 	if (likely(cleaned_count)) {
 		ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
-		if (ret_count)
-			dev_dbg(&pdev->dev, "Not all buffers was reallocated");
+		erdr->pending_fill = ret_count;
+		if (ret_count) {
+			if (net_ratelimit())
+				dev_dbg(&pdev->dev, "Not all buffers was reallocated");
+		}
+
 		edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
 				erdr->sw_next_to_clean);
 	}
+
+	return erdr->pending_fill;
 }
 
 /* edma_delete_rfs_filter()
@@ -2064,6 +2083,7 @@ int edma_poll(struct napi_struct *napi,
 	u32 shadow_rx_status, shadow_tx_status;
 	int queue_id;
 	int i, work_done = 0;
+	u16 rx_pending_fill;
 
 	/* Store the Rx/Tx status by ANDing it with
 	 * appropriate CPU RX?TX mask
@@ -2097,13 +2117,19 @@
 	 */
 	while (edma_percpu_info->rx_status) {
 		queue_id = ffs(edma_percpu_info->rx_status) - 1;
-		edma_rx_complete(edma_cinfo, &work_done,
-				budget, queue_id, napi);
+		rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
+				budget, queue_id, napi);
 
-		if (likely(work_done < budget))
+		if (likely(work_done < budget)) {
+			if (rx_pending_fill) {
+				/* reschedule poll() to refill rx buffer deficit */
+				work_done = budget;
+				break;
+			}
 			edma_percpu_info->rx_status &= ~(1 << queue_id);
-		else
+		} else {
 			break;
+		}
 	}
 
 	/* Clear the status register, to avoid the interrupts to
--- a/drivers/net/ethernet/qualcomm/essedma/edma.h
+++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
@@ -225,6 +225,7 @@ struct edma_ethtool_statistics {
 	u32 rx_q6_byte;
 	u32 rx_q7_byte;
 	u32 tx_desc_error;
+	u32 rx_alloc_fail_ctr;
 };
 
 struct edma_mdio_data {
@@ -362,6 +363,7 @@ struct edma_rfd_desc_ring {
 	dma_addr_t dma; /* descriptor ring physical address */
 	u16 sw_next_to_fill; /* next descriptor to fill */
 	u16 sw_next_to_clean; /* next descriptor to clean */
+	u16 pending_fill; /* fill pending from previous iteration */
 };
 
 /* edma_rfs_flter_node - rfs filter node in hash table */
--- a/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
+++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
@@ -78,6 +78,7 @@ static const struct edma_ethtool_stats e
 	{"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
 	{"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
 	{"tx_desc_error", EDMA_STAT(tx_desc_error)},
+	{"rx_alloc_fail_ctr", EDMA_STAT(rx_alloc_fail_ctr)},
 };
 
 #define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)