1 From 72c050acbc425ef99313d5c2e4c866e25567e569 Mon Sep 17 00:00:00 2001
2 From: Rakesh Nair <ranair@codeaurora.org>
3 Date: Thu, 8 Jun 2017 14:29:20 +0530
4 Subject: [PATCH] CHROMIUM: net: qualcomm: Add fix for memory allocation issues
6 Added an ethtool counter to account for memory allocation failures.
7 Added support to track, in a field of the rx descriptor ring, the
8 number of buffer allocations that could not be fulfilled in the
9 current iteration, and to use that count to retry the allocation
9 in a subsequent iteration.
11 Change-Id: Ie4fd3b6cf25304e5db2c9247a498791e7e9bb4aa
12 Signed-off-by: Rakesh Nair <ranair@codeaurora.org>
13 Signed-off-by: Kan Yan <kyan@google.com>
14 Reviewed-on: https://chromium-review.googlesource.com/535419
15 Reviewed-by: Grant Grundler <grundler@chromium.org>
17 drivers/net/ethernet/qualcomm/essedma/edma.c | 54 ++++++++++++++-----
18 drivers/net/ethernet/qualcomm/essedma/edma.h | 2 +
19 .../ethernet/qualcomm/essedma/edma_ethtool.c | 1 +
20 3 files changed, 43 insertions(+), 14 deletions(-)
22 --- a/drivers/net/ethernet/qualcomm/essedma/edma.c
23 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
24 @@ -103,6 +103,9 @@ static int edma_alloc_rx_ring(struct edm
28 + /* Initialize pending_fill */
29 + erxd->pending_fill = 0;
34 @@ -185,11 +188,8 @@ static int edma_alloc_rx_buf(struct edma
38 - if (cleaned_count > erdr->count) {
39 - dev_err(&pdev->dev, "Incorrect cleaned_count %d",
43 + if (cleaned_count > erdr->count)
44 + cleaned_count = erdr->count - 1;
46 i = erdr->sw_next_to_fill;
48 @@ -199,6 +199,9 @@ static int edma_alloc_rx_buf(struct edma
50 if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
53 + /* Clear REUSE Flag */
54 + sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
57 skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
58 @@ -264,6 +267,13 @@ static int edma_alloc_rx_buf(struct edma
59 reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
61 edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
63 + /* If we couldn't allocate all the buffers
64 + * we increment the alloc failure counters
67 + edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
72 @@ -534,7 +544,7 @@ static int edma_rx_complete_paged(struct
74 * Main api called from the poll function to process rx packets.
76 -static void edma_rx_complete(struct edma_common_info *edma_cinfo,
77 +static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
78 int *work_done, int work_to_do, int queue_id,
79 struct napi_struct *napi)
81 @@ -554,6 +564,7 @@ static void edma_rx_complete(struct edma
82 u16 count = erdr->count, rfd_avail;
83 u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
85 + cleaned_count = erdr->pending_fill;
86 sw_next_to_clean = erdr->sw_next_to_clean;
88 edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
89 @@ -652,12 +663,13 @@ static void edma_rx_complete(struct edma
93 - if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
94 + if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
95 /* If buffer clean count reaches 16, we replenish HW buffers. */
96 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
97 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
99 cleaned_count = ret_count;
100 + erdr->pending_fill = ret_count;
104 @@ -730,11 +742,12 @@ static void edma_rx_complete(struct edma
105 adapter->stats.rx_bytes += length;
107 /* Check if we reached refill threshold */
108 - if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
109 + if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
110 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
111 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
113 cleaned_count = ret_count;
114 + erdr->pending_fill = ret_count;
117 /* At this point skb should go to stack */
118 @@ -756,11 +769,17 @@ static void edma_rx_complete(struct edma
119 /* Refill here in case refill threshold wasn't reached */
120 if (likely(cleaned_count)) {
121 ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
123 - dev_dbg(&pdev->dev, "Not all buffers was reallocated");
124 + erdr->pending_fill = ret_count;
126 + if (net_ratelimit())
127 + dev_dbg(&pdev->dev, "Not all buffers was reallocated");
130 edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
131 erdr->sw_next_to_clean);
134 + return erdr->pending_fill;
137 /* edma_delete_rfs_filter()
138 @@ -2064,6 +2083,7 @@ int edma_poll(struct napi_struct *napi,
139 u32 shadow_rx_status, shadow_tx_status;
141 int i, work_done = 0;
142 + u16 rx_pending_fill;
144 /* Store the Rx/Tx status by ANDing it with
145 * appropriate CPU RX?TX mask
146 @@ -2097,13 +2117,19 @@ int edma_poll(struct napi_struct *napi,
148 while (edma_percpu_info->rx_status) {
149 queue_id = ffs(edma_percpu_info->rx_status) - 1;
150 - edma_rx_complete(edma_cinfo, &work_done,
151 - budget, queue_id, napi);
152 + rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
153 + budget, queue_id, napi);
155 - if (likely(work_done < budget))
156 + if (likely(work_done < budget)) {
157 + if (rx_pending_fill) {
158 + /* reschedule poll() to refill rx buffer deficit */
159 + work_done = budget;
162 edma_percpu_info->rx_status &= ~(1 << queue_id);
169 /* Clear the status register, to avoid the interrupts to
170 --- a/drivers/net/ethernet/qualcomm/essedma/edma.h
171 +++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
172 @@ -225,6 +225,7 @@ struct edma_ethtool_statistics {
176 + u32 rx_alloc_fail_ctr;
179 struct edma_mdio_data {
180 @@ -362,6 +363,7 @@ struct edma_rfd_desc_ring {
181 dma_addr_t dma; /* descriptor ring physical address */
182 u16 sw_next_to_fill; /* next descriptor to fill */
183 u16 sw_next_to_clean; /* next descriptor to clean */
184 + u16 pending_fill; /* fill pending from previous iteration */
187 /* edma_rfs_flter_node - rfs filter node in hash table */
188 --- a/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
189 +++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
190 @@ -78,6 +78,7 @@ static const struct edma_ethtool_stats e
191 {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
192 {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
193 {"tx_desc_error", EDMA_STAT(tx_desc_error)},
194 + {"rx_alloc_fail_ctr", EDMA_STAT(rx_alloc_fail_ctr)},
197 #define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)