kernel: bump 5.4 to 5.4.109
[openwrt/openwrt.git] target/linux/layerscape/patches-5.4/701-net-0198-dpaa2-eth-Update-FQ-taildrop-threshold-and-buffer-po.patch
From 0c7cb8b132f28cd150eb578a73c959de736364a2 Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Mon, 16 Sep 2019 13:15:02 +0300
Subject: [PATCH] dpaa2-eth: Update FQ taildrop threshold and buffer pool count

Now that we have congestion group taildrop configured at all
times, we can afford to increase the frame queue taildrop
threshold; this will ensure a better response when receiving
bursts of large-sized frames.

Also decouple the buffer pool count from the Rx FQ taildrop
threshold, as the above change would increase it too much. Instead,
keep the old count as a hardcoded value.

With the new limits, we try to ensure that:
* we allow enough leeway for large frame bursts (by buffering
  enough of them in queues to avoid heavy dropping in case of
  bursty traffic, as long as overall ingress bandwidth is manageable)
* allow pending frames to be evenly spread between ingress FQs,
  regardless of frame size
* avoid dropping frames due to the buffer pool being empty; this
  is not a bad behaviour per se, but overall system response is
  more linear and predictable when frames are dropped at frame
  queue/group level.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -35,24 +35,24 @@
 /* Convert L3 MTU to L2 MFL */
 #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)

-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
  */
-#define DPAA2_ETH_FQ_TAILDROP_THRESH (64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)

 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
  */
 #define DPAA2_ETH_TXCONF_PER_NAPI 256

-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
+/* Buffer quota per channel. We want to keep in check the number of ingress
+ * frames in flight: for small sized frames, congestion group taildrop may
+ * kick in first; for large sizes, Rx FQ taildrop threshold will ensure only
+ * a reasonable number of frames will be pending at any given time.
+ * Ingress frame drop due to buffer pool depletion should be a corner case only
  */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_NUM_BUFS 1280
 #define DPAA2_ETH_REFILL_THRESH \
 	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)

@@ -62,8 +62,7 @@
  * taildrop kicks in
  */
 #define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
-	(DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) / \
-	 dpaa2_eth_tc_count(priv))
+	(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))

 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
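
For reference, below is a minimal user-space sketch (not part of the patch) showing how the new limits relate to each other. It re-declares the macros with the values introduced above; the queue and traffic class counts are plain parameters standing in for the driver's dpaa2_eth_queue_count()/dpaa2_eth_tc_count() helpers, and the QBMan batch size of 7 buffers per command is taken as an assumption from the driver.

#include <stdio.h>

/* Values introduced by this patch */
#define DPAA2_ETH_FQ_TAILDROP_THRESH	(1024 * 1024)	/* per-FQ limit, bytes */
#define DPAA2_ETH_NUM_BUFS		1280		/* buffer quota per channel */

/* Assumed: buffers moved per QBMan acquire/release command */
#define DPAA2_ETH_BUFS_PER_CMD		7
#define DPAA2_ETH_REFILL_THRESH \
	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)

/* Congestion group taildrop, in frames; queue/tc counts are passed explicitly
 * here instead of being read from the driver's private structure
 */
#define DPAA2_ETH_CG_TAILDROP_THRESH(queues, tcs) \
	(1024 * (queues) / (tcs))

int main(void)
{
	int queues = 8, tcs = 1;	/* example configuration, assumed */

	printf("FQ taildrop:      %d bytes\n", DPAA2_ETH_FQ_TAILDROP_THRESH);
	printf("CG taildrop:      %d frames\n",
	       DPAA2_ETH_CG_TAILDROP_THRESH(queues, tcs));
	printf("Buffer quota:     %d buffers per channel\n", DPAA2_ETH_NUM_BUFS);
	printf("Refill threshold: %d buffers\n", DPAA2_ETH_REFILL_THRESH);
	return 0;
}

With the assumed 8 queues and a single traffic class, the congestion group limit comes out to 8192 frames, while the buffer pool stays fixed at 1280 buffers per channel, no longer tied to the 1 MiB per-FQ byte threshold.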