kernel: Backport mvneta crash fix to 5.15
author Marek Behún <kabel@kernel.org>
Wed, 12 Apr 2023 11:01:25 +0000 (13:01 +0200)
committer Christian Lamparter <chunkeey@gmail.com>
Thu, 8 Jun 2023 13:33:14 +0000 (15:33 +0200)
Backport Russell King's series [1]
  net: mvneta: reduce size of TSO header allocation
to pending-5.15 to fix random crashes on Turris Omnia.

This also backports two patches that are dependencies to this series:
  net: mvneta: Delete unused variable
  net: mvneta: fix potential double-frees in mvneta_txq_sw_deinit()

[1] https://lore.kernel.org/netdev/ZCsbJ4nG+So%2Fn9qY@shell.armlinux.org.uk/

Signed-off-by: Marek Behún <kabel@kernel.org>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com> (squashed)
(cherry picked from commit 7b31c2e9ed4da7bfeecbd393c17c249eca870717)

12 files changed:
target/linux/generic/backport-5.15/708-01-v5.16-net-mvneta-Delete-unused-variable.patch [new file with mode: 0644]
target/linux/generic/backport-5.15/708-02-v6.3-net-mvneta-fix-potential-double-frees-in-mvneta_txq_.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-01-v6.4-net-mvneta-fix-transmit-path-dma-unmapping-on-error.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-02-v6.4-net-mvneta-mark-mapped-and-tso-buffers-separately.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-03-v6.4-net-mvneta-use-buf-type-to-determine-whether-to-dma-.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-04-v6.4-net-mvneta-move-tso_build_hdr-into-mvneta_tso_put_hd.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-05-v6.4-net-mvneta-allocate-TSO-header-DMA-memory-in-chunks.patch [new file with mode: 0644]
target/linux/mvebu/patches-5.15/700-mvneta-tx-queue-workaround.patch
target/linux/mvebu/patches-5.15/702-net-next-ethernet-marvell-mvnetaMQPrioOffload.patch
target/linux/mvebu/patches-5.15/703-net-next-ethernet-marvell-mvnetaMQPrioFlag.patch
target/linux/mvebu/patches-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch
target/linux/mvebu/patches-5.15/705-net-next-ethernet-marvell-mvnetaMQPrioTCOffload.patch

diff --git a/target/linux/generic/backport-5.15/708-01-v5.16-net-mvneta-Delete-unused-variable.patch b/target/linux/generic/backport-5.15/708-01-v5.16-net-mvneta-Delete-unused-variable.patch
new file mode 100644 (file)
index 0000000..421563e
--- /dev/null
@@ -0,0 +1,62 @@
+From 43ed6fff01333868a1d0e19876f67c22d9939952 Mon Sep 17 00:00:00 2001
+From: Yuval Shaia <yshaia@marvell.com>
+Date: Wed, 13 Oct 2021 09:49:21 +0300
+Subject: [PATCH] net: mvneta: Delete unused variable
+
+The variable pp is not in use - delete it.
+
+Signed-off-by: Yuval Shaia <yshaia@marvell.com>
+Link: https://lore.kernel.org/r/20211013064921.26346-1-yshaia@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvnet
+ }
+ /* Handle tx checksum */
+-static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
++static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
+ {
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int ip_hdr_len = 0;
+@@ -2595,8 +2595,7 @@ err_drop_frame:
+ }
+ static inline void
+-mvneta_tso_put_hdr(struct sk_buff *skb,
+-                 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
++mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
+ {
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+@@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+-      tx_desc->command = mvneta_skb_tx_csum(pp, skb);
++      tx_desc->command = mvneta_skb_tx_csum(skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+       tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+                                txq->txq_put_index * TSO_HEADER_SIZE;
+@@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff
+               hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+-              mvneta_tso_put_hdr(skb, pp, txq);
++              mvneta_tso_put_hdr(skb, txq);
+               while (data_left > 0) {
+                       int size;
+@@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_b
+       /* Get a descriptor for the first part of the packet */
+       tx_desc = mvneta_txq_next_desc_get(txq);
+-      tx_cmd = mvneta_skb_tx_csum(pp, skb);
++      tx_cmd = mvneta_skb_tx_csum(skb);
+       tx_desc->data_size = skb_headlen(skb);
diff --git a/target/linux/generic/backport-5.15/708-02-v6.3-net-mvneta-fix-potential-double-frees-in-mvneta_txq_.patch b/target/linux/generic/backport-5.15/708-02-v6.3-net-mvneta-fix-potential-double-frees-in-mvneta_txq_.patch
new file mode 100644 (file)
index 0000000..a16e68e
--- /dev/null
@@ -0,0 +1,37 @@
+From 0cf39c6543469aae4a30cba354243125514ed568 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Wed, 29 Mar 2023 13:11:17 +0100
+Subject: [PATCH] net: mvneta: fix potential double-frees in
+ mvneta_txq_sw_deinit()
+
+Reported on the Turris forum, mvneta provokes kernel warnings in the
+architecture DMA mapping code when mvneta_setup_txqs() fails to
+allocate memory. This happens because when mvneta_cleanup_txqs() is
+called in the mvneta_stop() path, we leave pointers in the structure
+that have been freed.
+
+Then on mvneta_open(), we call mvneta_setup_txqs(), which starts
+allocating memory. On memory allocation failure, mvneta_cleanup_txqs()
+will walk all the queues freeing any non-NULL pointers - which includes
+pointers that were previously freed in mvneta_stop().
+
+Fix this by setting these pointers to NULL to prevent double-freeing
+of the same memory.
+
+Link: https://forum.turris.cz/t/random-kernel-exceptions-on-hbl-tos-7-0/18865/8
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3481,6 +3481,8 @@ static void mvneta_txq_sw_deinit(struct
+       netdev_tx_reset_queue(nq);
++      txq->buf               = NULL;
++      txq->tso_hdrs          = NULL;
+       txq->descs             = NULL;
+       txq->last_desc         = 0;
+       txq->next_desc_to_proc = 0;
diff --git a/target/linux/generic/pending-5.15/704-01-v6.4-net-mvneta-fix-transmit-path-dma-unmapping-on-error.patch b/target/linux/generic/pending-5.15/704-01-v6.4-net-mvneta-fix-transmit-path-dma-unmapping-on-error.patch
new file mode 100644 (file)
index 0000000..287728b
--- /dev/null
@@ -0,0 +1,111 @@
+From d6d80269cf5c79f9dfe7d69f8b41a72015c89748 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 3 Apr 2023 19:30:20 +0100
+Subject: [PATCH 1/5] net: mvneta: fix transmit path dma-unmapping on error
+
+The transmit code assumes that the transmit descriptors that are used
+begin with the first descriptor in the ring, but this may not be the
+case. Fix this by providing a new function that dma-unmaps a range of
+numbered descriptor entries, and use that to do the unmapping.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 53 +++++++++++++++++----------
+ 1 file changed, 33 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2647,14 +2647,40 @@ mvneta_tso_put_data(struct net_device *d
+       return 0;
+ }
++static void mvneta_release_descs(struct mvneta_port *pp,
++                               struct mvneta_tx_queue *txq,
++                               int first, int num)
++{
++      int desc_idx, i;
++
++      desc_idx = first + num;
++      if (desc_idx >= txq->size)
++              desc_idx -= txq->size;
++
++      for (i = num; i >= 0; i--) {
++              struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
++
++              if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
++                      dma_unmap_single(pp->dev->dev.parent,
++                                       tx_desc->buf_phys_addr,
++                                       tx_desc->data_size,
++                                       DMA_TO_DEVICE);
++
++              mvneta_txq_desc_put(txq);
++
++              if (desc_idx == 0)
++                      desc_idx = txq->size;
++              desc_idx -= 1;
++      }
++}
++
+ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+                        struct mvneta_tx_queue *txq)
+ {
+       int hdr_len, total_len, data_left;
+-      int desc_count = 0;
++      int first_desc, desc_count = 0;
+       struct mvneta_port *pp = netdev_priv(dev);
+       struct tso_t tso;
+-      int i;
+       /* Count needed descriptors */
+       if ((txq->count + tso_count_descs(skb)) >= txq->size)
+@@ -2665,6 +2691,8 @@ static int mvneta_tx_tso(struct sk_buff
+               return 0;
+       }
++      first_desc = txq->txq_put_index;
++
+       /* Initialize the TSO handler, and prepare the first payload */
+       hdr_len = tso_start(skb, &tso);
+@@ -2705,15 +2733,7 @@ err_release:
+       /* Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+-      for (i = desc_count - 1; i >= 0; i--) {
+-              struct mvneta_tx_desc *tx_desc = txq->descs + i;
+-              if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+-                      dma_unmap_single(pp->dev->dev.parent,
+-                                       tx_desc->buf_phys_addr,
+-                                       tx_desc->data_size,
+-                                       DMA_TO_DEVICE);
+-              mvneta_txq_desc_put(txq);
+-      }
++      mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
+       return 0;
+ }
+@@ -2723,6 +2743,7 @@ static int mvneta_tx_frag_process(struct
+ {
+       struct mvneta_tx_desc *tx_desc;
+       int i, nr_frags = skb_shinfo(skb)->nr_frags;
++      int first_desc = txq->txq_put_index;
+       for (i = 0; i < nr_frags; i++) {
+               struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+@@ -2761,15 +2782,7 @@ error:
+       /* Release all descriptors that were used to map fragments of
+        * this packet, as well as the corresponding DMA mappings
+        */
+-      for (i = i - 1; i >= 0; i--) {
+-              tx_desc = txq->descs + i;
+-              dma_unmap_single(pp->dev->dev.parent,
+-                               tx_desc->buf_phys_addr,
+-                               tx_desc->data_size,
+-                               DMA_TO_DEVICE);
+-              mvneta_txq_desc_put(txq);
+-      }
+-
++      mvneta_release_descs(pp, txq, first_desc, i - 1);
+       return -ENOMEM;
+ }
diff --git a/target/linux/generic/pending-5.15/704-02-v6.4-net-mvneta-mark-mapped-and-tso-buffers-separately.patch b/target/linux/generic/pending-5.15/704-02-v6.4-net-mvneta-mark-mapped-and-tso-buffers-separately.patch
new file mode 100644 (file)
index 0000000..4db3ffe
--- /dev/null
@@ -0,0 +1,42 @@
+From e3c77d0a1b635d114c147fd2078afb57ed558b81 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 3 Apr 2023 19:30:25 +0100
+Subject: [PATCH 2/5] net: mvneta: mark mapped and tso buffers separately
+
+Mark dma-mapped skbs and TSO buffers separately, so we can use
+buf->type to identify their differences.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -607,6 +607,7 @@ struct mvneta_rx_desc {
+ #endif
+ enum mvneta_tx_buf_type {
++      MVNETA_TYPE_TSO,
+       MVNETA_TYPE_SKB,
+       MVNETA_TYPE_XDP_TX,
+       MVNETA_TYPE_XDP_NDO,
+@@ -1852,7 +1853,8 @@ static void mvneta_txq_bufs_free(struct
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size, DMA_TO_DEVICE);
+-              if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
++              if ((buf->type == MVNETA_TYPE_TSO ||
++                   buf->type == MVNETA_TYPE_SKB) && buf->skb) {
+                       bytes_compl += buf->skb->len;
+                       pkts_compl++;
+                       dev_kfree_skb_any(buf->skb);
+@@ -2607,7 +2609,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+       tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+                                txq->txq_put_index * TSO_HEADER_SIZE;
+-      buf->type = MVNETA_TYPE_SKB;
++      buf->type = MVNETA_TYPE_TSO;
+       buf->skb = NULL;
+       mvneta_txq_inc_put(txq);
diff --git a/target/linux/generic/pending-5.15/704-03-v6.4-net-mvneta-use-buf-type-to-determine-whether-to-dma-.patch b/target/linux/generic/pending-5.15/704-03-v6.4-net-mvneta-use-buf-type-to-determine-whether-to-dma-.patch
new file mode 100644 (file)
index 0000000..37511ff
--- /dev/null
@@ -0,0 +1,59 @@
+From fe2abc1abc0dfc6c13fe8f189216f00dbbb33044 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 3 Apr 2023 19:30:30 +0100
+Subject: [PATCH 3/5] net: mvneta: use buf->type to determine whether to
+ dma-unmap
+
+Now that we use a different buffer type for TSO headers, we can use
+buf->type to determine whether the original buffer was DMA-mapped or
+not. The rules are:
+
+       MVNETA_TYPE_XDP_TX - from a DMA pool, no unmap is required
+       MVNETA_TYPE_XDP_NDO - dma_map_single()'d
+       MVNETA_TYPE_SKB - normal skbuff, dma_map_single()'d
+       MVNETA_TYPE_TSO - from the TSO buffer area
+
+This means we only need to call dma_unmap_single() on the XDP_NDO and
+SKB types of buffer, and we no longer need the private IS_TSO_HEADER()
+which relies on the TSO region being contiguously allocated.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -334,10 +334,6 @@
+                        MVNETA_SKB_HEADROOM))
+ #define MVNETA_MAX_RX_BUF_SIZE        (PAGE_SIZE - MVNETA_SKB_PAD)
+-#define IS_TSO_HEADER(txq, addr) \
+-      ((addr >= txq->tso_hdrs_phys) && \
+-       (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+-
+ #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
+       (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
+@@ -1848,8 +1844,8 @@ static void mvneta_txq_bufs_free(struct
+               mvneta_txq_inc_get(txq);
+-              if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+-                  buf->type != MVNETA_TYPE_XDP_TX)
++              if (buf->type == MVNETA_TYPE_XDP_NDO ||
++                  buf->type == MVNETA_TYPE_SKB)
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size, DMA_TO_DEVICE);
+@@ -2661,8 +2657,9 @@ static void mvneta_release_descs(struct
+       for (i = num; i >= 0; i--) {
+               struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
++              struct mvneta_tx_buf *buf = &txq->buf[desc_idx];
+-              if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
++              if (buf->type == MVNETA_TYPE_SKB)
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size,
diff --git a/target/linux/generic/pending-5.15/704-04-v6.4-net-mvneta-move-tso_build_hdr-into-mvneta_tso_put_hd.patch b/target/linux/generic/pending-5.15/704-04-v6.4-net-mvneta-move-tso_build_hdr-into-mvneta_tso_put_hd.patch
new file mode 100644 (file)
index 0000000..444b60f
--- /dev/null
@@ -0,0 +1,65 @@
+From 210ca75d4949f1ace8ea53a75148806cc28224a0 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 3 Apr 2023 19:30:35 +0100
+Subject: [PATCH 4/5] net: mvneta: move tso_build_hdr() into
+ mvneta_tso_put_hdr()
+
+Move tso_build_hdr() into mvneta_tso_put_hdr() so that all the TSO
+header building code is in one place.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2592,19 +2592,24 @@ err_drop_frame:
+       return rx_done;
+ }
+-static inline void
+-mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
++static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
++                             struct tso_t *tso, int size, bool is_last)
+ {
+-      int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++      int tso_offset, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+       struct mvneta_tx_desc *tx_desc;
++      char *hdr;
++
++      tso_offset = txq->txq_put_index * TSO_HEADER_SIZE;
++
++      hdr = txq->tso_hdrs + tso_offset;
++      tso_build_hdr(skb, hdr, tso, size, is_last);
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+       tx_desc->command = mvneta_skb_tx_csum(skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+-      tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+-                               txq->txq_put_index * TSO_HEADER_SIZE;
++      tx_desc->buf_phys_addr = txq->tso_hdrs_phys + tso_offset;
+       buf->type = MVNETA_TYPE_TSO;
+       buf->skb = NULL;
+@@ -2697,17 +2702,12 @@ static int mvneta_tx_tso(struct sk_buff
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+-              char *hdr;
+-
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+               /* prepare packet headers: MAC + IP + TCP */
+-              hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+-              tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+-
+-              mvneta_tso_put_hdr(skb, txq);
++              mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
+               while (data_left > 0) {
+                       int size;
diff --git a/target/linux/generic/pending-5.15/704-05-v6.4-net-mvneta-allocate-TSO-header-DMA-memory-in-chunks.patch b/target/linux/generic/pending-5.15/704-05-v6.4-net-mvneta-allocate-TSO-header-DMA-memory-in-chunks.patch
new file mode 100644 (file)
index 0000000..395a0bf
--- /dev/null
@@ -0,0 +1,179 @@
+From 58d50fb089da553023df5a05f5ae86feaacc7f24 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Mon, 3 Apr 2023 19:30:40 +0100
+Subject: [PATCH 5/5] net: mvneta: allocate TSO header DMA memory in chunks
+
+Now that we no longer need to check whether the DMA address is within
+the TSO header DMA memory range for the queue, we can allocate the TSO
+header DMA memory in chunks rather than one contiguous order-6 chunk,
+which can stress the kernel's memory subsystems to allocate.
+
+Instead, use order-1 (8k) allocations, which will result in 32 order-1
+pages containing 32 TSO headers.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 88 +++++++++++++++++++++------
+ 1 file changed, 70 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -314,6 +314,15 @@
+ #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
++/* The size of a TSO header page */
++#define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE)
++
++/* Number of TSO headers per page. This should be a power of 2 */
++#define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE)
++
++/* Maximum number of TSO header pages */
++#define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE)
++
+ /* descriptor aligned size */
+ #define MVNETA_DESC_ALIGNED_SIZE      32
+@@ -656,10 +665,10 @@ struct mvneta_tx_queue {
+       int next_desc_to_proc;
+       /* DMA buffers for TSO headers */
+-      char *tso_hdrs;
++      char *tso_hdrs[MVNETA_MAX_TSO_PAGES];
+       /* DMA address of TSO headers */
+-      dma_addr_t tso_hdrs_phys;
++      dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES];
+       /* Affinity mask for CPUs*/
+       cpumask_t affinity_mask;
+@@ -2592,24 +2601,71 @@ err_drop_frame:
+       return rx_done;
+ }
++static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
++                               struct mvneta_tx_queue *txq)
++{
++      struct device *dev = pp->dev->dev.parent;
++      int i;
++
++      for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) {
++              if (txq->tso_hdrs[i]) {
++                      dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
++                                        txq->tso_hdrs[i],
++                                        txq->tso_hdrs_phys[i]);
++                      txq->tso_hdrs[i] = NULL;
++              }
++      }
++}
++
++static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
++                               struct mvneta_tx_queue *txq)
++{
++      struct device *dev = pp->dev->dev.parent;
++      int i, num;
++
++      num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE);
++      for (i = 0; i < num; i++) {
++              txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
++                                                    &txq->tso_hdrs_phys[i],
++                                                    GFP_KERNEL);
++              if (!txq->tso_hdrs[i]) {
++                      mvneta_free_tso_hdrs(pp, txq);
++                      return -ENOMEM;
++              }
++      }
++
++      return 0;
++}
++
++static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
++{
++      int index, offset;
++
++      index = txq->txq_put_index / MVNETA_TSO_PER_PAGE;
++      offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE;
++
++      *dma = txq->tso_hdrs_phys[index] + offset;
++
++      return txq->tso_hdrs[index] + offset;
++}
++
+ static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
+                              struct tso_t *tso, int size, bool is_last)
+ {
+-      int tso_offset, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
++      int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+       struct mvneta_tx_desc *tx_desc;
++      dma_addr_t hdr_phys;
+       char *hdr;
+-      tso_offset = txq->txq_put_index * TSO_HEADER_SIZE;
+-
+-      hdr = txq->tso_hdrs + tso_offset;
++      hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
+       tso_build_hdr(skb, hdr, tso, size, is_last);
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+       tx_desc->command = mvneta_skb_tx_csum(skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+-      tx_desc->buf_phys_addr = txq->tso_hdrs_phys + tso_offset;
++      tx_desc->buf_phys_addr = hdr_phys;
+       buf->type = MVNETA_TYPE_TSO;
+       buf->skb = NULL;
+@@ -3401,7 +3457,7 @@ static void mvneta_rxq_deinit(struct mvn
+ static int mvneta_txq_sw_init(struct mvneta_port *pp,
+                             struct mvneta_tx_queue *txq)
+ {
+-      int cpu;
++      int cpu, err;
+       txq->size = pp->tx_ring_size;
+@@ -3426,11 +3482,9 @@ static int mvneta_txq_sw_init(struct mvn
+               return -ENOMEM;
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+-      txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+-                                         txq->size * TSO_HEADER_SIZE,
+-                                         &txq->tso_hdrs_phys, GFP_KERNEL);
+-      if (!txq->tso_hdrs)
+-              return -ENOMEM;
++      err = mvneta_alloc_tso_hdrs(pp, txq);
++      if (err)
++              return err;
+       /* Setup XPS mapping */
+       if (pp->neta_armada3700)
+@@ -3482,10 +3536,7 @@ static void mvneta_txq_sw_deinit(struct
+       kfree(txq->buf);
+-      if (txq->tso_hdrs)
+-              dma_free_coherent(pp->dev->dev.parent,
+-                                txq->size * TSO_HEADER_SIZE,
+-                                txq->tso_hdrs, txq->tso_hdrs_phys);
++      mvneta_free_tso_hdrs(pp, txq);
+       if (txq->descs)
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * MVNETA_DESC_ALIGNED_SIZE,
+@@ -3494,7 +3545,6 @@ static void mvneta_txq_sw_deinit(struct
+       netdev_tx_reset_queue(nq);
+       txq->buf               = NULL;
+-      txq->tso_hdrs          = NULL;
+       txq->descs             = NULL;
+       txq->last_desc         = 0;
+       txq->next_desc_to_proc = 0;
+@@ -5543,6 +5593,8 @@ static int __init mvneta_driver_init(voi
+ {
+       int ret;
++      BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE);
++
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
+                                     mvneta_cpu_online,
+                                     mvneta_cpu_down_prepare);
index 826f95d1c2ceefced061ffa570503ecd2a1cbf44..32e8ef4b7d872723531ce8aee6dd31bcddb4d620 100644 (file)
@@ -9,7 +9,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 ---
 --- a/drivers/net/ethernet/marvell/mvneta.c
 +++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -4943,6 +4943,16 @@ static int mvneta_setup_tc(struct net_de
+@@ -5006,6 +5006,16 @@ static int mvneta_setup_tc(struct net_de
        }
  }
  
@@ -26,7 +26,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
  static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
-@@ -4953,6 +4963,9 @@ static const struct net_device_ops mvnet
+@@ -5016,6 +5026,9 @@ static const struct net_device_ops mvnet
        .ndo_fix_features    = mvneta_fix_features,
        .ndo_get_stats64     = mvneta_get_stats64,
        .ndo_eth_ioctl        = mvneta_ioctl,
index c3a6d9f4d06b45871c53bd9a7318c7c7e08c94ed..019b9528c360e5716d5d1d120d0a7b57d5ef5869 100644 (file)
@@ -26,7 +26,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  #include <linux/bpf_trace.h>
  
  /* Registers */
-@@ -4903,14 +4904,14 @@ static void mvneta_setup_rx_prio_map(str
+@@ -4966,14 +4967,14 @@ static void mvneta_setup_rx_prio_map(str
  }
  
  static int mvneta_setup_mqprio(struct net_device *dev,
@@ -44,7 +44,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  
        if (num_tc > rxq_number)
                return -EINVAL;
-@@ -4921,13 +4922,15 @@ static int mvneta_setup_mqprio(struct ne
+@@ -4984,13 +4985,15 @@ static int mvneta_setup_mqprio(struct ne
                return 0;
        }
  
index 8ef585be9a122437e2df0276f3a42fd9346c689f..c878a2884392164eb9955883541ce3b99e7e7361 100644 (file)
@@ -17,7 +17,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 
 --- a/drivers/net/ethernet/marvell/mvneta.c
 +++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -4910,7 +4910,9 @@ static int mvneta_setup_mqprio(struct ne
+@@ -4973,7 +4973,9 @@ static int mvneta_setup_mqprio(struct ne
        u8 num_tc;
        int i;
  
index 196b986541e976e7bc7c6d1cb966c265c8d34e9e..546a8486ef0588165f0d22ab7acf0275d76e7761 100644 (file)
@@ -22,7 +22,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
 
 --- a/drivers/net/ethernet/marvell/mvneta.c
 +++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -493,7 +493,6 @@ struct mvneta_port {
+@@ -498,7 +498,6 @@ struct mvneta_port {
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;
@@ -30,7 +30,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  
        phy_interface_t phy_interface;
        struct device_node *dn;
-@@ -4892,13 +4891,12 @@ static void mvneta_clear_rx_prio_map(str
+@@ -4955,13 +4954,12 @@ static void mvneta_clear_rx_prio_map(str
        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
  }
  
@@ -48,7 +48,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  
        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
  }
-@@ -4907,8 +4905,8 @@ static int mvneta_setup_mqprio(struct ne
+@@ -4970,8 +4968,8 @@ static int mvneta_setup_mqprio(struct ne
                               struct tc_mqprio_qopt_offload *mqprio)
  {
        struct mvneta_port *pp = netdev_priv(dev);
@@ -58,7 +58,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  
        if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
                return 0;
-@@ -4918,21 +4916,28 @@ static int mvneta_setup_mqprio(struct ne
+@@ -4981,21 +4979,28 @@ static int mvneta_setup_mqprio(struct ne
        if (num_tc > rxq_number)
                return -EINVAL;
  
index d640da4d4490e4a6f8cad5c2e3189829189d3d0c..1d4a055a71938e9a37e71fc40898085890f9982b 100644 (file)
@@ -62,7 +62,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  #define MVNETA_LPI_CTRL_0                        0x2cc0
  #define MVNETA_LPI_CTRL_1                        0x2cc4
  #define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
-@@ -4901,11 +4928,74 @@ static void mvneta_map_vlan_prio_to_rxq(
+@@ -4964,11 +4991,74 @@ static void mvneta_map_vlan_prio_to_rxq(
        mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
  }
  
@@ -138,7 +138,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
        u8 num_tc;
  
        if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
-@@ -4919,6 +5009,7 @@ static int mvneta_setup_mqprio(struct ne
+@@ -4982,6 +5072,7 @@ static int mvneta_setup_mqprio(struct ne
        mvneta_clear_rx_prio_map(pp);
  
        if (!num_tc) {
@@ -146,7 +146,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
                netdev_reset_tc(dev);
                return 0;
        }
-@@ -4939,6 +5030,33 @@ static int mvneta_setup_mqprio(struct ne
+@@ -5002,6 +5093,33 @@ static int mvneta_setup_mqprio(struct ne
                }
        }