if (tx_q->dirty_tx != tx_q->cur_tx)
return -EBUSY; /* still unfinished work */
-@@ -1302,7 +1302,7 @@ static void stmmac_display_rx_rings(stru
+@@ -1308,7 +1308,7 @@ static void stmmac_display_rx_rings(stru
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
pr_info("\tRX Queue %u rings\n", queue);
-@@ -1315,7 +1315,7 @@ static void stmmac_display_rx_rings(stru
+@@ -1321,7 +1321,7 @@ static void stmmac_display_rx_rings(stru
}
/* Display RX ring */
rx_q->dma_rx_phy, desc_size);
}
}
-@@ -1329,7 +1329,7 @@ static void stmmac_display_tx_rings(stru
+@@ -1335,7 +1335,7 @@ static void stmmac_display_tx_rings(stru
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
pr_info("\tTX Queue %d rings\n", queue);
-@@ -1344,7 +1344,7 @@ static void stmmac_display_tx_rings(stru
+@@ -1350,7 +1350,7 @@ static void stmmac_display_tx_rings(stru
desc_size = sizeof(struct dma_desc);
}
tx_q->dma_tx_phy, desc_size);
}
}
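/*
 * Both display helpers above share one shape: walk every queue, print a
 * banner, size the dump by descriptor type, then hand the ring base to
 * the dumper.  A minimal sketch of that shape; the dump call's full
 * argument list is not visible in these hunks, so it is elided:
 */
static void sketch_display_rings(struct stmmac_priv *priv, u32 cnt, bool rx)
{
	u32 queue;

	for (queue = 0; queue < cnt; queue++) {
		unsigned int desc_size;

		pr_info("\t%s Queue %u rings\n", rx ? "RX" : "TX", queue);

		/* extended descriptors are larger, so the dump stride differs */
		if (priv->extend_desc)
			desc_size = sizeof(struct dma_extended_desc);
		else
			desc_size = sizeof(struct dma_desc);

		/* ... pass the ring head, dma_{rx,tx}_phy and desc_size on ... */
	}
}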
-@@ -1385,21 +1385,21 @@ static int stmmac_set_bfsize(int mtu, in
+@@ -1391,21 +1391,21 @@ static int stmmac_set_bfsize(int mtu, in
*/
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
}
/**
-@@ -1411,12 +1411,12 @@ static void stmmac_clear_rx_descriptors(
+@@ -1417,12 +1417,12 @@ static void stmmac_clear_rx_descriptors(
*/
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
struct dma_desc *p;
if (priv->extend_desc)
-@@ -1464,7 +1464,7 @@ static void stmmac_clear_descriptors(str
+@@ -1470,7 +1470,7 @@ static void stmmac_clear_descriptors(str
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->page) {
-@@ -1489,7 +1489,7 @@ static int stmmac_init_rx_buffers(struct
+@@ -1495,7 +1495,7 @@ static int stmmac_init_rx_buffers(struct
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
stmmac_init_desc3(priv, p);
return 0;
-@@ -1503,7 +1503,7 @@ static int stmmac_init_rx_buffers(struct
+@@ -1509,7 +1509,7 @@ static int stmmac_init_rx_buffers(struct
*/
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
-@@ -1523,7 +1523,7 @@ static void stmmac_free_rx_buffer(struct
+@@ -1529,7 +1529,7 @@ static void stmmac_free_rx_buffer(struct
*/
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
-@@ -1568,17 +1568,17 @@ static void dma_free_rx_skbufs(struct st
+@@ -1574,17 +1574,17 @@ static void dma_free_rx_skbufs(struct st
{
int i;
struct dma_desc *p;
int ret;
-@@ -1605,10 +1605,10 @@ static int stmmac_alloc_rx_buffers(struc
+@@ -1611,10 +1611,10 @@ static int stmmac_alloc_rx_buffers(struc
*/
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
-@@ -1621,10 +1621,10 @@ static void dma_free_rx_xskbufs(struct s
+@@ -1627,10 +1627,10 @@ static void dma_free_rx_xskbufs(struct s
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
-@@ -1667,7 +1667,7 @@ static struct xsk_buff_pool *stmmac_get_
+@@ -1673,7 +1673,7 @@ static struct xsk_buff_pool *stmmac_get_
*/
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
int ret;
netif_dbg(priv, probe, priv->dev,
-@@ -1713,11 +1713,11 @@ static int __init_dma_rx_desc_rings(stru
+@@ -1719,11 +1719,11 @@ static int __init_dma_rx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
}
return 0;
-@@ -1744,7 +1744,7 @@ static int init_dma_rx_desc_rings(struct
+@@ -1750,7 +1750,7 @@ static int init_dma_rx_desc_rings(struct
err_init_rx_buffers:
while (queue >= 0) {
if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue);
-@@ -1773,7 +1773,7 @@ err_init_rx_buffers:
+@@ -1779,7 +1779,7 @@ err_init_rx_buffers:
*/
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
int i;
netif_dbg(priv, probe, priv->dev,
-@@ -1785,16 +1785,16 @@ static int __init_dma_tx_desc_rings(stru
+@@ -1791,16 +1791,16 @@ static int __init_dma_tx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
struct dma_desc *p;
if (priv->extend_desc)
-@@ -1864,12 +1864,12 @@ static int init_dma_desc_rings(struct ne
+@@ -1870,12 +1870,12 @@ static int init_dma_desc_rings(struct ne
*/
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
stmmac_free_tx_buffer(priv, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
-@@ -1899,7 +1899,7 @@ static void stmmac_free_tx_skbufs(struct
+@@ -1905,7 +1905,7 @@ static void stmmac_free_tx_skbufs(struct
*/
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
-@@ -1912,11 +1912,11 @@ static void __free_dma_rx_desc_resources
+@@ -1918,11 +1918,11 @@ static void __free_dma_rx_desc_resources
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
-@@ -1945,7 +1945,7 @@ static void free_dma_rx_desc_resources(s
+@@ -1951,7 +1951,7 @@ static void free_dma_rx_desc_resources(s
*/
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
size_t size;
void *addr;
-@@ -1963,7 +1963,7 @@ static void __free_dma_tx_desc_resources
+@@ -1969,7 +1969,7 @@ static void __free_dma_tx_desc_resources
addr = tx_q->dma_tx;
}
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
-@@ -1992,7 +1992,7 @@ static void free_dma_tx_desc_resources(s
+@@ -1998,7 +1998,7 @@ static void free_dma_tx_desc_resources(s
*/
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
-@@ -2004,8 +2004,8 @@ static int __alloc_dma_rx_desc_resources
+@@ -2010,8 +2010,8 @@ static int __alloc_dma_rx_desc_resources
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
-@@ -2020,7 +2020,7 @@ static int __alloc_dma_rx_desc_resources
+@@ -2026,7 +2026,7 @@ static int __alloc_dma_rx_desc_resources
return ret;
}
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
-@@ -2028,7 +2028,7 @@ static int __alloc_dma_rx_desc_resources
+@@ -2034,7 +2034,7 @@ static int __alloc_dma_rx_desc_resources
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
-@@ -2037,7 +2037,7 @@ static int __alloc_dma_rx_desc_resources
+@@ -2043,7 +2043,7 @@ static int __alloc_dma_rx_desc_resources
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
-@@ -2094,20 +2094,20 @@ err_dma:
+@@ -2100,20 +2100,20 @@ err_dma:
*/
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
-@@ -2120,7 +2120,7 @@ static int __alloc_dma_tx_desc_resources
+@@ -2126,7 +2126,7 @@ static int __alloc_dma_tx_desc_resources
else
size = sizeof(struct dma_desc);
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
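/*
 * Every descriptor-ring allocation above funnels into dma_alloc_coherent():
 * size the ring by descriptor type, grab one coherent block, and keep the
 * bus address (dma_{rx,tx}_phy) that the DMA engine is programmed with.
 * Sketch only; the ring-length parameter is an assumption, since the
 * patch's own +/- lines are not shown in this refresh:
 */
static void *sketch_alloc_ring(struct device *dev, bool extend_desc,
			       unsigned int ring_len, dma_addr_t *phy)
{
	size_t size;

	if (extend_desc)
		size = ring_len * sizeof(struct dma_extended_desc);
	else
		size = ring_len * sizeof(struct dma_desc);

	/* zeroed, physically contiguous, coherent: ready to hand to the HW */
	return dma_alloc_coherent(dev, size, phy, GFP_KERNEL);
}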
-@@ -2364,7 +2364,7 @@ static void stmmac_dma_operation_mode(st
+@@ -2370,7 +2370,7 @@ static void stmmac_dma_operation_mode(st
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
-@@ -2379,7 +2379,7 @@ static void stmmac_dma_operation_mode(st
+@@ -2385,7 +2385,7 @@ static void stmmac_dma_operation_mode(st
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
chan);
}
}
-@@ -2395,7 +2395,7 @@ static void stmmac_dma_operation_mode(st
+@@ -2401,7 +2401,7 @@ static void stmmac_dma_operation_mode(st
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
-@@ -2470,7 +2470,7 @@ static bool stmmac_xdp_xmit_zc(struct st
+@@ -2476,7 +2476,7 @@ static bool stmmac_xdp_xmit_zc(struct st
stmmac_enable_dma_transmission(priv, priv->ioaddr);
entry = tx_q->cur_tx;
}
-@@ -2496,7 +2496,7 @@ static bool stmmac_xdp_xmit_zc(struct st
+@@ -2502,7 +2502,7 @@ static bool stmmac_xdp_xmit_zc(struct st
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
-@@ -2509,7 +2509,7 @@ static int stmmac_tx_clean(struct stmmac
+@@ -2515,7 +2515,7 @@ static int stmmac_tx_clean(struct stmmac
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
-@@ -2609,7 +2609,7 @@ static int stmmac_tx_clean(struct stmmac
+@@ -2615,7 +2615,7 @@ static int stmmac_tx_clean(struct stmmac
stmmac_release_tx_desc(priv, p, priv->mode);
}
tx_q->dirty_tx = entry;
-@@ -2674,7 +2674,7 @@ static int stmmac_tx_clean(struct stmmac
+@@ -2680,7 +2680,7 @@ static int stmmac_tx_clean(struct stmmac
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
-@@ -2741,8 +2741,8 @@ static int stmmac_napi_check(struct stmm
+@@ -2747,8 +2747,8 @@ static int stmmac_napi_check(struct stmm
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
-@@ -2918,7 +2918,7 @@ static int stmmac_init_dma_engine(struct
+@@ -2924,7 +2924,7 @@ static int stmmac_init_dma_engine(struct
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
-@@ -2932,7 +2932,7 @@ static int stmmac_init_dma_engine(struct
+@@ -2938,7 +2938,7 @@ static int stmmac_init_dma_engine(struct
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
-@@ -2947,7 +2947,7 @@ static int stmmac_init_dma_engine(struct
+@@ -2953,7 +2953,7 @@ static int stmmac_init_dma_engine(struct
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
-@@ -2997,7 +2997,7 @@ static void stmmac_init_coalesce(struct
+@@ -3003,7 +3003,7 @@ static void stmmac_init_coalesce(struct
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
-@@ -3019,12 +3019,12 @@ static void stmmac_set_rings_length(stru
+@@ -3025,12 +3025,12 @@ static void stmmac_set_rings_length(stru
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
}
/**
-@@ -3359,7 +3359,7 @@ static int stmmac_hw_setup(struct net_de
+@@ -3365,7 +3365,7 @@ static int stmmac_hw_setup(struct net_de
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
-@@ -3381,7 +3381,7 @@ static int stmmac_hw_setup(struct net_de
+@@ -3387,7 +3387,7 @@ static int stmmac_hw_setup(struct net_de
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
-@@ -3425,7 +3425,7 @@ static void stmmac_free_irq(struct net_d
+@@ -3431,7 +3431,7 @@ static void stmmac_free_irq(struct net_d
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
}
}
irq_idx = priv->plat->rx_queues_to_use;
-@@ -3434,7 +3434,7 @@ static void stmmac_free_irq(struct net_d
+@@ -3440,7 +3440,7 @@ static void stmmac_free_irq(struct net_d
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
}
}
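/*
 * Teardown order in the two loops above: clear the affinity hint first,
 * then release the line.  free_irq() must get the same dev_id cookie that
 * request_irq() was given; the per-queue pointer below is an assumption,
 * since the actual call sits on lines this refresh does not show:
 */
for (j = irq_idx - 1; j >= 0; j--) {
	if (priv->rx_irq[j] > 0) {
		irq_set_affinity_hint(priv->rx_irq[j], NULL);
		free_irq(priv->rx_irq[j], &priv->rx_queue[j]);	/* assumed dev_id */
	}
}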
-@@ -3567,7 +3567,7 @@ static int stmmac_request_irq_multi_msi(
+@@ -3573,7 +3573,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
-@@ -3590,7 +3590,7 @@ static int stmmac_request_irq_multi_msi(
+@@ -3596,7 +3596,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
-@@ -3721,21 +3721,21 @@ static int stmmac_open(struct net_device
+@@ -3727,21 +3727,21 @@ static int stmmac_open(struct net_device
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
-@@ -3793,7 +3793,7 @@ irq_error:
+@@ -3799,7 +3799,7 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
stmmac_hw_teardown(dev);
init_error:
-@@ -3835,7 +3835,7 @@ static int stmmac_release(struct net_dev
+@@ -3841,7 +3841,7 @@ static int stmmac_release(struct net_dev
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
netif_tx_disable(dev);
-@@ -3899,7 +3899,7 @@ static bool stmmac_vlan_insert(struct st
+@@ -3905,7 +3905,7 @@ static bool stmmac_vlan_insert(struct st
return false;
stmmac_set_tx_owner(priv, p);
return true;
}
-@@ -3917,7 +3917,7 @@ static bool stmmac_vlan_insert(struct st
+@@ -3923,7 +3923,7 @@ static bool stmmac_vlan_insert(struct st
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
-@@ -3928,7 +3928,7 @@ static void stmmac_tso_allocator(struct
+@@ -3934,7 +3934,7 @@ static void stmmac_tso_allocator(struct
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
-@@ -3956,7 +3956,7 @@ static void stmmac_tso_allocator(struct
+@@ -3962,7 +3962,7 @@ static void stmmac_tso_allocator(struct
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
int desc_size;
if (likely(priv->extend_desc))
-@@ -4018,7 +4018,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4024,7 +4024,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
dma_addr_t des;
int i;
first_tx = tx_q->cur_tx;
/* Compute header lengths */
-@@ -4058,7 +4058,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4064,7 +4064,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
-@@ -4170,7 +4170,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4176,7 +4176,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
-@@ -4258,7 +4258,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4264,7 +4264,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int entry, first_tx;
dma_addr_t des;
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
-@@ -4321,7 +4321,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4327,7 +4327,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
-@@ -4392,7 +4392,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4398,7 +4398,7 @@ static netdev_tx_t stmmac_xmit(struct sk
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
-@@ -4507,7 +4507,7 @@ static void stmmac_rx_vlan(struct net_de
+@@ -4513,7 +4513,7 @@ static void stmmac_rx_vlan(struct net_de
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
-@@ -4557,7 +4557,7 @@ static inline void stmmac_rx_refill(stru
+@@ -4563,7 +4563,7 @@ static inline void stmmac_rx_refill(stru
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-@@ -4585,12 +4585,12 @@ static unsigned int stmmac_rx_buf1_len(s
+@@ -4591,12 +4591,12 @@ static unsigned int stmmac_rx_buf1_len(s
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
-@@ -4606,7 +4606,7 @@ static unsigned int stmmac_rx_buf2_len(s
+@@ -4612,7 +4612,7 @@ static unsigned int stmmac_rx_buf2_len(s
/* Not last descriptor */
if (status & rx_not_ls)
plen = stmmac_get_rx_frame_len(priv, p, coe);
-@@ -4617,7 +4617,7 @@ static unsigned int stmmac_rx_buf2_len(s
+@@ -4623,7 +4623,7 @@ static unsigned int stmmac_rx_buf2_len(s
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
-@@ -4680,7 +4680,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
+@@ -4686,7 +4686,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
stmmac_enable_dma_transmission(priv, priv->ioaddr);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
-@@ -4854,7 +4854,7 @@ static void stmmac_dispatch_skb_zc(struc
+@@ -4860,7 +4860,7 @@ static void stmmac_dispatch_skb_zc(struc
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
-@@ -4897,7 +4897,7 @@ static bool stmmac_rx_refill_zc(struct s
+@@ -4903,7 +4903,7 @@ static bool stmmac_rx_refill_zc(struct s
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
}
if (rx_desc) {
-@@ -4912,7 +4912,7 @@ static bool stmmac_rx_refill_zc(struct s
+@@ -4918,7 +4918,7 @@ static bool stmmac_rx_refill_zc(struct s
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
-@@ -4934,7 +4934,7 @@ static int stmmac_rx_zc(struct stmmac_pr
+@@ -4940,7 +4940,7 @@ static int stmmac_rx_zc(struct stmmac_pr
desc_size = sizeof(struct dma_desc);
}
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
-@@ -4981,7 +4981,7 @@ read_again:
+@@ -4987,7 +4987,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
-@@ -5102,7 +5102,7 @@ read_again:
+@@ -5108,7 +5108,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
-@@ -5115,7 +5115,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -5121,7 +5121,7 @@ static int stmmac_rx(struct stmmac_priv
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
if (netif_msg_rx_status(priv)) {
void *rx_head;
-@@ -5129,7 +5129,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -5135,7 +5135,7 @@ static int stmmac_rx(struct stmmac_priv
desc_size = sizeof(struct dma_desc);
}
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
-@@ -5173,7 +5173,7 @@ read_again:
+@@ -5179,7 +5179,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
-@@ -5307,7 +5307,7 @@ read_again:
+@@ -5313,7 +5313,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
-@@ -5319,7 +5319,7 @@ read_again:
+@@ -5325,7 +5325,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
-@@ -5761,11 +5761,13 @@ static irqreturn_t stmmac_safety_interru
+@@ -5767,11 +5767,13 @@ static irqreturn_t stmmac_safety_interru
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-@@ -5805,10 +5807,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
+@@ -5811,10 +5813,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-@@ -5839,10 +5843,10 @@ static void stmmac_poll_controller(struc
+@@ -5845,10 +5849,10 @@ static void stmmac_poll_controller(struc
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
-@@ -6023,34 +6027,34 @@ static int stmmac_rings_status_show(stru
+@@ -6029,34 +6033,34 @@ static int stmmac_rings_status_show(stru
return 0;
for (queue = 0; queue < rx_count; queue++) {
}
}
-@@ -6391,7 +6395,7 @@ void stmmac_disable_rx_queue(struct stmm
+@@ -6397,7 +6401,7 @@ void stmmac_disable_rx_queue(struct stmm
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
-@@ -6428,7 +6432,7 @@ void stmmac_enable_rx_queue(struct stmma
+@@ -6434,7 +6438,7 @@ void stmmac_enable_rx_queue(struct stmma
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
rx_q->queue_index);
}
-@@ -6454,7 +6458,7 @@ void stmmac_disable_tx_queue(struct stmm
+@@ -6460,7 +6464,7 @@ void stmmac_disable_tx_queue(struct stmm
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
-@@ -6504,7 +6508,7 @@ void stmmac_xdp_release(struct net_devic
+@@ -6510,7 +6514,7 @@ void stmmac_xdp_release(struct net_devic
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
-@@ -6563,7 +6567,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6569,7 +6573,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
-@@ -6581,7 +6585,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6587,7 +6591,7 @@ int stmmac_xdp_open(struct net_device *d
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
rx_q->queue_index);
}
-@@ -6590,7 +6594,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6596,7 +6600,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
-@@ -6623,7 +6627,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6629,7 +6633,7 @@ int stmmac_xdp_open(struct net_device *d
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
stmmac_hw_teardown(dev);
init_error:
-@@ -6650,8 +6654,8 @@ int stmmac_xsk_wakeup(struct net_device
+@@ -6656,8 +6660,8 @@ int stmmac_xsk_wakeup(struct net_device
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
-@@ -6907,8 +6911,8 @@ int stmmac_reinit_ringparam(struct net_d
+@@ -6913,8 +6917,8 @@ int stmmac_reinit_ringparam(struct net_d
if (netif_running(dev))
stmmac_release(dev);
if (netif_running(dev))
ret = stmmac_open(dev);
-@@ -7344,7 +7348,7 @@ int stmmac_suspend(struct device *dev)
+@@ -7352,7 +7356,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
-@@ -7395,7 +7399,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
+@@ -7403,7 +7407,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
rx_q->cur_rx = 0;
rx_q->dirty_rx = 0;
-@@ -7403,7 +7407,7 @@ static void stmmac_reset_rx_queue(struct
+@@ -7411,7 +7415,7 @@ static void stmmac_reset_rx_queue(struct
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
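/*
 * The two reset helpers above reduce to: resolve the queue's state block
 * from priv once, then rewind both ring indices so producer and consumer
 * start out coincident.  Sketch with an illustrative struct; the driver's
 * own per-queue lookup sits on lines this refresh does not show:
 */
struct sketch_rx_queue {
	unsigned int cur_rx;	/* next entry the DMA engine will fill */
	unsigned int dirty_rx;	/* next entry software will refill */
};

static void sketch_reset_rx_queue(struct sketch_rx_queue *rx_q)
{
	/* an empty ring is one where both indices point at entry 0 */
	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}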
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
-@@ -1680,7 +1680,7 @@ cleanup:
+@@ -1684,7 +1684,7 @@ cleanup:
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_packet_attrs attr = { };
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
-@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac
+@@ -1767,7 +1767,7 @@ static int stmmac_test_tbs(struct stmmac
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
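/*
 * The selftest hunks follow one recipe: zero-initialise a packet-attrs
 * struct with "= { }" so every unset field stays 0/NULL, fill in only
 * what the test needs, and keep max_size below the MTU because the FCS
 * is appended by hardware.  Field names below are assumptions standing
 * in for the driver's stmmac_packet_attrs:
 */
struct sketch_packet_attrs {
	unsigned char *dst;	/* destination MAC of the looped-back frame */
	int max_size;		/* largest payload the test will send */
	int queue_mapping;	/* TX queue under test (e.g. the TBS one) */
};

static void sketch_fill_attrs(struct sketch_packet_attrs *attr,
			      struct net_device *dev, u16 queue, int size)
{
	memset(attr, 0, sizeof(*attr));		/* same effect as "= { }" */
	attr->dst = dev->dev_addr;
	attr->max_size = size - ETH_FCS_LEN;	/* leave room for the CRC */
	attr->queue_mapping = queue;
}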