-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0,
-					       skb_frag_size(frag), DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-			goto err_dma;
-
-		if (i & 0x1) {
-			j = NEXT_TX_DESP_IDX(j);
-			txd = &priv->tx_dma[j];
-			txd->txd1 = mapped_addr;
-			txd2 = TX_DMA_PLEN0(frag->size);
-			txd->txd4 = def_txd4;
-		} else {
-			txd->txd3 = mapped_addr;
-			txd2 |= TX_DMA_PLEN1(frag->size);
-			if (i != (nr_frags - 1))
-				txd->txd2 = txd2;
-			priv->tx_skb[j] = (struct sk_buff *) DMA_DUMMY_DESC;
+		frag_size = skb_frag_size(frag);
+
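+		/* Each DMA buffer is capped at TX_DMA_BUF_LEN bytes, so split
+		 * oversized fragments into multiple mappings, one buffer slot
+		 * per chunk.
+		 */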
+		while (frag_size > 0) {
+			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
+			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+						       frag_map_size,
+						       DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+				goto err_dma;
+
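+			/* A TX descriptor carries two buffers: an odd chunk
+			 * count opens the next descriptor and fills slot 0
+			 * (txd1/PLEN0), an even count fills slot 1
+			 * (txd3/PLEN1) of the descriptor opened previously.
+			 */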
+			if (k & 0x1) {
+				j = NEXT_TX_DESP_IDX(j);
+				txd.txd1 = mapped_addr;
+				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
+				txd.txd4 = def_txd4;
+
+				tx_buf = &ring->tx_buf[j];
+				memset(tx_buf, 0, sizeof(*tx_buf));
+
+				tx_buf->flags |= FE_TX_FLAGS_PAGE0;
+				dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+				dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+			} else {
+				txd.txd3 = mapped_addr;
+				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
+
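+				/* Placeholder pointer: this ring slot maps
+				 * fragment data only and owns no skb to free
+				 * on completion.
+				 */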
+				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
+				tx_buf->flags |= FE_TX_FLAGS_PAGE1;
+				dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+				dma_unmap_len_set(tx_buf, dma_len1, frag_map_size);
+
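+				/* Flush every completed descriptor to the
+				 * ring now; only the final chunk of the last
+				 * fragment stays pending so it can be
+				 * finalized after the loop.
+				 */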
+				if (!((i == (nr_frags - 1)) &&
+				      (frag_map_size == frag_size))) {
+					fe_set_txd(&txd, &ring->tx_dma[j]);
+					memset(&txd, 0, sizeof(txd));
+				}
+			}
+			frag_size -= frag_map_size;
+			offset += frag_map_size;
+			k++;