+/* Map one page-based buffer for transmit DMA and record it in the TX
+ * descriptor @txd.  Each hardware descriptor carries up to two buffers:
+ * an even @idx fills the first slot (txd1 / PLEN0), an odd @idx fills the
+ * second (txd3 / PLEN1).  The even slot must be programmed first, since it
+ * assigns txd2 while the odd slot only ORs its length field into it.
+ *
+ * The mapping address/length are also saved in @tx_buf via the
+ * dma_unmap_addr/len accessors so the buffer can be unmapped on completion.
+ *
+ * Returns 0 on success, -EIO if dma_map_page() fails.
+ */
+static int fe_tx_dma_map_page(struct device *dev, struct fe_tx_buf *tx_buf,
+ struct fe_tx_dma *txd, int idx,
+ struct page *page, size_t offset, size_t size)
+{
+ dma_addr_t mapped_addr;
+
+ mapped_addr = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, mapped_addr)))
+ return -EIO;
+
+ if (idx & 1) {
+ /* second buffer slot: only OR the length in; txd2 was set by
+ * the even-slot pass for this descriptor
+ */
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ /* DMA_DUMMY_DESC presumably marks this tx_buf as occupied
+ * without an skb to free on completion — NOTE(review): confirm
+ * against the driver's TX-unmap/completion path
+ */
+ tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ return 0;
+}
+
+/* Map the linear (header) area of @skb for transmit DMA by translating
+ * skb->data into its backing page + offset and delegating to
+ * fe_tx_dma_map_page().  The mapped length is skb_headlen(), i.e. only the
+ * non-paged portion; paged frags are mapped separately by the caller.
+ *
+ * Returns 0 on success, -EIO on DMA mapping failure.
+ */
+static int fe_tx_dma_map_skb(struct device *dev, struct fe_tx_buf *tx_buf,
+ struct fe_tx_dma *txd, int idx,
+ struct sk_buff *skb)
+{
+ struct page *page = virt_to_page(skb->data);
+ size_t offset = offset_in_page(skb->data);
+ size_t size = skb_headlen(skb);
+
+ return fe_tx_dma_map_page(dev, tx_buf, txd, idx, page, offset, size);
+}
+
+/* Walk @head's frag_list: given the current @skb, return the next skb in
+ * the chain.  When @skb is @head itself, this yields the first fragment
+ * (or NULL if @head has no frag_list); otherwise it follows skb->next,
+ * which is NULL at the end of the fragment list.
+ */
+static inline struct sk_buff *
+fe_next_frag(struct sk_buff *head, struct sk_buff *skb)
+{
+ if (skb != head)
+ return skb->next;
+
+ if (skb_has_frag_list(skb))
+ return skb_shinfo(skb)->frag_list;
+
+ return NULL;
+}
+