Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
 drivers/net/ethernet/freescale/fec_main.c | 510 ++++++++++++++++-----------
 1 file changed, 278 insertions(+), 232 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66b5cbdb43b9..e08c7b572497 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/page_pool/helpers.h>
 #include <net/selftests.h>
 #include <net/tso.h>
 #include <linux/tcp.h>
@@ -51,11 +52,11 @@
 #include <linux/clk.h>
 #include <linux/crc32.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <linux/fec.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/regulator/consumer.h>
@@ -68,6 +69,7 @@
 #include <soc/imx/cpuidle.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include <asm/cacheflush.h>
 
@@ -75,6 +77,9 @@
 
 static void set_multicast_list(struct net_device *ndev);
 static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+                                int cpu, struct xdp_buff *xdp,
+                                u32 dma_sync_len);
 
 #define DRIVER_NAME     "fec"
 
@@ -182,65 +187,22 @@ static struct platform_device_id fec_devtype[] = {
                 .name = DRIVER_NAME,
                 .driver_data = 0,
         }, {
-               .name = "imx25-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx25_info,
-       }, {
-               .name = "imx27-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx27_info,
-       }, {
-               .name = "imx28-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx28_info,
-       }, {
-               .name = "imx6q-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx6q_info,
-       }, {
-               .name = "mvf600-fec",
-               .driver_data = (kernel_ulong_t)&fec_mvf600_info,
-       }, {
-               .name = "imx6sx-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx6x_info,
-       }, {
-               .name = "imx6ul-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
-       }, {
-               .name = "imx8mq-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
-       }, {
-               .name = "imx8qm-fec",
-               .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
-       }, {
-               .name = "s32v234-fec",
-               .driver_data = (kernel_ulong_t)&fec_s32v234_info,
-       }, {
                 /* sentinel */
         }
 };
 MODULE_DEVICE_TABLE(platform, fec_devtype);
 
-enum imx_fec_type {
-       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
-       IMX27_FEC,      /* runs on i.mx27/35/51 */
-       IMX28_FEC,
-       IMX6Q_FEC,
-       MVF600_FEC,
-       IMX6SX_FEC,
-       IMX6UL_FEC,
-       IMX8MQ_FEC,
-       IMX8QM_FEC,
-       S32V234_FEC,
-};
-
 static const struct of_device_id fec_dt_ids[] = {
-       { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
-       { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
-       { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
-       { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
-       { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
-       { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
-       { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
-       { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
-       { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
-       { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
+       { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
+       { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
+       { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
+       { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
+       { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
+       { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+       { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
+       { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
+       { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
+       { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fec_dt_ids);
 
@@ -325,8 +287,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_WOL_FLAG_ENABLE            (0x1 << 1)
 #define FEC_WOL_FLAG_SLEEP_ON          (0x1 << 2)
 
-#define COPYBREAK_DEFAULT      256
-
 /* Max number of allowed TCP segments for software TSO */
 #define FEC_MAX_TSO_SEGS       100
 #define FEC_MAX_SKB_DESCS      (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
@@ -397,12 +357,76 @@ static void fec_dump(struct net_device *ndev)
                        fec16_to_cpu(bdp->cbd_sc),
                        fec32_to_cpu(bdp->cbd_bufaddr),
                        fec16_to_cpu(bdp->cbd_datlen),
-                       txq->tx_buf[index].skb);
+                       txq->tx_buf[index].buf_p);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                index++;
        } while (bdp != txq->bd.base);
 }
 
+/*
+ * Coldfire does not support DMA coherent allocations, and has historically used
+ * a band-aid with a manual flush in fec_enet_rx_queue.
+ */
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t gfp)
+{
+       return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t handle)
+{
+       dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
+}
+#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t gfp)
+{
+       return dma_alloc_coherent(dev, size, handle, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t handle)
+{
+       dma_free_coherent(dev, size, cpu_addr, handle);
+}
+#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+
+struct fec_dma_devres {
+       size_t          size;
+       void            *vaddr;
+       dma_addr_t      dma_handle;
+};
+
+static void fec_dmam_release(struct device *dev, void *res)
+{
+       struct fec_dma_devres *this = res;
+
+       fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
+}
+
+static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+               gfp_t gfp)
+{
+       struct fec_dma_devres *dr;
+       void *vaddr;
+
+       dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
+       if (!dr)
+               return NULL;
+       vaddr = fec_dma_alloc(dev, size, handle, gfp);
+       if (!vaddr) {
+               devres_free(dr);
+               return NULL;
+       }
+       dr->vaddr = vaddr;
+       dr->dma_handle = *handle;
+       dr->size = size;
+       devres_add(dev, dr);
+       return vaddr;
+}
+
 static inline bool is_ipv4_pkt(struct sk_buff *skb)
 {
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
@@ -654,7 +678,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 
        /* Save skb pointer */
-       txq->tx_buf[index].skb = skb;
+       txq->tx_buf[index].buf_p = skb;
 
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
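Note on the fec_dmam_alloc() helper added above: it uses the kernel's devres pattern, where a release callback is registered against the device so that error paths and driver unbind need no explicit free. A minimal sketch of a caller, assuming this helper; demo_probe() is hypothetical and not part of the patch:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe snippet: descriptor memory tied to device lifetime. */
static int demo_probe(struct platform_device *pdev)
{
        dma_addr_t ring_dma;
        void *ring;

        /* Freed automatically through the devres release callback when the
         * device is unbound -- no matching free in remove() or on error.
         */
        ring = fec_dmam_alloc(&pdev->dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program ring_dma into the hardware's ring base register ... */
        return 0;
}

The indirection through fec_dma_alloc()/fec_dma_free() is what lets the same devres wrapper serve both the coherent and the ColdFire noncoherent paths.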
@@ -860,7 +884,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
        }
 
        /* Save skb pointer */
-       txq->tx_buf[index].skb = skb;
+       txq->tx_buf[index].buf_p = skb;
 
        skb_tx_timestamp(skb);
        txq->bd.cur = bdp;
@@ -957,26 +981,27 @@ static void fec_enet_bd_init(struct net_device *dev)
                                        fec32_to_cpu(bdp->cbd_bufaddr),
                                        fec16_to_cpu(bdp->cbd_datlen),
                                        DMA_TO_DEVICE);
-                       if (txq->tx_buf[i].skb) {
-                               dev_kfree_skb_any(txq->tx_buf[i].skb);
-                               txq->tx_buf[i].skb = NULL;
-                       }
-               } else {
+                       if (txq->tx_buf[i].buf_p)
+                               dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+               } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
                        if (bdp->cbd_bufaddr)
                                dma_unmap_single(&fep->pdev->dev,
                                                 fec32_to_cpu(bdp->cbd_bufaddr),
                                                 fec16_to_cpu(bdp->cbd_datlen),
                                                 DMA_TO_DEVICE);
 
-                       if (txq->tx_buf[i].xdp) {
-                               xdp_return_frame(txq->tx_buf[i].xdp);
-                               txq->tx_buf[i].xdp = NULL;
-                       }
+                       if (txq->tx_buf[i].buf_p)
+                               xdp_return_frame(txq->tx_buf[i].buf_p);
+               } else {
+                       struct page *page = txq->tx_buf[i].buf_p;
 
-                       /* restore default tx buffer type: FEC_TXBUF_T_SKB */
-                       txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+                       if (page)
+                               page_pool_put_page(page->pp, page, 0, false);
                }
 
+               txq->tx_buf[i].buf_p = NULL;
+               /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
                bdp->cbd_bufaddr = cpu_to_fec32(0);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
        }
@@ -1383,6 +1408,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
        struct netdev_queue *nq;
        int     index = 0;
        int     entries_free;
+       struct page *page;
+       int     frame_len;
 
        fep = netdev_priv(ndev);
 
@@ -1404,8 +1431,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                index = fec_enet_get_bd_index(bdp, &txq->bd);
 
                if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-                       skb = txq->tx_buf[index].skb;
-                       txq->tx_buf[index].skb = NULL;
+                       skb = txq->tx_buf[index].buf_p;
                        if (bdp->cbd_bufaddr &&
                            !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
                                dma_unmap_single(&fep->pdev->dev,
@@ -1424,17 +1450,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                        if (unlikely(!budget))
                                break;
 
-                       xdpf = txq->tx_buf[index].xdp;
-                       if (bdp->cbd_bufaddr)
-                               dma_unmap_single(&fep->pdev->dev,
-                                                fec32_to_cpu(bdp->cbd_bufaddr),
-                                                fec16_to_cpu(bdp->cbd_datlen),
-                                                DMA_TO_DEVICE);
+                       if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+                               xdpf = txq->tx_buf[index].buf_p;
+                               if (bdp->cbd_bufaddr)
+                                       dma_unmap_single(&fep->pdev->dev,
+                                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                                        fec16_to_cpu(bdp->cbd_datlen),
+                                                        DMA_TO_DEVICE);
+                       } else {
+                               page = txq->tx_buf[index].buf_p;
+                       }
+
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
-                       if (!xdpf) {
+                       if (unlikely(!txq->tx_buf[index].buf_p)) {
                                txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
                                goto tx_buf_done;
                        }
+
+                       frame_len = fec16_to_cpu(bdp->cbd_datlen);
                }
 
                /* Check for errors. */
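Note on the tx_buf rework above: the old skb/xdp union members are collapsed into a single buf_p pointer plus a type tag, and the same three-way cleanup now recurs in fec_enet_bd_init(), fec_enet_tx_queue() and fec_enet_free_buffers(). A condensed sketch of that dispatch; demo_free_tx_buf() is a hypothetical helper written for illustration, assuming the driver's internal types from "fec.h":

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include "fec.h"

static void demo_free_tx_buf(struct fec_enet_priv_tx_q *txq, int i)
{
        if (txq->tx_buf[i].buf_p) {
                if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
                        /* Plain skb handed down from the stack */
                        dev_kfree_skb_any(txq->tx_buf[i].buf_p);
                } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
                        /* xdp_frame queued through ndo_xdp_xmit() */
                        xdp_return_frame(txq->tx_buf[i].buf_p);
                } else {
                        /* FEC_TXBUF_T_XDP_TX: an rx page bounced back out;
                         * hand it back to its page_pool for recycling.
                         */
                        struct page *page = txq->tx_buf[i].buf_p;

                        page_pool_put_page(page->pp, page, 0, false);
                }
        }

        /* Always restore the default state expected by the fast path. */
        txq->tx_buf[i].buf_p = NULL;
        txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
}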
@@ -1458,7 +1491,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                        if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
                                ndev->stats.tx_bytes += skb->len;
                        else
-                               ndev->stats.tx_bytes += xdpf->len;
+                               ndev->stats.tx_bytes += frame_len;
                }
 
                /* Deferred means some collisions occurred during transmit,
@@ -1482,15 +1515,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
                        }
 
                        /* Free the sk buffer associated with this last transmit */
-                       dev_kfree_skb_any(skb);
-               } else {
-                       xdp_return_frame(xdpf);
-
-                       txq->tx_buf[index].xdp = NULL;
-                       /* restore default tx buffer type: FEC_TXBUF_T_SKB */
-                       txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+                       napi_consume_skb(skb, budget);
+               } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+                       xdp_return_frame_rx_napi(xdpf);
+               } else { /* recycle pages of XDP_TX frames */
+                       /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
+                       page_pool_put_page(page->pp, page, 0, true);
                }
 
+               txq->tx_buf[index].buf_p = NULL;
+               /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+               txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
 tx_buf_done:
                /* Make sure the update to bdp and tx_buf are performed
                 * before dirty_tx
@@ -1543,7 +1579,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 
 static u32
 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
-                struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+                struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
 {
        unsigned int sync, len = xdp->data_end - xdp->data;
        u32 ret = FEC_ENET_XDP_PASS;
@@ -1553,8 +1589,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 
        act = bpf_prog_run_xdp(prog, xdp);
 
-       /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
-       sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+       /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
+        * max len CPU touch
+        */
+       sync = xdp->data_end - xdp->data;
        sync = max(sync, len);
 
        switch (act) {
@@ -1566,31 +1604,38 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
        case XDP_REDIRECT:
                rxq->stats[RX_XDP_REDIRECT]++;
                err = xdp_do_redirect(fep->netdev, xdp, prog);
-               if (!err) {
-                       ret = FEC_ENET_XDP_REDIR;
-               } else {
-                       ret = FEC_ENET_XDP_CONSUMED;
-                       page = virt_to_head_page(xdp->data);
-                       page_pool_put_page(rxq->page_pool, page, sync, true);
+               if (unlikely(err))
+                       goto xdp_err;
+
+               ret = FEC_ENET_XDP_REDIR;
+               break;
+
+       case XDP_TX:
+               rxq->stats[RX_XDP_TX]++;
+               err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+               if (unlikely(err)) {
+                       rxq->stats[RX_XDP_TX_ERRORS]++;
+                       goto xdp_err;
                }
+
+               ret = FEC_ENET_XDP_TX;
                break;
 
        default:
                bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
                fallthrough;
 
-       case XDP_TX:
-               bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
-               fallthrough;
-
        case XDP_ABORTED:
                fallthrough;    /* handle aborts by dropping packet */
 
        case XDP_DROP:
                rxq->stats[RX_XDP_DROP]++;
+xdp_err:
                ret = FEC_ENET_XDP_CONSUMED;
                page = virt_to_head_page(xdp->data);
                page_pool_put_page(rxq->page_pool, page, sync, true);
+               if (act != XDP_DROP)
+                       trace_xdp_exception(fep->netdev, prog, act);
                break;
        }
 
@@ -1621,6 +1666,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
        struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
        u32 ret, xdp_result = FEC_ENET_XDP_PASS;
        u32 data_start = FEC_ENET_XDP_HEADROOM;
+       int cpu = smp_processor_id();
        struct xdp_buff xdp;
        struct page *page;
        u32 sub_len = 4;
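With XDP_TX wired into fec_enet_run_xdp() above, a program attached via ndo_bpf can bounce frames straight back out the receiving port. A minimal BPF-side example in standard libbpf-style C that swaps the Ethernet addresses and returns XDP_TX; it is a hypothetical companion program, not part of this patch:

// SPDX-License-Identifier: GPL-2.0
/* demo_xdp_tx.bpf.c -- illustrative reflector, not from this commit. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int demo_reflect(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        unsigned char tmp[ETH_ALEN];

        /* The verifier demands an explicit bounds check before access. */
        if (data + sizeof(*eth) > data_end)
                return XDP_PASS;

        /* Swap source and destination MACs, then transmit on the same port. */
        __builtin_memcpy(tmp, eth->h_dest, ETH_ALEN);
        __builtin_memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
        __builtin_memcpy(eth->h_source, tmp, ETH_ALEN);

        return XDP_TX;
}

char _license[] SEC("license") = "GPL";

The sync = max(xdp->data_end - xdp->data, len) computation above exists precisely because such a program may grow or shrink the frame with bpf_xdp_adjust_head()/bpf_xdp_adjust_tail(): the recycled page must be synced for-device over every byte the CPU may have touched, whichever extent is larger.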
@@ -1635,7 +1681,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
        }
 #endif
 
-#ifdef CONFIG_M532x
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+       /*
+        * Hacky flush of all caches instead of using the DMA API for the TSO
+        * headers.
+        */
        flush_cache_all();
 #endif
        rxq = fep->rx_queue[queue_id];
@@ -1699,7 +1749,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                /* subtract 16bit shift and FCS */
                xdp_prepare_buff(&xdp, page_address(page), data_start,
                                 pkt_len - sub_len, false);
-               ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+               ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
                xdp_result |= ret;
                if (ret != FEC_ENET_XDP_PASS)
                        goto rx_processing_done;
@@ -1807,7 +1857,7 @@ rx_processing_done:
        rxq->bd.cur = bdp;
 
        if (xdp_result & FEC_ENET_XDP_REDIR)
-               xdp_do_flush_map();
+               xdp_do_flush();
 
        return pkt_received;
 }
@@ -2882,12 +2932,10 @@ static void fec_enet_get_strings(struct net_device *netdev,
        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
-                       memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
+                       ethtool_sprintf(&data, "%s", fec_stats[i].name);
                }
                for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
-                       strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
+                       ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]);
                }
                page_pool_ethtool_stats_get_strings(data);
 
@@ -3059,44 +3107,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
        return 0;
 }
 
-static int fec_enet_get_tunable(struct net_device *netdev,
-                               const struct ethtool_tunable *tuna,
-                               void *data)
-{
-       struct fec_enet_private *fep = netdev_priv(netdev);
-       int ret = 0;
-
-       switch (tuna->id) {
-       case ETHTOOL_RX_COPYBREAK:
-               *(u32 *)data = fep->rx_copybreak;
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static int fec_enet_set_tunable(struct net_device *netdev,
-                               const struct ethtool_tunable *tuna,
-                               const void *data)
-{
-       struct fec_enet_private *fep = netdev_priv(netdev);
-       int ret = 0;
-
-       switch (tuna->id) {
-       case ETHTOOL_RX_COPYBREAK:
-               fep->rx_copybreak = *(u32 *)data;
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
 /* LPI Sleep Ts count base on tx clk (clk_ref).
  * The lpi sleep cnt value = X us / (cycle_ns).
  */
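Note on the get_strings conversion above: ethtool_sprintf() advances the string cursor by ETH_GSTRING_LEN per call, which is exactly what the open-coded memcpy()/strncpy() plus pointer bump did by hand. A minimal sketch of the pattern in a generic .get_strings callback; the demo_* names are illustrative, not from this driver:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char demo_stat_names[][ETH_GSTRING_LEN] = {
        "rx_packets",
        "tx_packets",
};

/* Hypothetical .get_strings callback using the same helper as the hunk. */
static void demo_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        /* &data: the helper bumps the cursor ETH_GSTRING_LEN bytes per call */
        for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++)
                ethtool_sprintf(&data, "%s", demo_stat_names[i]);
}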
@@ -3234,8 +3244,6 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_sset_count         = fec_enet_get_sset_count,
 #endif
        .get_ts_info            = fec_enet_get_ts_info,
-       .get_tunable            = fec_enet_get_tunable,
-       .set_tunable            = fec_enet_set_tunable,
        .get_wol                = fec_enet_get_wol,
        .set_wol                = fec_enet_set_wol,
        .get_eee                = fec_enet_get_eee,
@@ -3245,38 +3253,10 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .self_test              = net_selftest,
 };
 
-static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-       struct fec_enet_private *fep = netdev_priv(ndev);
-       struct phy_device *phydev = ndev->phydev;
-
-       if (!netif_running(ndev))
-               return -EINVAL;
-
-       if (!phydev)
-               return -ENODEV;
-
-       if (fep->bufdesc_ex) {
-               bool use_fec_hwts = !phy_has_hwtstamp(phydev);
-
-               if (cmd == SIOCSHWTSTAMP) {
-                       if (use_fec_hwts)
-                               return fec_ptp_set(ndev, rq);
-                       fec_ptp_disable_hwts(ndev);
-               } else if (cmd == SIOCGHWTSTAMP) {
-                       if (use_fec_hwts)
-                               return fec_ptp_get(ndev, rq);
-               }
-       }
-
-       return phy_mii_ioctl(phydev, rq, cmd);
-}
-
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        unsigned int i;
-       struct sk_buff *skb;
        struct fec_enet_priv_tx_q *txq;
        struct fec_enet_priv_rx_q *rxq;
        unsigned int q;
@@ -3301,18 +3281,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                        kfree(txq->tx_bounce[i]);
                        txq->tx_bounce[i] = NULL;
 
+                       if (!txq->tx_buf[i].buf_p) {
+                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+                               continue;
+                       }
+
                        if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
-                               skb = txq->tx_buf[i].skb;
-                               txq->tx_buf[i].skb = NULL;
-                               dev_kfree_skb(skb);
+                               dev_kfree_skb(txq->tx_buf[i].buf_p);
+                       } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+                               xdp_return_frame(txq->tx_buf[i].buf_p);
                        } else {
-                               if (txq->tx_buf[i].xdp) {
-                                       xdp_return_frame(txq->tx_buf[i].xdp);
-                                       txq->tx_buf[i].xdp = NULL;
-                               }
+                               struct page *page = txq->tx_buf[i].buf_p;
 
-                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+                               page_pool_put_page(page->pp, page, 0, false);
                        }
+
+                       txq->tx_buf[i].buf_p = NULL;
+                       txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
                }
        }
 }
@@ -3326,10 +3311,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
        for (i = 0; i < fep->num_tx_queues; i++)
                if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
                        txq = fep->tx_queue[i];
-                       dma_free_coherent(&fep->pdev->dev,
-                                         txq->bd.ring_size * TSO_HEADER_SIZE,
-                                         txq->tso_hdrs,
-                                         txq->tso_hdrs_dma);
+                       fec_dma_free(&fep->pdev->dev,
+                                    txq->bd.ring_size * TSO_HEADER_SIZE,
+                                    txq->tso_hdrs, txq->tso_hdrs_dma);
                }
 
        for (i = 0; i < fep->num_rx_queues; i++)
@@ -3359,10 +3343,9 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
                txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
                txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
-               txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+               txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
                                        txq->bd.ring_size * TSO_HEADER_SIZE,
-                                       &txq->tso_hdrs_dma,
-                                       GFP_KERNEL);
+                                       &txq->tso_hdrs_dma, GFP_KERNEL);
                if (!txq->tso_hdrs) {
                        ret = -ENOMEM;
                        goto alloc_failed;
@@ -3748,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev,
        return 0;
 }
 
-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
-{
-       struct vlan_ethhdr *vhdr;
-       unsigned short vlan_TCI = 0;
-
-       if (skb->protocol == htons(ETH_P_ALL)) {
-               vhdr = (struct vlan_ethhdr *)(skb->data);
-               vlan_TCI = ntohs(vhdr->h_vlan_TCI);
-       }
-
-       return vlan_TCI;
-}
-
 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
                                 struct net_device *sb_dev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       u16 vlan_tag;
+       u16 vlan_tag = 0;
 
        if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
                return netdev_pick_tx(ndev, skb, NULL);
 
-       vlan_tag = fec_enet_get_raw_vlan_tci(skb);
-       if (!vlan_tag)
+       /* VLAN is present in the payload.*/
+       if (eth_type_vlan(skb->protocol)) {
+               struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+               vlan_tag = ntohs(vhdr->h_vlan_TCI);
+       /* VLAN is present in the skb but not yet pushed in the payload.*/
+       } else if (skb_vlan_tag_present(skb)) {
+               vlan_tag = skb->vlan_tci;
+       } else {
                return vlan_tag;
+       }
 
        return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
@@ -3835,12 +3813,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
 
 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
                                   struct fec_enet_priv_tx_q *txq,
-                                  struct xdp_frame *frame)
+                                  void *frame, u32 dma_sync_len,
+                                  bool ndo_xmit)
 {
        unsigned int index, status, estatus;
        struct bufdesc *bdp;
        dma_addr_t dma_addr;
        int entries_free;
+       u16 frame_len;
 
        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3855,17 +3835,37 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
        index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-       dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
-                                 frame->len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, dma_addr))
-               return -ENOMEM;
+       if (ndo_xmit) {
+               struct xdp_frame *xdpf = frame;
+
+               dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+                                         xdpf->len, DMA_TO_DEVICE);
+               if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+                       return -ENOMEM;
+
+               frame_len = xdpf->len;
+               txq->tx_buf[index].buf_p = xdpf;
+               txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+       } else {
+               struct xdp_buff *xdpb = frame;
+               struct page *page;
+
+               page = virt_to_page(xdpb->data);
+               dma_addr = page_pool_get_dma_addr(page) +
+                          (xdpb->data - xdpb->data_hard_start);
+               dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+                                          dma_sync_len, DMA_BIDIRECTIONAL);
+               frame_len = xdpb->data_end - xdpb->data;
+               txq->tx_buf[index].buf_p = page;
+               txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+       }
 
        status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
        if (fep->bufdesc_ex)
                estatus = BD_ENET_TX_INT;
 
        bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
-       bdp->cbd_datlen = cpu_to_fec16(frame->len);
+       bdp->cbd_datlen = cpu_to_fec16(frame_len);
 
        if (fep->bufdesc_ex) {
                struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -3877,9 +3877,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
-       txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
-       txq->tx_buf[index].xdp = frame;
-
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
        */
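Note on the select_queue rework above: it keys off the 3-bit PCP field in the VLAN TCI (bits 15..13), so vlan_tag >> 13 indexes an 8-entry priority-to-queue table. A small userspace-style illustration of the bit layout; the table values here are hypothetical stand-ins, not the driver's actual fec_enet_vlan_pri_to_queue[]:

#include <stdint.h>
#include <stdio.h>

/* TCI layout: | PCP (3 bits) | DEI (1 bit) | VLAN ID (12 bits) | */
int main(void)
{
        uint16_t tci = 0xA005;          /* PCP = 5, DEI = 0, VID = 5 */
        unsigned int pcp = tci >> 13;   /* the same shift the driver uses */

        /* Hypothetical stand-in for fec_enet_vlan_pri_to_queue[]. */
        static const unsigned short demo_pri_to_queue[8] = {
                0, 0, 1, 1, 1, 2, 2, 2
        };

        printf("PCP %u -> tx queue %u\n", pcp, demo_pri_to_queue[pcp]);
        return 0;
}

The new code also covers the case where the tag is still in skb->vlan_tci (accelerated tagging) rather than already pushed into the payload, which the removed fec_enet_get_raw_vlan_tci() missed.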
@@ -3905,6 +3902,29 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        return 0;
 }
 
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+                               int cpu, struct xdp_buff *xdp,
+                               u32 dma_sync_len)
+{
+       struct fec_enet_priv_tx_q *txq;
+       struct netdev_queue *nq;
+       int queue, ret;
+
+       queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+       txq = fep->tx_queue[queue];
+       nq = netdev_get_tx_queue(fep->netdev, queue);
+
+       __netif_tx_lock(nq, cpu);
+
+       /* Avoid tx timeout as XDP shares the queue with kernel stack */
+       txq_trans_cond_update(nq);
+       ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
+
+       __netif_tx_unlock(nq);
+
+       return ret;
+}
+
 static int fec_enet_xdp_xmit(struct net_device *dev,
                             int num_frames,
                             struct xdp_frame **frames,
@@ -3927,7 +3947,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
        /* Avoid tx timeout as XDP shares the queue with kernel stack */
        txq_trans_cond_update(nq);
        for (i = 0; i < num_frames; i++) {
-               if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+               if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
                        break;
                sent_frames++;
        }
@@ -3937,6 +3957,37 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
        return sent_frames;
 }
 
+static int fec_hwtstamp_get(struct net_device *ndev,
+                           struct kernel_hwtstamp_config *config)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!fep->bufdesc_ex)
+               return -EOPNOTSUPP;
+
+       fec_ptp_get(ndev, config);
+
+       return 0;
+}
+
+static int fec_hwtstamp_set(struct net_device *ndev,
+                           struct kernel_hwtstamp_config *config,
+                           struct netlink_ext_ack *extack)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!fep->bufdesc_ex)
+               return -EOPNOTSUPP;
+
+       return fec_ptp_set(ndev, config, extack);
+}
+
 static const struct net_device_ops fec_netdev_ops = {
        .ndo_open               = fec_enet_open,
        .ndo_stop               = fec_enet_close,
@@ -3946,13 +3997,15 @@ static const struct net_device_ops fec_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = fec_timeout,
        .ndo_set_mac_address    = fec_set_mac_address,
-       .ndo_eth_ioctl          = fec_enet_ioctl,
+       .ndo_eth_ioctl          = phy_do_ioctl_running,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = fec_poll_controller,
 #endif
        .ndo_set_features       = fec_set_features,
        .ndo_bpf                = fec_enet_bpf,
        .ndo_xdp_xmit           = fec_enet_xdp_xmit,
+       .ndo_hwtstamp_get       = fec_hwtstamp_get,
+       .ndo_hwtstamp_set       = fec_hwtstamp_set,
 };
 
 static const unsigned short offset_des_active_rxq[] = {
@@ -4006,8 +4059,8 @@ static int fec_enet_init(struct net_device *ndev)
        bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
 
        /* Allocate memory for buffer descriptors. */
-       cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
-                                      GFP_KERNEL);
+       cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
+                                 GFP_KERNEL);
        if (!cbd_base) {
                ret = -ENOMEM;
                goto free_queue_mem;
@@ -4018,9 +4071,6 @@ static int fec_enet_init(struct net_device *ndev)
        if (ret)
                goto free_queue_mem;
 
-       /* make sure MAC we just acquired is programmed into the hw */
-       fec_set_mac_address(ndev, NULL);
-
        /* Set receive and transmit descriptor base.
        */
        for (i = 0; i < fep->num_rx_queues; i++) {
                struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
@@ -4258,14 +4308,13 @@ fec_probe(struct platform_device *pdev)
        phy_interface_t interface;
        struct net_device *ndev;
        int i, irq, ret = 0;
-       const struct of_device_id *of_id;
        static int dev_id;
        struct device_node *np = pdev->dev.of_node, *phy_node;
        int num_tx_qs;
        int num_rx_qs;
        char irq_name[8];
        int irq_cnt;
-       struct fec_devinfo *dev_info;
+       const struct fec_devinfo *dev_info;
 
        fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
 
@@ -4280,10 +4329,9 @@ fec_probe(struct platform_device *pdev)
        /* setup board info structure */
        fep = netdev_priv(ndev);
 
-       of_id = of_match_device(fec_dt_ids, &pdev->dev);
-       if (of_id)
-               pdev->id_entry = of_id->data;
-       dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+       dev_info = device_get_match_data(&pdev->dev);
+       if (!dev_info)
+               dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
        if (dev_info)
                fep->quirks = dev_info->quirks;
 
@@ -4486,7 +4534,6 @@ fec_probe(struct platform_device *pdev)
        if (fep->bufdesc_ex && fep->ptp_clock)
                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
-       fep->rx_copybreak = COPYBREAK_DEFAULT;
        INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
 
        pm_runtime_mark_last_busy(&pdev->dev);
@@ -4526,7 +4573,7 @@ failed_ioremap:
        return ret;
 }
 
-static int
+static void
 fec_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
@@ -4562,7 +4609,6 @@ fec_drv_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        free_netdev(ndev);
-       return 0;
 }
 
 static int __maybe_unused fec_suspend(struct device *dev)
@@ -4718,7 +4764,7 @@ static struct platform_driver fec_driver = {
        },
        .id_table       = fec_devtype,
        .probe  = fec_probe,
-       .remove = fec_drv_remove,
+       .remove_new = fec_drv_remove,
 };
 
 module_platform_driver(fec_driver);
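Note on the final hunk: .remove_new is the void-returning remove callback for platform drivers. An error returned from the old int-returning .remove was ignored by the platform core apart from a warning, so the new prototype makes the "cleanup must not fail" contract explicit. A minimal sketch of the conversion pattern for any platform driver; the demo_* names are illustrative, not from this patch:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;       /* acquire resources here */
}

/* New-style callback: cleanup cannot fail, so it returns void. */
static void demo_remove(struct platform_device *pdev)
{
        /* release resources; nothing to return */
}

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
        },
        .probe          = demo_probe,
        .remove_new     = demo_remove,  /* was: .remove returning int */
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");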