[PATCH 2/3] net: xilinx: axi_mrmac: Add MRMAC driver

Ashok Reddy Soma ashok.reddy.soma at xilinx.com
Thu Jun 24 08:43:00 CEST 2021


Add support for the Xilinx Multirate Ethernet MAC (MRMAC) driver.
This driver uses multichannel DMA (MCDMA) for MRMAC data transfers.
Support is added for all 4 MRMAC ports at 10G and 25G speeds.
MCDMA supports up to 16 channels, but this driver sets up only one
channel, which is sufficient.

Tested 10G and 25G on all 4 ports.
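
An illustrative device tree node for this driver is shown below. The node
name, addresses and the referenced MCDMA node are placeholders; the driver
only parses the compatible string, the "axistream-connected" phandle (whose
node is expected to carry the MCDMA register base in its "reg" property)
and the optional "xlnx,mrmac-rate" property, which defaults to 10000:

	mrmac0: ethernet@80000000 {
		compatible = "xlnx,mrmac-ethernet-1.0";
		reg = <0x0 0x80000000 0x0 0x1000>;
		axistream-connected = <&mcdma0>;
		xlnx,mrmac-rate = <10000>;
	};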

Signed-off-by: Ashok Reddy Soma <ashok.reddy.soma at xilinx.com>
---

 MAINTAINERS                    |   1 +
 drivers/net/Kconfig            |   9 +
 drivers/net/Makefile           |   1 +
 drivers/net/xilinx_axi_mrmac.c | 525 +++++++++++++++++++++++++++++++++
 drivers/net/xilinx_axi_mrmac.h | 192 ++++++++++++
 5 files changed, 728 insertions(+)
 create mode 100644 drivers/net/xilinx_axi_mrmac.c
 create mode 100644 drivers/net/xilinx_axi_mrmac.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 2accd1fb83..cc05e13968 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -543,6 +543,7 @@ M:	Michal Simek <michal.simek at xilinx.com>
 S:	Maintained
 T:	git https://source.denx.de/u-boot/custodians/u-boot-microblaze.git
 F:	arch/arm/mach-versal/
+F:	drivers/net/xilinx_axi_mrmac.*
 F:	drivers/watchdog/xilinx_wwdt.c
 N:	(?<!uni)versal
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9fc28b149d..ccc5fc8fb6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -617,6 +617,15 @@ config XILINX_AXIEMAC
 	help
 	  This MAC is present in Xilinx Microblaze, Zynq and ZynqMP SoCs.
 
+config XILINX_AXIMRMAC
+	depends on DM_ETH && ARCH_VERSAL
+	bool "Xilinx AXI MRMAC"
+	help
+	  MRMAC is a high performance, low latency, adaptable Ethernet
+	  integrated hard IP. It can be configured with up to four ports and
+	  MAC rates from 10GE to 100GE. It may be present in some Xilinx
+	  Versal designs.
+
 config XILINX_EMACLITE
 	depends on DM_ETH
 	select PHYLIB
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d56baa65b2..77a1d23dd3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
 obj-$(CONFIG_ULI526X) += uli526x.o
 obj-$(CONFIG_VSC7385_ENET) += vsc7385.o
 obj-$(CONFIG_XILINX_AXIEMAC) += xilinx_axi_emac.o
+obj-$(CONFIG_XILINX_AXIMRMAC) += xilinx_axi_mrmac.o
 obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
 obj-$(CONFIG_ZYNQ_GEM) += zynq_gem.o
 obj-$(CONFIG_FSL_MC_ENET) += fsl-mc/
diff --git a/drivers/net/xilinx_axi_mrmac.c b/drivers/net/xilinx_axi_mrmac.c
new file mode 100644
index 0000000000..bffedf8280
--- /dev/null
+++ b/drivers/net/xilinx_axi_mrmac.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Multirate Ethernet MAC (MRMAC) driver
+ *
+ * Author(s):   Ashok Reddy Soma <ashok.reddy.soma at xilinx.com>
+ *              Michal Simek <michal.simek at xilinx.com>
+ *
+ * Copyright (C) 2021 Xilinx, Inc. All rights reserved.
+ */
+
+#include <config.h>
+#include <common.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <log.h>
+#include <net.h>
+#include <malloc.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include "xilinx_axi_mrmac.h"
+
+static u8 rxframe[RX_DESC * PKTSIZE_ALIGN] __attribute((aligned(DMAALIGN)));
+static u8 txminframe[MIN_PKT_SIZE] __attribute((aligned(DMAALIGN)));
+
+/* Static buffer descriptors:
+ * MRMAC needs at least two buffer descriptors for TX/RX to happen,
+ * otherwise it will drop the packets. Hence two TX and two RX BDs are used.
+ */
+static struct mcdma_bd tx_bd[TX_DESC] __attribute((aligned(DMAALIGN)));
+static struct mcdma_bd rx_bd[RX_DESC] __attribute((aligned(DMAALIGN)));
+
+static void axi_mrmac_dma_write(struct mcdma_bd *bd, u32 *desc)
+{
+	if (IS_ENABLED(CONFIG_PHYS_64BIT))
+		writeq((unsigned long)bd, desc);
+	else
+		writel((uintptr_t)bd, desc);
+}
+
+static int axi_mrmac_ethernet_init(struct axi_mrmac_priv *priv)
+{
+	struct mrmac_regs *regs = priv->iobase;
+	u32 val, reg;
+	int ret;
+
+	/* Perform all the required resets */
+	val = readl(&regs->reset);
+	val |= MRMAC_RX_SERDES_RST_MASK | MRMAC_TX_SERDES_RST_MASK |
+		MRMAC_RX_RST_MASK | MRMAC_TX_RST_MASK;
+	writel(val, &regs->reset);
+
+	mdelay(MRMAC_RESET_DELAY);
+
+	/* Configure Mode register */
+	reg = readl(&regs->mode);
+
+	debug("Configuring MRMAC speed to %d\n", priv->mrmac_rate);
+
+	if (priv->mrmac_rate == SPEED_25000) {
+		reg &= ~MRMAC_CTL_RATE_CFG_MASK;
+		reg |= MRMAC_CTL_DATA_RATE_25G;
+		reg |= (MRMAC_CTL_AXIS_CFG_25G_IND << MRMAC_CTL_AXIS_CFG_SHIFT);
+		reg |= (MRMAC_CTL_SERDES_WIDTH_25G <<
+			MRMAC_CTL_SERDES_WIDTH_SHIFT);
+	} else {
+		reg &= ~MRMAC_CTL_RATE_CFG_MASK;
+		reg |= MRMAC_CTL_DATA_RATE_10G;
+		reg |= (MRMAC_CTL_AXIS_CFG_10G_IND << MRMAC_CTL_AXIS_CFG_SHIFT);
+		reg |= (MRMAC_CTL_SERDES_WIDTH_10G <<
+			MRMAC_CTL_SERDES_WIDTH_SHIFT);
+	}
+
+	/* Configure the PM tick to be driven from the tick register */
+	reg |= MRMAC_CTL_PM_TICK_MASK;
+	writel(reg, &regs->mode);
+
+	val = readl(&regs->reset);
+	val &= ~(MRMAC_RX_SERDES_RST_MASK | MRMAC_TX_SERDES_RST_MASK |
+		 MRMAC_RX_RST_MASK | MRMAC_TX_RST_MASK);
+	writel(val, &regs->reset);
+
+	mdelay(MRMAC_RESET_DELAY);
+
+	/* Setup MRMAC hardware options */
+	writel(readl(&regs->rx_config) | MRMAC_RX_DEL_FCS_MASK,
+	       &regs->rx_config);
+	writel(readl(&regs->tx_config) | MRMAC_TX_INS_FCS_MASK,
+	       &regs->tx_config);
+	writel(readl(&regs->tx_config) | MRMAC_TX_EN_MASK, &regs->tx_config);
+	writel(readl(&regs->rx_config) | MRMAC_RX_EN_MASK, &regs->rx_config);
+
+	/* Clear the status registers and check that the block lock bit gets
+	 * set. This ensures the MRMAC Ethernet IP is functioning normally.
+	 */
+	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
+		MRMAC_TX_STS_OFFSET);
+	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
+		MRMAC_RX_STS_OFFSET);
+	writel(MRMAC_STS_ALL_MASK, (phys_addr_t)priv->iobase +
+		MRMAC_STATRX_BLKLCK_OFFSET);
+
+	ret = wait_for_bit_le32((u32 *)((phys_addr_t)priv->iobase +
+				MRMAC_STATRX_BLKLCK_OFFSET),
+				MRMAC_RX_BLKLCK_MASK, true,
+				MRMAC_BLKLCK_TIMEOUT, true);
+	if (ret) {
+		printf("%s: MRMAC block lock not complete!\n", __func__);
+		return 1;
+	}
+
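+	/* Trigger a PM tick so the statistics/status counters are latched */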
+	writel(MRMAC_TICK_TRIGGER, &regs->tick_reg);
+
+	return 0;
+}
+
+/* Reset the MCDMA engine and enable channel 1 for TX and RX */
+static int axi_mcdma_init(struct axi_mrmac_priv *priv)
+{
+	int ret;
+
+	/* Reset the engine so the hardware starts from a known state */
+	writel(XMCDMA_CR_RESET, &priv->mm2s_cmn->control);
+	writel(XMCDMA_CR_RESET, &priv->s2mm_cmn->control);
+
+	/* Check TX/RX MCDMA.RST. Reset is done when the reset bit is low */
+	ret = wait_for_bit_le32(&priv->mm2s_cmn->control, XMCDMA_CR_RESET,
+				false, MRMAC_DMARST_TIMEOUT, true);
+	if (ret) {
+		printf("%s: TX MCDMA reset timeout\n", __func__);
+		return -1;
+	}
+
+	ret = wait_for_bit_le32(&priv->s2mm_cmn->control, XMCDMA_CR_RESET,
+				false, MRMAC_DMARST_TIMEOUT, true);
+	if (ret) {
+		printf("%s: RX MCDMA reset timeout\n", __func__);
+		return -1;
+	}
+
+	/* Enable channel 1 for TX and RX */
+	writel(XMCDMA_CHANNEL_1, &priv->mm2s_cmn->chen);
+	writel(XMCDMA_CHANNEL_1, &priv->s2mm_cmn->chen);
+
+	return 0;
+}
+
+static int axi_mrmac_start(struct udevice *dev)
+{
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+	struct mrmac_regs *regs = priv->iobase;
+	u32 temp;
+
+	/*
+	 * Initialize the MCDMA engine. MCDMA must be initialized before
+	 * MRMAC: during MCDMA initialization the MCDMA hardware is reset,
+	 * and since the MCDMA reset line is connected to MRMAC, this also
+	 * resets MRMAC.
+	 */
+	if (axi_mcdma_init(priv))
+		return -1;
+
+	/* Initialize MRMAC hardware */
+	if (axi_mrmac_ethernet_init(priv))
+		return -1;
+
+	/* Disable all RX interrupts before RxBD space setup */
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_IRQ_ALL_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Update current descriptor */
+	axi_mrmac_dma_write(&rx_bd[0], &priv->mcdma_rx->current);
+
+	/* Set up RX BDs. MRMAC needs at least two descriptors */
+	memset(&rx_bd[0], 0, sizeof(rx_bd));
+	rx_bd[0].next_desc = lower_32_bits((u64)&rx_bd[1]);
+	rx_bd[0].buf_addr = lower_32_bits((u64)&rxframe);
+
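+	/* At the end of the ring, link the last RX BD back to the first */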
+	rx_bd[1].next_desc = lower_32_bits((u64)&rx_bd[0]);
+	rx_bd[1].buf_addr = lower_32_bits((u64)&rxframe[PKTSIZE_ALIGN]);
+
+	if (IS_ENABLED(CONFIG_PHYS_64BIT)) {
+		rx_bd[0].next_desc_msb = upper_32_bits((u64)&rx_bd[1]);
+		rx_bd[0].buf_addr_msb = upper_32_bits((u64)&rxframe);
+
+		rx_bd[1].next_desc_msb = upper_32_bits((u64)&rx_bd[0]);
+		rx_bd[1].buf_addr_msb = upper_32_bits((u64)&rxframe[PKTSIZE_ALIGN]);
+	}
+
+	rx_bd[0].cntrl = PKTSIZE_ALIGN;
+	rx_bd[1].cntrl = PKTSIZE_ALIGN;
+	/* Flush the BDs so the DMA core can see the updates */
+	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));
+
+	/* It is necessary to flush rxframe because otherwise the cache
+	 * can contain uninitialized data
+	 */
+	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));
+
+	/* Start the hardware */
+	temp = readl(&priv->s2mm_cmn->control);
+	temp |= XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->s2mm_cmn->control);
+
+	temp = readl(&priv->mm2s_cmn->control);
+	temp |= XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mm2s_cmn->control);
+
+	temp = readl(&priv->mcdma_rx->control);
+	temp |= XMCDMA_IRQ_ALL_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Channel fetch */
+	temp = readl(&priv->mcdma_rx->control);
+	temp |= XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Update tail descriptor. Now it's ready to receive data */
+	axi_mrmac_dma_write(&rx_bd[1], &priv->mcdma_rx->tail);
+
+	/* Enable TX */
+	writel(readl(&regs->tx_config) | MRMAC_TX_EN_MASK, &regs->tx_config);
+
+	/* Enable RX */
+	writel(readl(&regs->rx_config) | MRMAC_RX_EN_MASK, &regs->rx_config);
+
+	return 0;
+}
+
+static int axi_mrmac_send(struct udevice *dev, void *ptr, int len)
+{
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+	u32 val;
+	int ret;
+	u32 temp;
+
+#ifdef DEBUG
+	print_buffer(ptr, ptr, 1, len, 16);
+#endif
+	if (len > PKTSIZE_ALIGN)
+		len = PKTSIZE_ALIGN;
+
+	/* If size is less than min packet size, pad to min size */
+	if (len < MIN_PKT_SIZE) {
+		memset(txminframe, 0, MIN_PKT_SIZE);
+		memcpy(txminframe, ptr, len);
+		len = MIN_PKT_SIZE;
+		ptr = txminframe;
+	}
+
+	writel(XMCDMA_IRQ_ALL_MASK, &priv->mcdma_tx->status);
+
+	temp = readl(&priv->mcdma_tx->control);
+	temp &= ~XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_tx->control);
+
+	/* Flush the packet to main memory to be transferred by DMA */
+	flush_cache((phys_addr_t)ptr, len);
+
+	/* Set up TX BDs. MRMAC needs at least two descriptors */
+	memset(&tx_bd[0], 0, sizeof(tx_bd));
+	tx_bd[0].next_desc = lower_32_bits((u64)&tx_bd[1]);
+	tx_bd[0].buf_addr = lower_32_bits((u64)ptr);
+
+	/* At the end of the ring, link the last BD back to the top */
+	tx_bd[1].next_desc = lower_32_bits((u64)&tx_bd[0]);
+	tx_bd[1].buf_addr = lower_32_bits((u64)ptr + len / 2);
+
+	if (IS_ENABLED(CONFIG_PHYS_64BIT)) {
+		tx_bd[0].next_desc_msb = upper_32_bits((u64)&tx_bd[1]);
+		tx_bd[0].buf_addr_msb = upper_32_bits((u64)ptr);
+
+		tx_bd[1].next_desc_msb = upper_32_bits((u64)&tx_bd[0]);
+		tx_bd[1].buf_addr_msb = upper_32_bits((u64)ptr + len / 2);
+	}
+
+	/* Split the TX data in half and send it in two descriptors */
+	tx_bd[0].cntrl = (len / 2) | XMCDMA_BD_CTRL_TXSOF_MASK;
+	tx_bd[1].cntrl = (len - len / 2) | XMCDMA_BD_CTRL_TXEOF_MASK;
+
+	/* Flush the BDs so the DMA core can see the updates */
+	flush_cache((phys_addr_t)&tx_bd, sizeof(tx_bd));
+
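+	/* Program the current descriptor only while the channel is idle */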
+	if (readl(&priv->mcdma_tx->status) & XMCDMA_CH_IDLE) {
+		axi_mrmac_dma_write(&tx_bd[0], &priv->mcdma_tx->current);
+		/* Channel fetch */
+		temp = readl(&priv->mcdma_tx->control);
+		temp |= XMCDMA_CR_RUNSTOP_MASK;
+		writel(temp, &priv->mcdma_tx->control);
+	} else {
+		printf("Error: TX channel not idle, current desc not updated\n");
+		return 1;
+	}
+
+	val = readl(&priv->mcdma_tx->control);
+	val |= XMCDMA_IRQ_ALL_MASK;
+	writel(val, &priv->mcdma_tx->control);
+
+	/* Start transfer */
+	axi_mrmac_dma_write(&tx_bd[1], &priv->mcdma_tx->tail);
+
+	/* Wait for transmission to complete */
+	ret = wait_for_bit_le32(&priv->mcdma_tx->status, XMCDMA_IRQ_IOC_MASK,
+				true, 1, true);
+	if (ret) {
+		printf("%s: Timeout\n", __func__);
+		return 1;
+	}
+
+	/* Clear status */
+	tx_bd[0].sband_stats = 0;
+	tx_bd[1].sband_stats = 0;
+
+	debug("axi mrmac: Sending complete\n");
+
+	return 0;
+}
+
+static int isrxready(struct axi_mrmac_priv *priv)
+{
+	u32 status;
+
+	/* Read pending interrupts */
+	status = readl(&priv->mcdma_rx->status);
+
+	/* Acknowledge pending interrupts */
+	writel(status & XMCDMA_IRQ_ALL_MASK, &priv->mcdma_rx->status);
+
+	/*
+	 * If the reception complete or delay interrupt is asserted, a packet
+	 * is ready to be processed.
+	 */
+	if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK))
+		return 1;
+
+	return 0;
+}
+
+static int axi_mrmac_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+	u32 length;
+	u32 temp;
+
+	/* Check whether a packet has been received */
+	if (!isrxready(priv))
+		return -1;
+
+	/* Clear all interrupts */
+	writel(XMCDMA_IRQ_ALL_MASK, &priv->mcdma_rx->status);
+
+	/* Disable IRQs for a moment until the packet is handled */
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_IRQ_ALL_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Disable channel fetch */
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	length = rx_bd[0].status & XMCDMA_BD_STS_ACTUAL_LEN_MASK;
+	*packetp = rxframe;
+
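+	/* If the first BD holds no data, the frame landed in the second BD */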
+	if (!length) {
+		length = rx_bd[1].status & XMCDMA_BD_STS_ACTUAL_LEN_MASK;
+		*packetp = &rxframe[PKTSIZE_ALIGN];
+	}
+
+#ifdef DEBUG
+	print_buffer(*packetp, *packetp, 1, length, 16);
+#endif
+
+	/* Clear status */
+	rx_bd[0].status = 0;
+	rx_bd[1].status = 0;
+
+	return length;
+}
+
+static int axi_mrmac_free_pkt(struct udevice *dev, uchar *packet, int length)
+{
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+	u32 temp;
+
+#ifdef DEBUG
+	/* Clear the buffer to make sure it is consistent */
+	memset(rxframe, 0, sizeof(rxframe));
+#endif
+	/* Disable all RX interrupts before RxBD space setup */
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_IRQ_ALL_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Disable channel fetch */
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Update current descriptor */
+	axi_mrmac_dma_write(&rx_bd[0], &priv->mcdma_rx->current);
+
+	/* Write bd to HW */
+	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));
+
+	/* It is necessary to flush rxframe because otherwise the cache
+	 * will still contain the previous packet
+	 */
+	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));
+
+	/* Enable all IRQ */
+	temp = readl(&priv->mcdma_rx->control);
+	temp |= XMCDMA_IRQ_ALL_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Channel fetch */
+	temp = readl(&priv->mcdma_rx->control);
+	temp |= XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	/* Update tail descriptor. Now it's ready to receive data */
+	axi_mrmac_dma_write(&rx_bd[1], &priv->mcdma_rx->tail);
+
+	debug("axi mrmac: RX completed, framelength = %x\n", length);
+
+	return 0;
+}
+
+/* Stop DMA transfers */
+static void axi_mrmac_stop(struct udevice *dev)
+{
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+	u32 temp;
+
+	/* Stop the hardware */
+	temp = readl(&priv->mcdma_tx->control);
+	temp &= ~XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_tx->control);
+
+	temp = readl(&priv->mcdma_rx->control);
+	temp &= ~XMCDMA_CR_RUNSTOP_MASK;
+	writel(temp, &priv->mcdma_rx->control);
+
+	debug("%s: Halted\n", __func__);
+}
+
+static int axi_mrmac_probe(struct udevice *dev)
+{
+	struct axi_mrmac_plat *plat = dev_get_plat(dev);
+	struct eth_pdata *pdata = &plat->eth_pdata;
+	struct axi_mrmac_priv *priv = dev_get_priv(dev);
+
+	priv->iobase = (struct mrmac_regs *)pdata->iobase;
+
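+	/*
+	 * MCDMA register layout (relative to the MM2S/TX common base):
+	 * TX channel registers at +0x40, S2MM/RX common registers at +0x500
+	 * and RX channel registers at +0x540.
+	 */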
+	priv->mm2s_cmn = plat->mm2s_cmn;
+	priv->mcdma_tx = (struct mcdma_chan_reg *)((phys_addr_t)priv->mm2s_cmn
+						   + XMCDMA_CHAN_OFFSET);
+	priv->s2mm_cmn = (struct mcdma_common_regs *)((phys_addr_t)priv->mm2s_cmn
+						      + XMCDMA_RX_OFFSET);
+	priv->mcdma_rx = (struct mcdma_chan_reg *)((phys_addr_t)priv->s2mm_cmn
+						   + XMCDMA_CHAN_OFFSET);
+	priv->mrmac_rate = plat->mrmac_rate;
+
+	return 0;
+}
+
+static int axi_mrmac_remove(struct udevice *dev)
+{
+	return 0;
+}
+
+static int axi_mrmac_of_to_plat(struct udevice *dev)
+{
+	struct axi_mrmac_plat *plat = dev_get_plat(dev);
+	struct eth_pdata *pdata = &plat->eth_pdata;
+	struct ofnode_phandle_args phandle_args;
+	int ret = 0;
+
+	pdata->iobase = dev_read_addr(dev);
+
+	ret = dev_read_phandle_with_args(dev, "axistream-connected", NULL, 0, 0,
+					 &phandle_args);
+	if (ret) {
+		printf("%s: axistream not found\n", __func__);
+		return -EINVAL;
+	}
+
+	plat->mm2s_cmn = (struct mcdma_common_regs *)ofnode_read_u64_default
+						(phandle_args.node, "reg", -1);
+	if (!plat->mm2s_cmn) {
+		printf("%s: MRMAC dma register space not found\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Set default MRMAC rate to 10000 */
+	plat->mrmac_rate = dev_read_u32_default(dev, "xlnx,mrmac-rate", 10000);
+
+	return 0;
+}
+
+static const struct eth_ops axi_mrmac_ops = {
+	.start			= axi_mrmac_start,
+	.send			= axi_mrmac_send,
+	.recv			= axi_mrmac_recv,
+	.free_pkt		= axi_mrmac_free_pkt,
+	.stop			= axi_mrmac_stop,
+};
+
+static const struct udevice_id axi_mrmac_ids[] = {
+	{ .compatible = "xlnx,mrmac-ethernet-1.0" },
+	{ }
+};
+
+U_BOOT_DRIVER(axi_mrmac) = {
+	.name   = "axi_mrmac",
+	.id     = UCLASS_ETH,
+	.of_match = axi_mrmac_ids,
+	.of_to_plat = axi_mrmac_of_to_plat,
+	.probe  = axi_mrmac_probe,
+	.remove = axi_mrmac_remove,
+	.ops    = &axi_mrmac_ops,
+	.priv_auto = sizeof(struct axi_mrmac_priv),
+	.plat_auto = sizeof(struct axi_mrmac_plat),
+};
diff --git a/drivers/net/xilinx_axi_mrmac.h b/drivers/net/xilinx_axi_mrmac.h
new file mode 100644
index 0000000000..32d37e9f15
--- /dev/null
+++ b/drivers/net/xilinx_axi_mrmac.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Multirate Ethernet MAC (MRMAC) driver
+ *
+ * Author(s):   Ashok Reddy Soma <ashok.reddy.soma at xilinx.com>
+ *              Michal Simek <michal.simek at xilinx.com>
+ *
+ * Copyright (C) 2021 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __XILINX_AXI_MRMAC_H
+#define __XILINX_AXI_MRMAC_H
+
+#define DMAALIGN	128
+#define MIN_PKT_SIZE	60
+
+#define TX_DESC		2
+#define RX_DESC		2
+
+/* MRMAC platform data structure */
+struct axi_mrmac_plat {
+	struct eth_pdata eth_pdata;
+	struct mcdma_common_regs *mm2s_cmn;
+	u32 mrmac_rate;
+};
+
+/* MRMAC private driver structure */
+struct axi_mrmac_priv {
+	struct mrmac_regs *iobase;
+	struct mcdma_common_regs *mm2s_cmn;
+	struct mcdma_common_regs *s2mm_cmn;
+	struct mcdma_chan_reg *mcdma_tx;
+	struct mcdma_chan_reg *mcdma_rx;
+	u32 mrmac_rate;
+};
+
+/* MRMAC Register Definitions */
+struct mrmac_regs {
+	u32 revision;	/* 0x0: Revision Register */
+	u32 reset;	/* 0x4: Reset Register */
+	u32 mode;	/* 0x8: Mode */
+	u32 tx_config;	/* 0xC: TX Configuration */
+	u32 rx_config;	/* 0x10: RX Configuration */
+	u32 reserved[6];	/* 0x14-0x28: Reserved */
+	u32 tick_reg;	/* 0x2C: Tick Register */
+};
+
+/* Status Registers */
+#define MRMAC_TX_STS_OFFSET		0x740
+#define MRMAC_RX_STS_OFFSET		0x744
+#define MRMAC_TX_RT_STS_OFFSET		0x748
+#define MRMAC_RX_RT_STS_OFFSET		0x74C
+#define MRMAC_STATRX_BLKLCK_OFFSET	0x754
+
+/* Register bit masks */
+#define MRMAC_RX_SERDES_RST_MASK	(BIT(3) | BIT(2) | BIT(1) | BIT(0))
+#define MRMAC_TX_SERDES_RST_MASK	BIT(4)
+#define MRMAC_RX_RST_MASK		BIT(5)
+#define MRMAC_TX_RST_MASK		BIT(6)
+#define MRMAC_RX_AXI_RST_MASK		BIT(8)
+#define MRMAC_TX_AXI_RST_MASK		BIT(9)
+#define MRMAC_STS_ALL_MASK		0xFFFFFFFF
+
+#define MRMAC_RX_EN_MASK		BIT(0)
+#define MRMAC_RX_DEL_FCS_MASK		BIT(1)
+
+#define MRMAC_TX_EN_MASK		BIT(0)
+#define MRMAC_TX_INS_FCS_MASK		BIT(1)
+
+#define MRMAC_RX_BLKLCK_MASK		BIT(0)
+
+#define MRMAC_TICK_TRIGGER		BIT(0)
+
+#define MRMAC_RESET_DELAY		1   /* Delay in msecs */
+#define MRMAC_BLKLCK_TIMEOUT		100 /* Block lock timeout in msecs */
+#define MRMAC_DMARST_TIMEOUT		500 /* MCDMA reset timeout in msecs */
+
+#define XMCDMA_RX_OFFSET		0x500
+#define XMCDMA_CHAN_OFFSET		0x40
+
+/* MCDMA Channel numbers are from 1-16 */
+#define XMCDMA_CHANNEL_1	BIT(0)
+#define XMCDMA_CHANNEL_2	BIT(1)
+
+#define XMCDMA_CR_RUNSTOP	BIT(0)
+#define XMCDMA_CR_RESET		BIT(2)
+
+#define XMCDMA_BD_CTRL_TXSOF_MASK	BIT(31)		/* Start of TX frame (SOF) */
+#define XMCDMA_BD_CTRL_TXEOF_MASK	BIT(30)		/* End of TX frame (EOF) */
+#define XMCDMA_BD_CTRL_ALL_MASK		GENMASK(31, 30)	/* All control bits */
+#define XMCDMA_BD_STS_ALL_MASK		GENMASK(31, 28)	/* All status bits */
+
+/* MCDMA Mask registers */
+#define XMCDMA_CR_RUNSTOP_MASK		BIT(0) /* Start/stop DMA channel */
+#define XMCDMA_CR_RESET_MASK		BIT(2) /* Reset DMA engine */
+
+#define XMCDMA_SR_HALTED_MASK		BIT(0)
+#define XMCDMA_SR_IDLE_MASK		BIT(1)
+
+#define XMCDMA_CH_IDLE			BIT(0)
+
+#define XMCDMA_BD_STS_COMPLETE		BIT(31) /* Completed */
+#define XMCDMA_BD_STS_DEC_ERR		BIT(30) /* Decode error */
+#define XMCDMA_BD_STS_SLV_ERR		BIT(29) /* Slave error */
+#define XMCDMA_BD_STS_INT_ERR		BIT(28) /* Internal err */
+#define XMCDMA_BD_STS_ALL_ERR		GENMASK(30, 28) /* All errors */
+
+#define XMCDMA_IRQ_ERRON_OTHERQ_MASK	BIT(3)
+#define XMCDMA_IRQ_PKTDROP_MASK		BIT(4)
+#define XMCDMA_IRQ_IOC_MASK		BIT(5)
+#define XMCDMA_IRQ_DELAY_MASK		BIT(6)
+#define XMCDMA_IRQ_ERR_MASK		BIT(7)
+#define XMCDMA_IRQ_ALL_MASK		GENMASK(7, 5)
+#define XMCDMA_PKTDROP_COALESCE_MASK	GENMASK(15, 8)
+#define XMCDMA_COALESCE_MASK		GENMASK(23, 16)
+#define XMCDMA_DELAY_MASK		GENMASK(31, 24)
+
+#define MRMAC_CTL_DATA_RATE_MASK	GENMASK(2, 0)
+#define MRMAC_CTL_DATA_RATE_10G		0
+#define MRMAC_CTL_DATA_RATE_25G		1
+#define MRMAC_CTL_DATA_RATE_40G		2
+#define MRMAC_CTL_DATA_RATE_50G		3
+#define MRMAC_CTL_DATA_RATE_100G	4
+
+#define MRMAC_CTL_AXIS_CFG_MASK		GENMASK(11, 9)
+#define MRMAC_CTL_AXIS_CFG_SHIFT	9
+#define MRMAC_CTL_AXIS_CFG_10G_IND	1
+#define MRMAC_CTL_AXIS_CFG_25G_IND	1
+
+#define MRMAC_CTL_SERDES_WIDTH_MASK	GENMASK(6, 4)
+#define MRMAC_CTL_SERDES_WIDTH_SHIFT	4
+#define MRMAC_CTL_SERDES_WIDTH_10G	4
+#define MRMAC_CTL_SERDES_WIDTH_25G	6
+
+#define MRMAC_CTL_RATE_CFG_MASK		(MRMAC_CTL_DATA_RATE_MASK | \
+					 MRMAC_CTL_AXIS_CFG_MASK | \
+					 MRMAC_CTL_SERDES_WIDTH_MASK)
+
+#define MRMAC_CTL_PM_TICK_MASK		BIT(30)
+
+#define XMCDMA_BD_STS_ACTUAL_LEN_MASK  0x007FFFFF /* Actual len */
+
+/* MCDMA common offsets */
+struct mcdma_common_regs {
+	u32 control;	/* Common control */
+	u32 status;	/* Common status */
+	u32 chen;	/* Channel enable/disable */
+	u32 chser;	/* Channel in progress */
+	u32 err;	/* Error */
+	u32 ch_schd_type;	/* Channel Q scheduler type */
+	u32 wrr_reg1;	/* Weight of each channel (ch1-8) */
+	u32 wrr_reg2;	/* Weight of each channel (ch9-16) */
+	u32 ch_serviced;	/* Channels completed */
+	u32 arcache_aruser;	/* ARCACHE and ARUSER values for AXI4 read */
+	u32 intr_status;	/* Interrupt monitor */
+	u32 reserved[5];
+};
+
+/* MCDMA per channel registers */
+struct mcdma_chan_reg {
+	u32 control;	/* Control */
+	u32 status;	/* Status */
+	u32 current;	/* Current descriptor */
+	u32 current_hi;	/* Current descriptor high 32bit */
+	u32 tail;	/* Tail descriptor */
+	u32 tail_hi;	/* Tail descriptor high 32bit */
+	u32 pktcnt;	/* Packet processed count */
+};
+
+/* MCDMA buffer descriptors */
+struct mcdma_bd {
+	u32 next_desc;	/* Next descriptor pointer */
+	u32 next_desc_msb;
+	u32 buf_addr;	/* Buffer address */
+	u32 buf_addr_msb;
+	u32 reserved1;
+	u32 cntrl;	/* Control */
+	u32 status;	/* Status */
+	u32 sband_stats;
+	u32 app0;
+	u32 app1;	/* TX start << 16 | insert */
+	u32 app2;	/* TX csum seed */
+	u32 app3;
+	u32 app4;
+	u32 sw_id_offset;
+	u32 reserved2;
+	u32 reserved3;
+	u32 reserved4[16];
+};
+
+#endif	/* __XILINX_AXI_MRMAC_H */
-- 
2.17.1


