[PATCH v3] net: Add NIC controller driver for OcteonTX2

Stefan Roese sr at denx.de
Wed Oct 14 10:13:34 CEST 2020


On 26.08.20 14:37, Stefan Roese wrote:
> From: Suneel Garapati <sgarapati at marvell.com>
> 
> Adds support for Network Interface controllers found on
> OcteonTX2 SoC platforms.
> 
> Signed-off-by: Suneel Garapati <sgarapati at marvell.com>
> Signed-off-by: Stefan Roese <sr at denx.de>
> Cc: Joe Hershberger <joe.hershberger at ni.com>
> ---
> Series-changes: 3
> - Add SoB from Stefan
> - Remove spdx.org line from comment
> - Remove inclusion of common.h header
> - Order header file inclusion
> - Misc minor checkpatch fixes
> 
> Series-changes: 1
> - Change patch subject
> - Rebased on latest TOT
> - Removed inclusion of common.h

Applied to u-boot-marvell/master

Thanks,
Stefan

>   drivers/net/Kconfig                |   17 +
>   drivers/net/Makefile               |    2 +
>   drivers/net/octeontx2/Makefile     |    8 +
>   drivers/net/octeontx2/cgx.c        |  296 ++++++++
>   drivers/net/octeontx2/cgx.h        |  105 +++
>   drivers/net/octeontx2/cgx_intf.c   |  715 ++++++++++++++++++
>   drivers/net/octeontx2/cgx_intf.h   |  448 +++++++++++
>   drivers/net/octeontx2/lmt.h        |   49 ++
>   drivers/net/octeontx2/nix.c        |  831 +++++++++++++++++++++
>   drivers/net/octeontx2/nix.h        |  353 +++++++++
>   drivers/net/octeontx2/nix_af.c     | 1102 ++++++++++++++++++++++++++++
>   drivers/net/octeontx2/npc.h        |   90 +++
>   drivers/net/octeontx2/rvu.h        |  119 +++
>   drivers/net/octeontx2/rvu_af.c     |  171 +++++
>   drivers/net/octeontx2/rvu_common.c |   71 ++
>   drivers/net/octeontx2/rvu_pf.c     |  116 +++
>   16 files changed, 4493 insertions(+)
>   create mode 100644 drivers/net/octeontx2/Makefile
>   create mode 100644 drivers/net/octeontx2/cgx.c
>   create mode 100644 drivers/net/octeontx2/cgx.h
>   create mode 100644 drivers/net/octeontx2/cgx_intf.c
>   create mode 100644 drivers/net/octeontx2/cgx_intf.h
>   create mode 100644 drivers/net/octeontx2/lmt.h
>   create mode 100644 drivers/net/octeontx2/nix.c
>   create mode 100644 drivers/net/octeontx2/nix.h
>   create mode 100644 drivers/net/octeontx2/nix_af.c
>   create mode 100644 drivers/net/octeontx2/npc.h
>   create mode 100644 drivers/net/octeontx2/rvu.h
>   create mode 100644 drivers/net/octeontx2/rvu_af.c
>   create mode 100644 drivers/net/octeontx2/rvu_common.c
>   create mode 100644 drivers/net/octeontx2/rvu_pf.c
> 
> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
> index 26ea53d346..6e758f5581 100644
> --- a/drivers/net/Kconfig
> +++ b/drivers/net/Kconfig
> @@ -414,6 +414,15 @@ config NET_OCTEONTX
>   	help
>   	  You must select Y to enable network device support for
>   	  OcteonTX SoCs. If unsure, say n
> +
> +config NET_OCTEONTX2
> +	bool "OcteonTX2 Ethernet support"
> +	depends on ARCH_OCTEONTX2
> +	select OCTEONTX2_CGX_INTF
> +	help
> +	  You must select Y to enable network device support for
> +	  OcteonTX2 SoCs. If unsure, say n
> +
>   config OCTEONTX_SMI
>   	bool "OcteonTX SMI Device support"
>   	depends on ARCH_OCTEONTX || ARCH_OCTEONTX2
> @@ -421,6 +430,14 @@ config OCTEONTX_SMI
>   	  You must select Y to enable SMI controller support for
>   	  OcteonTX or OcteonTX2 SoCs. If unsure, say n
>   
> +config OCTEONTX2_CGX_INTF
> +	bool "OcteonTX2 CGX ATF interface support"
> +	depends on ARCH_OCTEONTX2
> +	default y if ARCH_OCTEONTX2
> +	help
> +	  You must select Y to enable CGX ATF interface support for
> +	  OcteonTX2 SoCs. If unsure, say n
> +
>   config PCH_GBE
>   	bool "Intel Platform Controller Hub EG20T GMAC driver"
>   	depends on DM_ETH && DM_PCI
> diff --git a/drivers/net/Makefile b/drivers/net/Makefile
> index bee9680f76..c07b5ad698 100644
> --- a/drivers/net/Makefile
> +++ b/drivers/net/Makefile
> @@ -66,7 +66,9 @@ obj-$(CONFIG_SMC91111) += smc91111.o
>   obj-$(CONFIG_SMC911X) += smc911x.o
>   obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o
>   obj-$(CONFIG_NET_OCTEONTX) += octeontx/
> +obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
>   obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o
> +obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o
>   obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
>   obj-$(CONFIG_ULI526X) += uli526x.o
>   obj-$(CONFIG_VSC7385_ENET) += vsc7385.o
> diff --git a/drivers/net/octeontx2/Makefile b/drivers/net/octeontx2/Makefile
> new file mode 100644
> index 0000000000..c9300727ae
> --- /dev/null
> +++ b/drivers/net/octeontx2/Makefile
> @@ -0,0 +1,8 @@
> +# SPDX-License-Identifier:    GPL-2.0
> +#
> +# Copyright (C) 2018 Marvell International Ltd.
> +#
> +
> +obj-$(CONFIG_NET_OCTEONTX2) += cgx.o nix_af.o nix.o rvu_pf.o \
> +				rvu_af.o rvu_common.o
> +
> diff --git a/drivers/net/octeontx2/cgx.c b/drivers/net/octeontx2/cgx.c
> new file mode 100644
> index 0000000000..ff2ebc25ce
> --- /dev/null
> +++ b/drivers/net/octeontx2/cgx.c
> @@ -0,0 +1,296 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <pci_ids.h>
> +#include <linux/list.h>
> +#include <asm/arch/board.h>
> +#include <asm/arch/csrs/csrs-cgx.h>
> +#include <asm/io.h>
> +
> +#include "cgx.h"
> +
> +char lmac_type_to_str[][8] = {
> +	"SGMII",
> +	"XAUI",
> +	"RXAUI",
> +	"10G_R",
> +	"40G_R",
> +	"RGMII",
> +	"QSGMII",
> +	"25G_R",
> +	"50G_R",
> +	"100G_R",
> +	"USXGMII",
> +};
> +
> +char lmac_speed_to_str[][8] = {
> +	"0",
> +	"10M",
> +	"100M",
> +	"1G",
> +	"2.5G",
> +	"5G",
> +	"10G",
> +	"20G",
> +	"25G",
> +	"40G",
> +	"50G",
> +	"80G",
> +	"100G",
> +};
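
A note on the two tables above: they are indexed directly by the enum
values declared in cgx.h and cgx_intf.h, so the entry order has to
track those enums:

	lmac_type_to_str[LMAC_MODE_QSGMII]	/* -> "QSGMII", QSGMII = 6 */
	lmac_speed_to_str[CGX_LINK_2HG]		/* -> "2.5G",   2HG    = 4 */

Note the gap at value 5 in enum lmac_type; the "RGMII" entry keeps the
indices aligned across it.
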
> +
> +/**
> + * Given an LMAC/PF instance number, return the lmac
> + * Per design, each PF has only one LMAC mapped.
> + *
> + * @param lmac_instance	LMAC instance to find
> + *
> + * @return	pointer to lmac data structure or NULL if not found
> + */
> +struct lmac *nix_get_cgx_lmac(int lmac_instance)
> +{
> +	struct cgx *cgx;
> +	struct udevice *dev;
> +	int i, idx, err;
> +
> +	for (i = 0; i < CGX_PER_NODE; i++) {
> +		err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
> +					 PCI_DEVICE_ID_OCTEONTX2_CGX, i,
> +					 &dev);
> +		if (err)
> +			continue;
> +
> +		cgx = dev_get_priv(dev);
> +		debug("%s udev %p cgx %p instance %d\n", __func__, dev, cgx,
> +		      lmac_instance);
> +		for (idx = 0; idx < cgx->lmac_count; idx++) {
> +			if (cgx->lmac[idx]->instance == lmac_instance)
> +				return cgx->lmac[idx];
> +		}
> +	}
> +	return NULL;
> +}
> +
> +void cgx_lmac_mac_filter_clear(struct lmac *lmac)
> +{
> +	union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
> +	union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
> +	void *reg_addr;
> +
> +	dmac_cam0.u = 0x0;
> +	reg_addr = lmac->cgx->reg_base +
> +			CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
> +	writeq(dmac_cam0.u, reg_addr);
> +	debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
> +
> +	dmac_ctl0.u = 0x0;
> +	dmac_ctl0.s.bcst_accept = 1;
> +	dmac_ctl0.s.mcst_mode = 1;
> +	dmac_ctl0.s.cam_accept = 0;
> +	reg_addr = lmac->cgx->reg_base +
> +			CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
> +	writeq(dmac_ctl0.u, reg_addr);
> +	debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
> +}
> +
> +void cgx_lmac_mac_filter_setup(struct lmac *lmac)
> +{
> +	union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
> +	union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
> +	u64 mac, tmp;
> +	void *reg_addr;
> +
> +	memcpy((void *)&tmp, lmac->mac_addr, 6);
> +	debug("%s: tmp %llx\n", __func__, tmp);
> +	debug("%s: swab tmp %llx\n", __func__, swab64(tmp));
> +	mac = swab64(tmp) >> 16;
> +	debug("%s: mac %llx\n", __func__, mac);
> +	dmac_cam0.u = 0x0;
> +	dmac_cam0.s.id = lmac->lmac_id;
> +	dmac_cam0.s.adr = mac;
> +	dmac_cam0.s.en = 1;
> +	reg_addr = lmac->cgx->reg_base +
> +			CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
> +	writeq(dmac_cam0.u, reg_addr);
> +	debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
> +	dmac_ctl0.u = 0x0;
> +	dmac_ctl0.s.bcst_accept = 1;
> +	dmac_ctl0.s.mcst_mode = 0;
> +	dmac_ctl0.s.cam_accept = 1;
> +	reg_addr = lmac->cgx->reg_base +
> +			CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
> +	writeq(dmac_ctl0.u, reg_addr);
> +	debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
> +}
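
A worked example of the swab64() shuffle in cgx_lmac_mac_filter_setup()
above, using an arbitrary address 00:11:22:33:44:55 on a little-endian
core (the upper two bytes of tmp are whatever happened to be on the
stack):

	mac_addr[]        = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
	tmp               = 0xXXXX554433221100	/* 6-byte memcpy, LE load */
	swab64(tmp)       = 0x001122334455XXXX
	swab64(tmp) >> 16 = 0x0000001122334455	/* first octet in bits <47:40> */

which is the layout CGXX_CMR_RX_DMACX_CAM0[ADR] expects.
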
> +
> +int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind)
> +{
> +	cgx_write(lmac->cgx, lmac_id, CGXX_CMRX_RX_ID_MAP(0),
> +		  (pkind & 0x3f));
> +	return 0;
> +}
> +
> +int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status)
> +{
> +	int ret = 0;
> +
> +	ret = cgx_intf_get_link_sts(lmac->cgx->cgx_id, lmac_id, status);
> +	if (ret) {
> +		debug("%s request failed for cgx%d lmac%d\n",
> +		      __func__, lmac->cgx->cgx_id, lmac->lmac_id);
> +		ret = -1;
> +	}
> +	return ret;
> +}
> +
> +int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable)
> +{
> +	struct cgx *cgx = lmac->cgx;
> +	union cgxx_cmrx_config cmrx_config;
> +
> +	if (!cgx || lmac_id >= cgx->lmac_count)
> +		return -ENODEV;
> +
> +	cmrx_config.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
> +	cmrx_config.s.data_pkt_rx_en =
> +	cmrx_config.s.data_pkt_tx_en = enable ? 1 : 0;
> +	cgx_write(cgx, lmac_id, CGXX_CMRX_CONFIG(0), cmrx_config.u);
> +	return 0;
> +}
> +
> +int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
> +			 u64 *status)
> +{
> +	int ret = 0;
> +
> +	ret = cgx_intf_link_up_dwn(lmac->cgx->cgx_id, lmac_id, enable,
> +				   status);
> +	if (ret) {
> +		debug("%s request failed for cgx%d lmac%d\n",
> +		      __func__, lmac->cgx->cgx_id, lmac->lmac_id);
> +		ret = -1;
> +	}
> +	return ret;
> +}
> +
> +int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable)
> +{
> +	struct cgx *cgx = lmac->cgx;
> +	union cgxx_cmrx_config cmrx_cfg;
> +	union cgxx_gmp_pcs_mrx_control mrx_control;
> +	union cgxx_spux_control1 spux_control1;
> +	enum lmac_type lmac_type;
> +
> +	if (!cgx || lmac_id >= cgx->lmac_count)
> +		return -ENODEV;
> +
> +	cmrx_cfg.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
> +	lmac_type = cmrx_cfg.s.lmac_type;
> +	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
> +		mrx_control.u = cgx_read(cgx, lmac_id,
> +					 CGXX_GMP_PCS_MRX_CONTROL(0));
> +		mrx_control.s.loopbck1 = enable ? 1 : 0;
> +		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CONTROL(0),
> +			  mrx_control.u);
> +	} else {
> +		spux_control1.u = cgx_read(cgx, lmac_id,
> +					   CGXX_SPUX_CONTROL1(0));
> +		spux_control1.s.loopbck = enable ? 1 : 0;
> +		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1(0),
> +			  spux_control1.u);
> +	}
> +	return 0;
> +}
> +
> +static int cgx_lmac_init(struct cgx *cgx)
> +{
> +	struct lmac *lmac;
> +	union cgxx_cmrx_config cmrx_cfg;
> +	static int instance = 1;
> +	int i;
> +
> +	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMR_RX_LMACS());
> +	debug("%s: Found %d lmacs for cgx %d@%p\n", __func__, cgx->lmac_count,
> +	      cgx->cgx_id, cgx->reg_base);
> +
> +	for (i = 0; i < cgx->lmac_count; i++) {
> +		lmac = calloc(1, sizeof(*lmac));
> +		if (!lmac)
> +			return -ENOMEM;
> +		lmac->instance = instance++;
> +		snprintf(lmac->name, sizeof(lmac->name), "cgx_fwi_%d_%d",
> +			 cgx->cgx_id, i);
> +		/* Get LMAC type */
> +		cmrx_cfg.u = cgx_read(cgx, i, CGXX_CMRX_CONFIG(0));
> +		lmac->lmac_type = cmrx_cfg.s.lmac_type;
> +
> +		lmac->lmac_id = i;
> +		lmac->cgx = cgx;
> +		cgx->lmac[i] = lmac;
> +		debug("%s: map id %d to lmac %p (%s), type:%d instance %d\n",
> +		      __func__, i, lmac, lmac->name, lmac->lmac_type,
> +		      lmac->instance);
> +		lmac->init_pend = 1;
> +		printf("CGX%d LMAC%d [%s]\n", lmac->cgx->cgx_id,
> +		       lmac->lmac_id, lmac_type_to_str[lmac->lmac_type]);
> +		octeontx2_board_get_mac_addr((lmac->instance - 1),
> +					     lmac->mac_addr);
> +		debug("%s: MAC %pM\n", __func__, lmac->mac_addr);
> +		cgx_lmac_mac_filter_setup(lmac);
> +	}
> +	return 0;
> +}
> +
> +int cgx_probe(struct udevice *dev)
> +{
> +	struct cgx *cgx = dev_get_priv(dev);
> +	int err;
> +
> +	cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
> +				       PCI_REGION_MEM);
> +	cgx->dev = dev;
> +	cgx->cgx_id = ((u64)(cgx->reg_base) >> 24) & 0x7;
> +
> +	debug("%s CGX BAR %p, id: %d\n", __func__, cgx->reg_base,
> +	      cgx->cgx_id);
> +	debug("%s CGX %p, udev: %p\n", __func__, cgx, dev);
> +
> +	err = cgx_lmac_init(cgx);
> +
> +	return err;
> +}
> +
> +int cgx_remove(struct udevice *dev)
> +{
> +	struct cgx *cgx = dev_get_priv(dev);
> +	int i;
> +
> +	debug("%s: cgx remove reg_base %p cgx_id %d",
> +	      __func__, cgx->reg_base, cgx->cgx_id);
> +	for (i = 0; i < cgx->lmac_count; i++)
> +		cgx_lmac_mac_filter_clear(cgx->lmac[i]);
> +
> +	return 0;
> +}
> +
> +U_BOOT_DRIVER(cgx) = {
> +	.name	= "cgx",
> +	.id	= UCLASS_MISC,
> +	.probe	= cgx_probe,
> +	.remove	= cgx_remove,
> +	.priv_auto_alloc_size = sizeof(struct cgx),
> +};
> +
> +static struct pci_device_id cgx_supported[] = {
> +	{PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_CGX) },
> +	{}
> +};
> +
> +U_BOOT_PCI_DEVICE(cgx, cgx_supported);
> diff --git a/drivers/net/octeontx2/cgx.h b/drivers/net/octeontx2/cgx.h
> new file mode 100644
> index 0000000000..f287692712
> --- /dev/null
> +++ b/drivers/net/octeontx2/cgx.h
> @@ -0,0 +1,105 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#ifndef __CGX_H__
> +#define __CGX_H__
> +
> +#include "cgx_intf.h"
> +
> +#define PCI_DEVICE_ID_OCTEONTX2_CGX	0xA059
> +
> +#define MAX_LMAC_PER_CGX		4
> +#define CGX_PER_NODE			3
> +
> +enum lmac_type {
> +	LMAC_MODE_SGMII		= 0,
> +	LMAC_MODE_XAUI		= 1,
> +	LMAC_MODE_RXAUI		= 2,
> +	LMAC_MODE_10G_R		= 3,
> +	LMAC_MODE_40G_R		= 4,
> +	LMAC_MODE_QSGMII	= 6,
> +	LMAC_MODE_25G_R		= 7,
> +	LMAC_MODE_50G_R		= 8,
> +	LMAC_MODE_100G_R	= 9,
> +	LMAC_MODE_USXGMII	= 10,
> +};
> +
> +extern char lmac_type_to_str[][8];
> +
> +extern char lmac_speed_to_str[][8];
> +
> +struct lmac_priv {
> +	u8 enable:1;
> +	u8 full_duplex:1;
> +	u8 speed:4;
> +	u8 mode:1;
> +	u8 rsvd:1;
> +	u8 mac_addr[6];
> +};
> +
> +struct cgx;
> +struct nix;
> +struct nix_af;
> +
> +struct lmac {
> +	struct cgx	*cgx;
> +	struct nix	*nix;
> +	char		name[16];
> +	enum lmac_type	lmac_type;
> +	bool		init_pend;
> +	u8		instance;
> +	u8		lmac_id;
> +	u8		pknd;
> +	u8		link_num;
> +	u32		chan_num;
> +	u8		mac_addr[6];
> +};
> +
> +struct cgx {
> +	struct nix_af		*nix_af;
> +	void __iomem		*reg_base;
> +	struct udevice		*dev;
> +	struct lmac		*lmac[MAX_LMAC_PER_CGX];
> +	u8			cgx_id;
> +	u8			lmac_count;
> +};
> +
> +static inline void cgx_write(struct cgx *cgx, u8 lmac, u64 offset, u64 val)
> +{
> +	writeq(val, cgx->reg_base + CMR_SHIFT(lmac) + offset);
> +}
> +
> +static inline u64 cgx_read(struct cgx *cgx, u8 lmac, u64 offset)
> +{
> +	return readq(cgx->reg_base + CMR_SHIFT(lmac) + offset);
> +}
> +
> +/**
> + * Given an LMAC/PF instance number, return the lmac
> + * Per design, each PF has only one LMAC mapped.
> + *
> + * @param lmac_instance	LMAC instance to find
> + *
> + * @return	pointer to lmac data structure or NULL if not found
> + */
> +struct lmac *nix_get_cgx_lmac(int lmac_instance);
> +
> +int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind);
> +int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable);
> +int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable);
> +int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
> +			 u64 *status);
> +int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status);
> +void cgx_lmac_mac_filter_setup(struct lmac *lmac);
> +
> +int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts);
> +int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts);
> +int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac);
> +int cgx_intf_set_macaddr(struct udevice *dev);
> +int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane);
> +int cgx_intf_display_eye(u8 qlm, u8 lane);
> +int cgx_intf_display_serdes(u8 qlm, u8 lane);
> +
> +#endif /* __CGX_H__ */
> diff --git a/drivers/net/octeontx2/cgx_intf.c b/drivers/net/octeontx2/cgx_intf.c
> new file mode 100644
> index 0000000000..37d9a2bb73
> --- /dev/null
> +++ b/drivers/net/octeontx2/cgx_intf.c
> @@ -0,0 +1,715 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <misc.h>
> +#include <net.h>
> +
> +#include <linux/bitops.h>
> +#include <linux/delay.h>
> +#include <linux/list.h>
> +
> +#include <asm/arch/board.h>
> +#include <asm/io.h>
> +
> +#include "cgx_intf.h"
> +#include "cgx.h"
> +#include "nix.h"
> +
> +static u64 cgx_rd_scrx(u8 cgx, u8 lmac, u8 index)
> +{
> +	u64 addr;
> +
> +	addr = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
> +	addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac);
> +	return readq(addr);
> +}
> +
> +static void cgx_wr_scrx(u8 cgx, u8 lmac, u8 index, u64 val)
> +{
> +	u64 addr;
> +
> +	addr = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
> +	addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac);
> +	writeq(val, addr);
> +}
> +
> +static u64 cgx_rd_scr0(u8 cgx, u8 lmac)
> +{
> +	return cgx_rd_scrx(cgx, lmac, 0);
> +}
> +
> +static u64 cgx_rd_scr1(u8 cgx, u8 lmac)
> +{
> +	return cgx_rd_scrx(cgx, lmac, 1);
> +}
> +
> +static void cgx_wr_scr0(u8 cgx, u8 lmac, u64 val)
> +{
> +	return cgx_wr_scrx(cgx, lmac, 0, val);
> +}
> +
> +static void cgx_wr_scr1(u8 cgx, u8 lmac, u64 val)
> +{
> +	return cgx_wr_scrx(cgx, lmac, 1, val);
> +}
> +
> +static inline void set_ownership(u8 cgx, u8 lmac, u8 val)
> +{
> +	union cgx_scratchx1 scr1;
> +
> +	scr1.u = cgx_rd_scr1(cgx, lmac);
> +	scr1.s.own_status = val;
> +	cgx_wr_scr1(cgx, lmac, scr1.u);
> +}
> +
> +static int wait_for_ownership(u8 cgx, u8 lmac)
> +{
> +	union cgx_scratchx1 scr1;
> +	union cgx_scratchx0 scr0;
> +	u64 cmrx_int;
> +	int timeout = 5000;
> +
> +	do {
> +		scr1.u = cgx_rd_scr1(cgx, lmac);
> +		scr0.u = cgx_rd_scr0(cgx, lmac);
> +		/* clear async events if any */
> +		if (scr0.s.evt_sts.evt_type == CGX_EVT_ASYNC &&
> +		    scr0.s.evt_sts.ack) {
> +			/* clear interrupt */
> +			cmrx_int = readq(CGX_CMR_INT +
> +					 CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
> +			cmrx_int |= 0x2; // Overflow bit
> +			writeq(cmrx_int, CGX_CMR_INT +
> +					 CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
> +
> +			/* clear ack */
> +			scr0.s.evt_sts.ack = 0;
> +			cgx_wr_scr0(cgx, lmac, scr0.u);
> +		}
> +
> +		if (timeout-- < 0) {
> +			debug("timeout waiting for ownership\n");
> +			return -ETIMEDOUT;
> +		}
> +		mdelay(1);
> +	} while ((scr1.s.own_status == CGX_OWN_FIRMWARE) &&
> +		  scr0.s.evt_sts.ack);
> +
> +	return 0;
> +}
> +
> +int cgx_intf_req(u8 cgx, u8 lmac, union cgx_cmd_s cmd_args, u64 *rsp,
> +		 int use_cmd_id_only)
> +{
> +	union cgx_scratchx1 scr1;
> +	union cgx_scratchx0 scr0;
> +	u64 cmrx_int;
> +	int timeout = 500;
> +	int err = 0;
> +	u8 cmd = cmd_args.cmd.id;
> +
> +	if (wait_for_ownership(cgx, lmac)) {
> +		err = -ETIMEDOUT;
> +		goto error;
> +	}
> +
> +	/* send command */
> +	scr1.u = cgx_rd_scr1(cgx, lmac);
> +
> +	if (use_cmd_id_only) {
> +		scr1.s.cmd.id = cmd;
> +	} else {
> +		cmd_args.own_status = scr1.s.own_status;
> +		scr1.s = cmd_args;
> +	}
> +	cgx_wr_scr1(cgx, lmac, scr1.u);
> +
> +	set_ownership(cgx, lmac, CGX_OWN_FIRMWARE);
> +
> +	/* wait for response and ownership */
> +	do {
> +		scr0.u = cgx_rd_scr0(cgx, lmac);
> +		scr1.u = cgx_rd_scr1(cgx, lmac);
> +		mdelay(10);
> +	} while (timeout-- && (!scr0.s.evt_sts.ack) &&
> +		 (scr1.s.own_status == CGX_OWN_FIRMWARE));
> +	if (timeout < 0) {
> +		debug("%s timeout waiting for ack\n", __func__);
> +		err = -ETIMEDOUT;
> +		goto error;
> +	}
> +
> +	if (cmd == CGX_CMD_INTF_SHUTDOWN)
> +		goto error;
> +
> +	if (scr0.s.evt_sts.evt_type != CGX_EVT_CMD_RESP) {
> +		debug("%s received async event instead of cmd resp event\n",
> +		      __func__);
> +		err = -1;
> +		goto error;
> +	}
> +	if (scr0.s.evt_sts.id != cmd) {
> +		debug("%s received resp for cmd %d expected cmd %d\n",
> +		      __func__, scr0.s.evt_sts.id, cmd);
> +		err = -1;
> +		goto error;
> +	}
> +	if (scr0.s.evt_sts.stat != CGX_STAT_SUCCESS) {
> +		debug("%s cmd%d failed on cgx%u lmac%u with errcode %d\n",
> +		      __func__, cmd, cgx, lmac, scr0.s.link_sts.err_type);
> +		err = -1;
> +	}
> +
> +error:
> +	/* clear interrupt */
> +	cmrx_int = readq(CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
> +	cmrx_int |= 0x2; // Overflow bit
> +	writeq(cmrx_int, CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
> +
> +	/* clear ownership and ack */
> +	scr0.s.evt_sts.ack = 0;
> +	cgx_wr_scr0(cgx, lmac, scr0.u);
> +
> +	*rsp = err ? 0 : scr0.u;
> +
> +	return err;
> +}
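
Every cgx_intf_*() wrapper below follows the same three-step pattern
that cgx_intf_req() implements: post the command ID in SCRATCHX(1) and
hand ownership to firmware, poll SCRATCHX(0) for the ack, then shift
off the 9-bit event-status field before decoding the payload. A minimal
caller sketch, mirroring cgx_intf_get_ver() below (error handling
trimmed):

	union cgx_scratchx0 scr0;
	union cgx_cmd_s cmd;
	u16 ver;

	cmd.cmd.id = CGX_CMD_GET_FW_VER;
	if (cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1))
		return -1;
	scr0.u >>= 9;			/* payload starts at bit 9 */
	ver = scr0.u & 0xFFFF;		/* per struct cgx_ver_s */
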
> +
> +int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_MAC_ADDR;
> +
> +	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
> +	if (ret)
> +		return -1;
> +
> +	scr0.u >>= 9;
> +	memcpy(mac, &scr0.u, 6);
> +
> +	return 0;
> +}
> +
> +int cgx_intf_get_ver(u8 cgx, u8 lmac, u8 *ver)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_FW_VER;
> +
> +	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
> +	if (ret)
> +		return -1;
> +
> +	scr0.u >>= 9;
> +	*ver = scr0.u & 0xFFFF;
> +
> +	return 0;
> +}
> +
> +int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
> +
> +	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
> +	if (ret)
> +		return -1;
> +
> +	scr0.u >>= 9;
> +	/* pass the same format as cgx_lnk_sts_s
> +	 * err_type:10, speed:4, full_duplex:1, link_up:1
> +	 */
> +	*lnk_sts = scr0.u & 0xFFFF;
> +	return 0;
> +}
> +
> +int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = up_dwn ? CGX_CMD_LINK_BRING_UP : CGX_CMD_LINK_BRING_DOWN;
> +
> +	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
> +	if (ret)
> +		return -1;
> +
> +	scr0.u >>= 9;
> +	/* pass the same format as cgx_lnk_sts_s
> +	 * err_type:10, speed:4, full_duplex:1, link_up:1
> +	 */
> +	*lnk_sts = scr0.u & 0xFFFF;
> +	return 0;
> +}
> +
> +void cgx_intf_shutdown(void)
> +{
> +	union cgx_scratchx0 scr0;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_INTF_SHUTDOWN;
> +
> +	cgx_intf_req(0, 0, cmd, &scr0.u, 1);
> +}
> +
> +int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_PRBS;
> +
> +	cmd.prbs_args.qlm = qlm;
> +	cmd.prbs_args.mode = mode;
> +	cmd.prbs_args.time = time;
> +	cmd.prbs_args.lane = lane;
> +
> +	ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
> +	if (ret)
> +		return -1;
> +
> +	return 0;
> +}
> +
> +enum cgx_mode {
> +	MODE_10G_C2C,
> +	MODE_10G_C2M,
> +	MODE_10G_KR,
> +	MODE_25G_C2C,
> +	MODE_25G_2_C2C,
> +	MODE_50G_C2C,
> +	MODE_50G_4_C2C
> +};
> +
> +static char intf_speed_to_str[][8] = {
> +	"10M",
> +	"100M",
> +	"1G",
> +	"2.5G",
> +	"5G",
> +	"10G",
> +	"20G",
> +	"25G",
> +	"40G",
> +	"50G",
> +	"80G",
> +	"100G",
> +};
> +
> +static void mode_to_args(int mode, struct cgx_mode_change_args *args)
> +{
> +	args->an = 0;
> +	args->duplex = 0;
> +	args->port = 0;
> +
> +	switch (mode) {
> +	case MODE_10G_C2C:
> +		args->speed = CGX_LINK_10G;
> +		args->mode = BIT_ULL(CGX_MODE_10G_C2C_BIT);
> +		break;
> +	case MODE_10G_C2M:
> +		args->speed = CGX_LINK_10G;
> +		args->mode = BIT_ULL(CGX_MODE_10G_C2M_BIT);
> +		break;
> +	case MODE_10G_KR:
> +		args->speed = CGX_LINK_10G;
> +		args->mode = BIT_ULL(CGX_MODE_10G_KR_BIT);
> +		args->an = 1;
> +		break;
> +	case MODE_25G_C2C:
> +		args->speed = CGX_LINK_25G;
> +		args->mode = BIT_ULL(CGX_MODE_25G_C2C_BIT);
> +		break;
> +	case MODE_25G_2_C2C:
> +		args->speed = CGX_LINK_25G;
> +		args->mode = BIT_ULL(CGX_MODE_25G_2_C2C_BIT);
> +		break;
> +	case MODE_50G_C2C:
> +		args->speed = CGX_LINK_50G;
> +		args->mode = BIT_ULL(CGX_MODE_50G_C2C_BIT);
> +		break;
> +	case MODE_50G_4_C2C:
> +		args->speed = CGX_LINK_50G;
> +		args->mode = BIT_ULL(CGX_MODE_50G_4_C2C_BIT);
> +	}
> +}
> +
> +int cgx_intf_set_mode(struct udevice *ethdev, int mode)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_MODE_CHANGE;
> +
> +	mode_to_args(mode, &cmd.mode_change_args);
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 0);
> +	if (ret) {
> +		printf("Mode change command failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +
> +	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 1);
> +	if (ret) {
> +		printf("Get Link Status failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +
> +	printf("Current Link Status: ");
> +	if (scr0.s.link_sts.speed) {
> +		printf("%s\n", intf_speed_to_str[scr0.s.link_sts.speed]);
> +		switch (scr0.s.link_sts.fec) {
> +		case 0:
> +			printf("FEC_NONE\n");
> +			break;
> +		case 1:
> +			printf("FEC_BASE_R\n");
> +			break;
> +		case 2:
> +			printf("FEC_RS\n");
> +			break;
> +		}
> +		printf("Auto Negotiation %sabled\n",
> +		       scr0.s.link_sts.an ? "En" : "Dis");
> +		printf("%s Duplex\n",
> +		       scr0.s.link_sts.full_duplex ? "Full" : "Half");
> +	} else {
> +		printf("Down\n");
> +	}
> +	return 0;
> +}
> +
> +int cgx_intf_get_mode(struct udevice *ethdev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 1);
> +	if (ret) {
> +		printf("Get link status failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +	printf("Current Interface Mode: ");
> +	switch (scr0.s.link_sts.mode) {
> +	case CGX_MODE_10G_C2C_BIT:
> +		printf("10G_C2C\n");
> +		break;
> +	case CGX_MODE_10G_C2M_BIT:
> +		printf("10G_C2M\n");
> +		break;
> +	case CGX_MODE_10G_KR_BIT:
> +		printf("10G_KR\n");
> +		break;
> +	case CGX_MODE_25G_C2C_BIT:
> +		printf("25G_C2C\n");
> +		break;
> +	case CGX_MODE_25G_2_C2C_BIT:
> +		printf("25G_2_C2C\n");
> +		break;
> +	case CGX_MODE_50G_C2C_BIT:
> +		printf("50G_C2C\n");
> +		break;
> +	case CGX_MODE_50G_4_C2C_BIT:
> +		printf("50G_4_C2C\n");
> +		break;
> +	default:
> +		printf("Unknown\n");
> +		break;
> +	}
> +	return 0;
> +}
> +
> +int cgx_intf_get_fec(struct udevice *ethdev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_SUPPORTED_FEC;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 1);
> +	if (ret) {
> +		printf("Get supported FEC failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +
> +	printf("Supported FEC type: ");
> +	switch (scr0.s.supported_fec.fec) {
> +	case 0:
> +		printf("FEC_NONE\n");
> +		break;
> +	case 1:
> +		printf("FEC_BASE_R\n");
> +		break;
> +	case 2:
> +		printf("FEC_RS\n");
> +		break;
> +	case 3:
> +		printf("FEC_BASE_R FEC_RS\n");
> +		break;
> +	}
> +
> +	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 1);
> +	if (ret) {
> +		printf("Get active fec failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +	printf("Active FEC type: ");
> +	switch (scr0.s.link_sts.fec) {
> +	case 0:
> +		printf("FEC_NONE\n");
> +		break;
> +	case 1:
> +		printf("FEC_BASE_R\n");
> +		break;
> +	case 2:
> +		printf("FEC_RS\n");
> +		break;
> +	}
> +	return 0;
> +}
> +
> +int cgx_intf_set_fec(struct udevice *ethdev, int type)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_SET_FEC;
> +	cmd.fec_args.fec = type;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 0);
> +	if (ret) {
> +		printf("Set FEC type %d failed for %s\n", type, ethdev->name);
> +		return -1;
> +	}
> +	return 0;
> +}
> +
> +int cgx_intf_get_phy_mod_type(struct udevice *ethdev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_GET_PHY_MOD_TYPE;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 1);
> +	if (ret) {
> +		printf("Get PHYMOD type failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +	printf("Current phy mod type %s\n",
> +	       scr0.s.phy_mod_type.mod ? "PAM4" : "NRZ");
> +	return 0;
> +}
> +
> +int cgx_intf_set_phy_mod_type(struct udevice *ethdev, int type)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_SET_PHY_MOD_TYPE;
> +	cmd.phy_mod_args.mod = type;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 0);
> +	if (ret) {
> +		printf("Set PHYMOD type %d failed for %s\n", type,
> +		       ethdev->name);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +int cgx_intf_set_an_lbk(struct udevice *ethdev, int enable)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_AN_LOOPBACK;
> +	cmd.cmd_args.enable = enable;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 0);
> +	if (ret) {
> +		printf("Set AN loopback command failed on %s\n", ethdev->name);
> +		return -1;
> +	}
> +	printf("AN loopback %s for %s\n", enable ? "set" : "clear",
> +	       ethdev->name);
> +
> +	return 0;
> +}
> +
> +int cgx_intf_get_ignore(struct udevice *ethdev, int cgx, int lmac)
> +{
> +	struct rvu_pf *rvu;
> +	struct nix *nix;
> +	union cgx_scratchx0 scr0;
> +	int ret, cgx_id = cgx, lmac_id = lmac;
> +	union cgx_cmd_s cmd;
> +
> +	if (ethdev) {
> +		rvu = dev_get_priv(ethdev);
> +		nix = rvu->nix;
> +		cgx_id = nix->lmac->cgx->cgx_id;
> +		lmac_id = nix->lmac->lmac_id;
> +	}
> +	cmd.cmd.id = CGX_CMD_GET_PERSIST_IGNORE;
> +
> +	ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 1);
> +	if (ret) {
> +		if (ethdev)
> +			printf("Get ignore command failed for %s\n",
> +			       ethdev->name);
> +		else
> +			printf("Get ignore command failed for CGX%d LMAC%d\n",
> +			       cgx_id, lmac_id);
> +		return -1;
> +	}
> +	if (ethdev)
> +		printf("Persist settings %signored for %s\n",
> +		       scr0.s.persist.ignore ? "" : "not ", ethdev->name);
> +	else
> +		printf("Persist settings %signored for CGX%d LMAC%d\n",
> +		       scr0.s.persist.ignore ? "" : "not ", cgx_id, lmac_id);
> +
> +	return 0;
> +}
> +
> +int cgx_intf_set_ignore(struct udevice *ethdev, int cgx, int lmac, int ignore)
> +{
> +	struct rvu_pf *rvu;
> +	struct nix *nix;
> +	union cgx_scratchx0 scr0;
> +	int ret, cgx_id = cgx, lmac_id = lmac;
> +	union cgx_cmd_s cmd;
> +
> +	if (ethdev) {
> +		rvu = dev_get_priv(ethdev);
> +		nix = rvu->nix;
> +		cgx_id = nix->lmac->cgx->cgx_id;
> +		lmac_id = nix->lmac->lmac_id;
> +	}
> +	cmd.cmd.id = CGX_CMD_SET_PERSIST_IGNORE;
> +	cmd.persist_args.ignore = ignore;
> +
> +	ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 0);
> +	if (ret) {
> +		if (ethdev)
> +			printf("Set ignore command failed for %s\n",
> +			       ethdev->name);
> +		else
> +			printf("Set ignore command failed for CGX%d LMAC%d\n",
> +			       cgx_id, lmac_id);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +int cgx_intf_set_macaddr(struct udevice *ethdev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(ethdev);
> +	struct nix *nix = rvu->nix;
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +	u64 mac, tmp;
> +
> +	memcpy((void *)&tmp, nix->lmac->mac_addr, 6);
> +	mac = swab64(tmp) >> 16;
> +	cmd.cmd.id = CGX_CMD_SET_MAC_ADDR;
> +	cmd.mac_args.addr = mac;
> +	cmd.mac_args.pf_id = rvu->pfid;
> +
> +	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
> +			   cmd, &scr0.u, 0);
> +	if (ret) {
> +		printf("Set user mac addr failed for %s\n", ethdev->name);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +int cgx_intf_display_eye(u8 qlm, u8 lane)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_DISPLAY_EYE;
> +
> +	cmd.dsp_eye_args.qlm = qlm;
> +	cmd.dsp_eye_args.lane = lane;
> +
> +	ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
> +	if (ret)
> +		return -1;
> +
> +	return 0;
> +}
> +
> +int cgx_intf_display_serdes(u8 qlm, u8 lane)
> +{
> +	union cgx_scratchx0 scr0;
> +	int ret;
> +	union cgx_cmd_s cmd;
> +
> +	cmd.cmd.id = CGX_CMD_DISPLAY_SERDES;
> +
> +	cmd.dsp_eye_args.qlm = qlm;
> +	cmd.dsp_eye_args.lane = lane;
> +
> +	ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
> +	if (ret)
> +		return -1;
> +
> +	return 0;
> +}
> diff --git a/drivers/net/octeontx2/cgx_intf.h b/drivers/net/octeontx2/cgx_intf.h
> new file mode 100644
> index 0000000000..62a7203ad8
> --- /dev/null
> +++ b/drivers/net/octeontx2/cgx_intf.h
> @@ -0,0 +1,448 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#ifndef __CGX_INTF_H__
> +#define __CGX_INTF_H__
> +
> +#define CGX_FIRMWARE_MAJOR_VER		1
> +#define CGX_FIRMWARE_MINOR_VER		0
> +
> +/* Register offsets */
> +#define CGX_CMR_INT		0x87e0e0000040
> +#define CGX_CMR_SCRATCH0	0x87e0e0001050
> +#define CGX_CMR_SCRATCH1	0x87e0e0001058
> +
> +#define CGX_SHIFT(x)		(0x1000000 * ((x) & 0x3))
> +#define CMR_SHIFT(x)		(0x40000 * ((x) & 0x3))
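
These strides make the absolute CSR addresses easy to compute by hand;
e.g. SCRATCH0 for CGX1/LMAC2:

	CGX_CMR_SCRATCH0 + CGX_SHIFT(1) + CMR_SHIFT(2)
	  = 0x87e0e0001050 + 0x1000000 + 2 * 0x40000
	  = 0x87e0e1081050
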
> +
> +/* CGX error types. Set when cmd response status is CGX_STAT_FAIL */
> +enum cgx_error_type {
> +	CGX_ERR_NONE,
> +	CGX_ERR_LMAC_NOT_ENABLED,
> +	CGX_ERR_LMAC_MODE_INVALID,
> +	CGX_ERR_REQUEST_ID_INVALID,
> +	CGX_ERR_PREV_ACK_NOT_CLEAR,
> +	CGX_ERR_PHY_LINK_DOWN,
> +	CGX_ERR_PCS_RESET_FAIL,
> +	CGX_ERR_AN_CPT_FAIL,
> +	CGX_ERR_TX_NOT_IDLE,
> +	CGX_ERR_RX_NOT_IDLE,
> +	CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
> +	CGX_ERR_SPUX_RX_ALIGN_FAIL,
> +	CGX_ERR_SPUX_TX_FAULT,
> +	CGX_ERR_SPUX_RX_FAULT,
> +	CGX_ERR_SPUX_RESET_FAIL,
> +	CGX_ERR_SPUX_AN_RESET_FAIL,
> +	CGX_ERR_SPUX_USX_AN_RESET_FAIL,
> +	CGX_ERR_SMUX_RX_LINK_NOT_OK,
> +	CGX_ERR_PCS_LINK_FAIL,
> +	CGX_ERR_TRAINING_FAIL,
> +	CGX_ERR_RX_EQU_FAIL,
> +	CGX_ERR_SPUX_BER_FAIL,
> +	CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
> +	CGX_ERR_SPUX_MARKER_LOCK_FAIL,
> +	CGX_ERR_SET_FEC_INVALID,
> +	CGX_ERR_SET_FEC_FAIL,
> +	CGX_ERR_MODULE_INVALID,
> +	CGX_ERR_MODULE_NOT_PRESENT,
> +	CGX_ERR_SPEED_CHANGE_INVALID,	/* = 28 */
> +	/* FIXME : add more error types when adding support for new modes */
> +};
> +
> +/* LINK speed types */
> +enum cgx_link_speed {
> +	CGX_LINK_NONE,
> +	CGX_LINK_10M,
> +	CGX_LINK_100M,
> +	CGX_LINK_1G,
> +	CGX_LINK_2HG,	/* 2.5 Gbps */
> +	CGX_LINK_5G,
> +	CGX_LINK_10G,
> +	CGX_LINK_20G,
> +	CGX_LINK_25G,
> +	CGX_LINK_40G,
> +	CGX_LINK_50G,
> +	CGX_LINK_80G,
> +	CGX_LINK_100G,
> +	CGX_LINK_MAX,
> +};
> +
> +/* REQUEST ID types. Input to firmware */
> +enum cgx_cmd_id {
> +	CGX_CMD_NONE = 0,
> +	CGX_CMD_GET_FW_VER,
> +	CGX_CMD_GET_MAC_ADDR,
> +	CGX_CMD_SET_MTU,
> +	CGX_CMD_GET_LINK_STS,		/* optional to user */
> +	CGX_CMD_LINK_BRING_UP,		/* = 5 */
> +	CGX_CMD_LINK_BRING_DOWN,
> +	CGX_CMD_INTERNAL_LBK,
> +	CGX_CMD_EXTERNAL_LBK,
> +	CGX_CMD_HIGIG,
> +	CGX_CMD_LINK_STAT_CHANGE,	/* = 10 */
> +	CGX_CMD_MODE_CHANGE,		/* hot plug support */
> +	CGX_CMD_INTF_SHUTDOWN,
> +	CGX_CMD_GET_MKEX_SIZE,
> +	CGX_CMD_GET_MKEX_PROFILE,
> +	CGX_CMD_GET_FWD_BASE,		/* get base address of shared FW data */
> +	CGX_CMD_GET_LINK_MODES,		/* Supported Link Modes */
> +	CGX_CMD_SET_LINK_MODE,
> +	CGX_CMD_GET_SUPPORTED_FEC,
> +	CGX_CMD_SET_FEC,
> +	CGX_CMD_GET_AN,			/* = 20 */
> +	CGX_CMD_SET_AN,
> +	CGX_CMD_GET_ADV_LINK_MODES,
> +	CGX_CMD_GET_ADV_FEC,
> +	CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
> +	CGX_CMD_SET_PHY_MOD_TYPE,	/* = 25 */
> +	CGX_CMD_PRBS,
> +	CGX_CMD_DISPLAY_EYE,
> +	CGX_CMD_GET_PHY_FEC_STATS,
> +	CGX_CMD_DISPLAY_SERDES,
> +	CGX_CMD_AN_LOOPBACK,	/* = 30 */
> +	CGX_CMD_GET_PERSIST_IGNORE,
> +	CGX_CMD_SET_PERSIST_IGNORE,
> +	CGX_CMD_SET_MAC_ADDR,
> +};
> +
> +/* async event ids */
> +enum cgx_evt_id {
> +	CGX_EVT_NONE,
> +	CGX_EVT_LINK_CHANGE,
> +};
> +
> +/* event types - cause of interrupt */
> +enum cgx_evt_type {
> +	CGX_EVT_ASYNC,
> +	CGX_EVT_CMD_RESP
> +};
> +
> +enum cgx_stat {
> +	CGX_STAT_SUCCESS,
> +	CGX_STAT_FAIL
> +};
> +
> +enum cgx_cmd_own {
> +	/* default ownership with kernel/uefi/u-boot */
> +	CGX_OWN_NON_SECURE_SW,
> +	/* set by kernel/uefi/u-boot after posting a new request to ATF */
> +	CGX_OWN_FIRMWARE,
> +};
> +
> +/* Supported LINK MODE enums
> + * Each link mode is a bit mask of these
> + * enums which are represented as bits
> + */
> +enum cgx_mode_t {
> +	CGX_MODE_SGMII_BIT = 0,
> +	CGX_MODE_1000_BASEX_BIT,
> +	CGX_MODE_QSGMII_BIT,
> +	CGX_MODE_10G_C2C_BIT,
> +	CGX_MODE_10G_C2M_BIT,
> +	CGX_MODE_10G_KR_BIT,
> +	CGX_MODE_20G_C2C_BIT,
> +	CGX_MODE_25G_C2C_BIT,
> +	CGX_MODE_25G_C2M_BIT,
> +	CGX_MODE_25G_2_C2C_BIT,
> +	CGX_MODE_25G_CR_BIT,
> +	CGX_MODE_25G_KR_BIT,
> +	CGX_MODE_40G_C2C_BIT,
> +	CGX_MODE_40G_C2M_BIT,
> +	CGX_MODE_40G_CR4_BIT,
> +	CGX_MODE_40G_KR4_BIT,
> +	CGX_MODE_40GAUI_C2C_BIT,
> +	CGX_MODE_50G_C2C_BIT,
> +	CGX_MODE_50G_C2M_BIT,
> +	CGX_MODE_50G_4_C2C_BIT,
> +	CGX_MODE_50G_CR_BIT,
> +	CGX_MODE_50G_KR_BIT,
> +	CGX_MODE_80GAUI_C2C_BIT,
> +	CGX_MODE_100G_C2C_BIT,
> +	CGX_MODE_100G_C2M_BIT,
> +	CGX_MODE_100G_CR4_BIT,
> +	CGX_MODE_100G_KR4_BIT,
> +	CGX_MODE_MAX_BIT /* = 29 */
> +};
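
Since each cgx_mode_t value is a bit position rather than a mask, a set
of link modes is built by OR-ing BIT_ULL() of the positions, as
mode_to_args() in cgx_intf.c does for single modes:

	u64 modes = BIT_ULL(CGX_MODE_10G_KR_BIT) |
		    BIT_ULL(CGX_MODE_40G_KR4_BIT);
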
> +
> +/* scratchx(0) CSR used for ATF->non-secure SW communication.
> + * This acts as the status register
> + * Provides details on command ack/status, link status, error details
> + */
> +
> +/* CAUTION : the structures below are placed in order based on bit
> + * positions. For any updates/new bitfields, the corresponding
> + * structures need to be updated as well.
> + */
> +struct cgx_evt_sts_s {			/* start from bit 0 */
> +	u64 ack:1;
> +	u64 evt_type:1;		/* cgx_evt_type */
> +	u64 stat:1;		/* cgx_stat */
> +	u64 id:6;			/* cgx_evt_id/cgx_cmd_id */
> +	u64 reserved:55;
> +};
> +
> +/* All the structures below share the same memory location, SCRATCHX(0);
> + * the value can be read/written based on the command ID.
> + */
> +
> +/* Resp to command IDs with command status as CGX_STAT_FAIL
> + * Not applicable for commands :
> + *	CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
> + *	check struct cgx_lnk_sts_s comments
> + */
> +struct cgx_err_sts_s {			/* start from bit 9 */
> +	u64 reserved1:9;
> +	u64 type:10;		/* cgx_error_type */
> +	u64 reserved2:35;
> +};
> +
> +/* Resp to cmd ID as CGX_CMD_GET_FW_VER with cmd status as CGX_STAT_SUCCESS */
> +struct cgx_ver_s {			/* start from bit 9 */
> +	u64 reserved1:9;
> +	u64 major_ver:4;
> +	u64 minor_ver:4;
> +	u64 reserved2:47;
> +};
> +
> +/* Resp to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as CGX_STAT_SUCCESS
> + * Returns each byte of MAC address in a separate bit field
> + */
> +struct cgx_mac_addr_s {			/* start from bit 9 */
> +	u64 reserved1:9;
> +	u64 addr_0:8;
> +	u64 addr_1:8;
> +	u64 addr_2:8;
> +	u64 addr_3:8;
> +	u64 addr_4:8;
> +	u64 addr_5:8;
> +	u64 reserved2:7;
> +};
> +
> +/* Resp to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
> + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
> + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed when
> + * processing link up/down/change command. Both err_type and current link status
> + * will be updated
> + * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
> + * link status will be updated
> + */
> +struct cgx_lnk_sts_s {
> +	u64 reserved1:9;
> +	u64 link_up:1;
> +	u64 full_duplex:1;
> +	u64 speed:4;	/* cgx_link_speed */
> +	u64 err_type:10;
> +	u64 an:1;		/* Current AN state : enabled/disabled */
> +	u64 fec:2;		/* Current FEC type if enabled, if not 0 */
> +	u64 port:8;	/* Share the current port info if required */
> +	u64 mode:8;	/* cgx_mode_t enum integer value */
> +	u64 reserved2:20;
> +};
> +
> +struct sh_fwd_base_s {
> +	u64 reserved1:9;
> +	u64 addr:55;
> +};
> +
> +struct cgx_link_modes_s {
> +	u64 reserved1:9;
> +	u64 modes:55;
> +};
> +
> +/* Resp to cmd ID - CGX_CMD_GET_ADV_FEC/CGX_CMD_GET_SUPPORTED_FEC
> + * fec : 2 bits
> + * typedef enum cgx_fec_type {
> + *     CGX_FEC_NONE,
> + *     CGX_FEC_BASE_R,
> + *     CGX_FEC_RS
> + * } fec_type_t;
> + */
> +struct cgx_fec_types_s {
> +	u64 reserved1:9;
> +	u64 fec:2;
> +	u64 reserved2:53;
> +};
> +
> +/* Resp to cmd ID - CGX_CMD_GET_AN */
> +struct cgx_get_an_s {
> +	u64 reserved1:9;
> +	u64 an:1;
> +	u64 reserved2:54;
> +};
> +
> +/* Resp to cmd ID - CGX_CMD_GET_PHY_MOD_TYPE */
> +struct cgx_get_phy_mod_type_s {
> +	u64 reserved1:9;
> +	u64 mod:1;		/* 0=NRZ, 1=PAM4 */
> +	u64 reserved2:54;
> +};
> +
> +/* Resp to cmd ID - CGX_CMD_GET_PERSIST_IGNORE */
> +struct cgx_get_flash_ignore_s {
> +	u64 reserved1:9;
> +	u64 ignore:1;
> +	u64 reserved2:54;
> +};
> +
> +union cgx_rsp_sts {
> +	/* Fixed, applicable for all commands/events */
> +	struct cgx_evt_sts_s evt_sts;
> +	/* response to CGX_CMD_LINK_BRINGUP/DOWN/LINK_CHANGE */
> +	struct cgx_lnk_sts_s link_sts;
> +	/* response to CGX_CMD_GET_FW_VER */
> +	struct cgx_ver_s ver;
> +	/* response to CGX_CMD_GET_MAC_ADDR */
> +	struct cgx_mac_addr_s mac_s;
> +	/* response to CGX_CMD_GET_FWD_BASE */
> +	struct sh_fwd_base_s fwd_base_s;
> +	/* response if evt_status = CMD_FAIL */
> +	struct cgx_err_sts_s err;
> +	/* response to CGX_CMD_GET_SUPPORTED_FEC */
> +	struct cgx_fec_types_s supported_fec;
> +	/* response to CGX_CMD_GET_LINK_MODES */
> +	struct cgx_link_modes_s supported_modes;
> +	/* response to CGX_CMD_GET_ADV_LINK_MODES */
> +	struct cgx_link_modes_s adv_modes;
> +	/* response to CGX_CMD_GET_ADV_FEC */
> +	struct cgx_fec_types_s adv_fec;
> +	/* response to CGX_CMD_GET_AN */
> +	struct cgx_get_an_s an;
> +	/* response to CGX_CMD_GET_PHY_MOD_TYPE */
> +	struct cgx_get_phy_mod_type_s phy_mod_type;
> +	/* response to CGX_CMD_GET_PERSIST_IGNORE */
> +	struct cgx_get_flash_ignore_s persist;
> +#ifdef NT_FW_CONFIG
> +	/* response to CGX_CMD_GET_MKEX_SIZE */
> +	struct cgx_mcam_profile_sz_s prfl_sz;
> +	/* response to CGX_CMD_GET_MKEX_PROFILE */
> +	struct cgx_mcam_profile_addr_s prfl_addr;
> +#endif
> +};
> +
> +union cgx_scratchx0 {
> +	u64 u;
> +	union cgx_rsp_sts s;
> +};
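
The key layout detail in this union: struct cgx_evt_sts_s occupies bits
<8:0> of every response, and the command-specific payload starts at bit
9, which is why the callers in cgx_intf.c do 'scr0.u >>= 9' before
masking. A decode sketch for a link-status response (field names per
struct cgx_lnk_sts_s; scr0.u assumed already read from the scratch
CSR):

	if (scr0.s.evt_sts.ack &&
	    scr0.s.evt_sts.evt_type == CGX_EVT_CMD_RESP &&
	    scr0.s.evt_sts.stat == CGX_STAT_SUCCESS) {
		bool up    = scr0.s.link_sts.link_up;
		int  speed = scr0.s.link_sts.speed;	/* enum cgx_link_speed */
	}
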
> +
> +/* scratchx(1) CSR used for non-secure SW->ATF communication
> + * This CSR acts as a command register
> + */
> +struct cgx_cmd {			/* start from bit 2 */
> +	u64 reserved1:2;
> +	u64 id:6;			/* cgx_request_id */
> +	u64 reserved2:56;
> +};
> +
> +/* All the structures below share the same memory location, SCRATCHX(1);
> + * the corresponding arguments for a command ID need to be updated here.
> + */
> +
> +/* Any command using enable/disable as an argument needs
> + * to pass the option via this structure.
> + * Ex: Loopback, HiGig...
> + */
> +struct cgx_ctl_args {			/* start from bit 8 */
> +	u64 reserved1:8;
> +	u64 enable:1;
> +	u64 reserved2:55;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
> +struct cgx_mtu_args {
> +	u64 reserved1:8;
> +	u64 size:16;
> +	u64 reserved2:40;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
> +struct cgx_mode_change_args {
> +	u64 reserved1:8;
> +	u64 speed:4;	/* cgx_link_speed enum */
> +	u64 duplex:1;	/* 0 - full duplex, 1 - half duplex */
> +	u64 an:1;	/* 0 - disable AN, 1 - enable AN */
> +	u64 port:8;	/* device port */
> +	u64 mode:42;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
> +struct cgx_link_change_args {		/* start from bit 8 */
> +	u64 reserved1:8;
> +	u64 link_up:1;
> +	u64 full_duplex:1;
> +	u64 speed:4;		/* cgx_link_speed */
> +	u64 reserved2:50;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_LINK_MODE */
> +struct cgx_set_mode_args {
> +	u64 reserved1:8;
> +	u64 mode:56;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_FEC */
> +struct cgx_set_fec_args {
> +	u64 reserved1:8;
> +	u64 fec:2;
> +	u64 reserved2:54;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
> +struct cgx_set_phy_mod_args {
> +	u64 reserved1:8;
> +	u64 mod:1;		/* 0=NRZ, 1=PAM4 */
> +	u64 reserved2:55;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_PERSIST_IGNORE */
> +struct cgx_set_flash_ignore_args {
> +	u64 reserved1:8;
> +	u64 ignore:1;
> +	u64 reserved2:55;
> +};
> +
> +/* command argument to be passed for cmd ID - CGX_CMD_SET_MAC_ADDR */
> +struct cgx_mac_addr_args {
> +	u64 reserved1:8;
> +	u64 addr:48;
> +	u64 pf_id:8;
> +};
> +
> +struct cgx_prbs_args {
> +	u64 reserved1:8; /* start from bit 8 */
> +	u64 lane:8;
> +	u64 qlm:8;
> +	u64 stop_on_error:1;
> +	u64 mode:8;
> +	u64 time:31;
> +};
> +
> +struct cgx_display_eye_args {
> +	u64 reserved1:8; /* start from bit 8 */
> +	u64 qlm:8;
> +	u64 lane:47;
> +};
> +
> +union cgx_cmd_s {
> +	u64 own_status:2;			/* cgx_cmd_own */
> +	struct cgx_cmd cmd;
> +	struct cgx_ctl_args cmd_args;
> +	struct cgx_mtu_args mtu_size;
> +	struct cgx_link_change_args lnk_args; /* Input to CGX_CMD_LINK_CHANGE */
> +	struct cgx_set_mode_args mode_args;
> +	struct cgx_mode_change_args mode_change_args;
> +	struct cgx_set_fec_args fec_args;
> +	struct cgx_set_phy_mod_args phy_mod_args;
> +	struct cgx_set_flash_ignore_args persist_args;
> +	struct cgx_mac_addr_args mac_args;
> +	/* any other arg for a command ID, e.g. MTU, DMAC filtering control */
> +	struct cgx_prbs_args prbs_args;
> +	struct cgx_display_eye_args dsp_eye_args;
> +};
> +
> +union cgx_scratchx1 {
> +	u64 u;
> +	union cgx_cmd_s s;
> +};
> +
> +#endif /* __CGX_INTF_H__ */
> diff --git a/drivers/net/octeontx2/lmt.h b/drivers/net/octeontx2/lmt.h
> new file mode 100644
> index 0000000000..84a7eab814
> --- /dev/null
> +++ b/drivers/net/octeontx2/lmt.h
> @@ -0,0 +1,49 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +/**
> + * Atomically adds a signed value to a 64 bit (aligned) memory location,
> + * and returns previous value.
> + *
> + * This version does not perform 'sync' operations to enforce memory
> + * operations.  This should only be used when there are no memory operation
> + * ordering constraints.  (This should NOT be used for reference counting -
> + * use the standard version instead.)
> + *
> + * @param ptr    address in memory to add incr to
> + * @param incr   amount to increment memory location by (signed)
> + *
> + * @return Value of memory location before increment
> + */
> +static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr)
> +{
> +	s64 result;
> +	/* Atomic add with no ordering */
> +	asm volatile("ldadd %x[i], %x[r], [%[b]]"
> +		     : [r] "=r" (result), "+m" (*ptr)
> +		     : [i] "r" (incr), [b] "r" (ptr)
> +		     : "memory");
> +	return result;
> +}
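
A usage sketch for the helper above (not part of the patch): bump a
64-bit counter with the ARMv8.1 LSE ldadd and get the pre-increment
value back; no barriers are implied, per the comment:

	s64 count = 0;
	s64 old = atomic_fetch_and_add64_nosync(&count, 4);
	/* old == 0, count == 4 */
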
> +
> +static inline void lmt_cancel(const struct nix *nix)
> +{
> +	writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL());
> +}
> +
> +static inline u64 *lmt_store_ptr(struct nix *nix)
> +{
> +	return (u64 *)((u8 *)nix->lmt_base + LMT_LF_LMTLINEX(0));
> +}
> +
> +static inline s64 lmt_submit(u64 io_address)
> +{
> +	s64 result = 0;
> +
> +	asm volatile("ldeor xzr, %x[rf],[%[rs]]"
> +			: [rf] "=r"(result) : [rs] "r"(io_address));
> +	return result;
> +}
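
For context, the LMTST flow these helpers are meant for (a sketch, not
code from this patch; 'desc' and 'io_address' are placeholders, and the
assumption that a zero return means the store did not complete is mine):
stage the send descriptor in the LF's LMT line, then trigger it with
the ldeor in lmt_submit(); lmt_cancel() discards a partially staged
line:

	u64 *line = lmt_store_ptr(nix);

	memcpy(line, &desc, sizeof(desc));	/* stage descriptor */
	if (lmt_submit(io_address) == 0)	/* LMTST did not complete */
		lmt_cancel(nix);
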
> diff --git a/drivers/net/octeontx2/nix.c b/drivers/net/octeontx2/nix.c
> new file mode 100644
> index 0000000000..0a3e8e4af0
> --- /dev/null
> +++ b/drivers/net/octeontx2/nix.c
> @@ -0,0 +1,831 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <log.h>
> +#include <malloc.h>
> +#include <memalign.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <pci.h>
> +#include <watchdog.h>
> +
> +#include <asm/arch/board.h>
> +#include <asm/arch/csrs/csrs-lmt.h>
> +#include <asm/io.h>
> +#include <asm/types.h>
> +
> +#include <linux/delay.h>
> +#include <linux/log2.h>
> +#include <linux/types.h>
> +
> +#include "nix.h"
> +#include "lmt.h"
> +#include "cgx.h"
> +
> +/**
> + * NIX needs a lot of memory areas. Rather than handle all the failure cases,
> + * we'll use a wrapper around alloc that prints an error if a memory
> + * allocation fails.
> + *
> + * @param num_elements
> + *                  Number of elements to allocate
> + * @param elem_size Size of each element
> + * @param msg       Text string to show when allocation fails
> + *
> + * @return A valid memory location or NULL on failure
> + */
> +static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg)
> +{
> +	size_t alloc_size = num_elements * elem_size;
> +	void *base = memalign(CONFIG_SYS_CACHELINE_SIZE, alloc_size);
> +
> +	if (!base)
> +		printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n",
> +		       msg ? msg : __func__, num_elements, elem_size,
> +		       alloc_size);
> +	else
> +		memset(base, 0, alloc_size);
> +
> +	debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n",
> +	      msg ? msg : __func__, num_elements, elem_size, alloc_size, base);
> +	return base;
> +}
> +
> +int npc_lf_setup(struct nix *nix)
> +{
> +	int err;
> +
> +	err = npc_lf_admin_setup(nix);
> +	if (err) {
> +		printf("%s: Error setting up npc lf admin\n", __func__);
> +		return err;
> +	}
> +
> +	return 0;
> +}
> +
> +static int npa_setup_pool(struct npa *npa, u32 pool_id,
> +			  size_t buffer_size, u32 queue_length, void *buffers[])
> +{
> +	struct {
> +		union npa_lf_aura_op_free0 f0;
> +		union npa_lf_aura_op_free1 f1;
> +	} aura_descr;
> +	int index;
> +
> +	for (index = 0; index < queue_length; index++) {
> +		buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE,
> +					  buffer_size);
> +		if (!buffers[index]) {
> +			printf("%s: Out of memory %d, size: %zu\n",
> +			       __func__, index, buffer_size);
> +			return -ENOMEM;
> +		}
> +		debug("%s: allocating buffer %d, addr %p size: %zu\n",
> +		      __func__, index, buffers[index], buffer_size);
> +
> +		/* Add the newly obtained pointer to the pool.  128 bit
> +		 * writes only.
> +		 */
> +		aura_descr.f0.s.addr = (u64)buffers[index];
> +		aura_descr.f1.u = 0;
> +		aura_descr.f1.s.aura = pool_id;
> +		st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(),
> +		      aura_descr.f0.u, aura_descr.f1.u);
> +	}
> +
> +	return 0;
> +}
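
On the "128 bit writes only" comment above: NPA_LF_AURA_OP_FREE0/1 form
a pair that the hardware wants written as a single 16-byte store, so
st128() presumably boils down to one stp. A purely illustrative sketch
of such a helper (not the definition used by this patch):

	static inline void st128_sketch(void *addr, u64 d0, u64 d1)
	{
		/* one stp = one 16-byte store to the MMIO pair */
		asm volatile("stp %x[x0], %x[x1], [%[pm]]"
			     : : [x0] "r" (d0), [x1] "r" (d1), [pm] "r" (addr)
			     : "memory");
	}
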
> +
> +int npa_lf_setup(struct nix *nix)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(nix->dev);
> +	struct nix_af *nix_af = nix->nix_af;
> +	struct npa *npa;
> +	union npa_af_const npa_af_const;
> +	union npa_aura_s *aura;
> +	union npa_pool_s *pool;
> +	union rvu_func_addr_s block_addr;
> +	int idx;
> +	int stack_page_pointers;
> +	int stack_page_bytes;
> +	int err;
> +
> +	npa = (struct npa *)calloc(1, sizeof(struct npa));
> +	if (!npa) {
> +		printf("%s: out of memory for npa instance\n", __func__);
> +		return -ENOMEM;
> +	}
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
> +	npa->npa_base = rvu->pf_base + block_addr.u;
> +	npa->npa_af = nix_af->npa_af;
> +	nix->npa = npa;
> +
> +	npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST());
> +	stack_page_pointers = npa_af_const.s.stack_page_ptrs;
> +	stack_page_bytes = npa_af_const.s.stack_page_bytes;
> +
> +	npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) /
> +							stack_page_pointers;
> +	npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) /
> +							stack_page_pointers;
> +	npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) /
> +							stack_page_pointers;
> +	npa->pool_stack_pointers = stack_page_pointers;
> +
> +	npa->q_len[NPA_POOL_RX] = RQ_QLEN;
> +	npa->q_len[NPA_POOL_TX] = SQ_QLEN;
> +	npa->q_len[NPA_POOL_SQB] = SQB_QLEN;
> +
> +	npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
> +	npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
> +	npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size;
> +
> +	npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT,
> +				     sizeof(union npa_aura_s),
> +				     "aura context");
> +	if (!npa->aura_ctx) {
> +		printf("%s: Out of memory for aura context\n", __func__);
> +		return -ENOMEM;
> +	}
> +
> +	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
> +		npa->pool_ctx[idx] = nix_memalloc(1,
> +						  sizeof(union npa_pool_s),
> +						  "pool context");
> +		if (!npa->pool_ctx[idx]) {
> +			printf("%s: Out of memory for pool context\n",
> +			       __func__);
> +			return -ENOMEM;
> +		}
> +		npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx],
> +						    stack_page_bytes,
> +						    "pool stack");
> +		if (!npa->pool_stack[idx]) {
> +			printf("%s: Out of memory for pool stack\n", __func__);
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx);
> +	if (err) {
> +		printf("%s: Error setting up NPA LF admin for lf %d\n",
> +		       __func__, nix->lf);
> +		return err;
> +	}
> +
> +	/* Set up the auras */
> +	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
> +		aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s));
> +		pool = npa->pool_ctx[idx];
> +		debug("%s aura %p pool %p\n", __func__, aura, pool);
> +		memset(aura, 0, sizeof(union npa_aura_s));
> +		aura->s.fc_ena = 0;
> +		aura->s.pool_addr = (u64)npa->pool_ctx[idx];
> +		debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__,
> +		      aura->s.pool_addr, npa->pool_ctx[idx]);
> +		aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8;
> +		aura->s.count = npa->q_len[idx];
> +		aura->s.limit = npa->q_len[idx];
> +		aura->s.ena = 1;
> +		err = npa_attach_aura(nix_af, nix->lf, aura, idx);
> +		if (err)
> +			return err;
> +
> +		memset(pool, 0, sizeof(*pool));
> +		pool->s.fc_ena = 0;
> +		pool->s.nat_align = 1;
> +		pool->s.stack_base = (u64)(npa->pool_stack[idx]);
> +		debug("%s pool.s.stack_base %llx stack_base %p\n", __func__,
> +		      pool->s.stack_base, npa->pool_stack[idx]);
> +		pool->s.buf_size =
> +			npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE;
> +		pool->s.stack_max_pages = npa->stack_pages[idx];
> +		pool->s.shift =
> +			64 - __builtin_clzll(npa->pool_stack_pointers) - 8;
> +		pool->s.ptr_start = 0;
> +		pool->s.ptr_end = (1ULL << 40) - 1;
> +		pool->s.ena = 1;
> +		err = npa_attach_pool(nix_af, nix->lf, pool, idx);
> +		if (err)
> +			return err;
> +	}
> +
> +	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
> +		npa->buffers[idx] = nix_memalloc(npa->q_len[idx],
> +						 sizeof(void *),
> +						 "buffers");
> +		if (!npa->buffers[idx]) {
> +			printf("%s: Out of memory\n", __func__);
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
> +		err = npa_setup_pool(npa, idx, npa->buf_size[idx],
> +				     npa->q_len[idx], npa->buffers[idx]);
> +		if (err) {
> +			printf("%s: Error setting up pool %d\n",
> +			       __func__, idx);
> +			return err;
> +		}
> +	}
> +	return 0;
> +}
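
The shift arithmetic in npa_lf_setup() above is worth unpacking. Taking
a queue length of 1024 as an example:

	64 - __builtin_clzll(1024) - 8
	  = 64 - 53 - 8
	  = 3			/* i.e. floor(log2(q_len)) + 1 - 8 */

so the AURA/POOL 'shift' ends up as the MSB position of the queue
length less 8, presumably reflecting 8 bits of counter resolution in
the hardware.
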
> +
> +int npa_lf_shutdown(struct nix *nix)
> +{
> +	struct npa *npa = nix->npa;
> +	int err;
> +	int pool;
> +
> +	err = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT);
> +	if (err) {
> +		printf("%s: Error %d shutting down NPA LF admin\n",
> +		       __func__, err);
> +		return err;
> +	}
> +	free(npa->aura_ctx);
> +	npa->aura_ctx = NULL;
> +
> +	for (pool = 0; pool < NPA_POOL_COUNT; pool++) {
> +		free(npa->pool_ctx[pool]);
> +		npa->pool_ctx[pool] = NULL;
> +		free(npa->pool_stack[pool]);
> +		npa->pool_stack[pool] = NULL;
> +		free(npa->buffers[pool]);
> +		npa->buffers[pool] = NULL;
> +	}
> +
> +	return 0;
> +}
> +
> +int nix_lf_setup(struct nix *nix)
> +{
> +	struct nix_af *nix_af = nix->nix_af;
> +	int idx;
> +	int err = -1;
> +
> +	/* Alloc NIX RQ HW context memory */
> +	nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz,
> +					"RQ CTX");
> +	if (!nix->rq_ctx_base)
> +		goto error;
> +	memset(nix->rq_ctx_base, 0, nix->rq_cnt * nix_af->rq_ctx_sz);
> +
> +	/* Alloc NIX SQ HW context memory */
> +	nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz,
> +					"SQ CTX");
> +	if (!nix->sq_ctx_base)
> +		goto error;
> +	memset(nix->sq_ctx_base, 0, nix->sq_cnt * nix_af->sq_ctx_sz);
> +
> +	/* Alloc NIX CQ HW context memory */
> +	nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz,
> +					"CQ CTX");
> +	if (!nix->cq_ctx_base)
> +		goto error;
> +	memset(nix->cq_ctx_base, 0, nix->cq_cnt * nix_af->cq_ctx_sz);
> +	/* Alloc NIX CQ Ring memory */
> +	for (idx = 0; idx < NIX_CQ_COUNT; idx++) {
> +		err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE);
> +		if (err)
> +			goto error;
> +	}
> +
> +	/* Alloc memory for Qints HW contexts */
> +	nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz,
> +				      "Qint CTX");
> +	if (!nix->qint_base)
> +		goto error;
> +	/* Alloc memory for CQints HW contexts */
> +	nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz,
> +				      "Cint CTX");
> +	if (!nix->cint_base)
> +		goto error;
> +	/* Alloc NIX RSS HW context memory and config the base */
> +	nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz,
> +				     "RSS CTX");
> +	if (!nix->rss_base)
> +		goto error;
> +
> +	err = nix_lf_admin_setup(nix);
> +	if (err) {
> +		printf("%s: Error setting up LF\n", __func__);
> +		goto error;
> +	}
> +
> +	return 0;
> +
> +error:
> +	if (nix->rq_ctx_base)
> +		free(nix->rq_ctx_base);
> +	nix->rq_ctx_base = NULL;
> +	if (nix->sq_ctx_base)
> +		free(nix->sq_ctx_base);
> +	nix->sq_ctx_base = NULL;
> +	if (nix->cq_ctx_base)
> +		free(nix->cq_ctx_base);
> +	nix->cq_ctx_base = NULL;
> +
> +	for (idx = 0; idx < NIX_CQ_COUNT; idx++)
> +		qmem_free(&nix->cq[idx]);
> +
> +	return err;
> +}
> +
> +int nix_lf_shutdown(struct nix *nix)
> +{
> +	struct nix_af *nix_af = nix->nix_af;
> +	int index;
> +	int err;
> +
> +	err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt,
> +				    nix->rq_cnt, nix->sq_cnt);
> +	if (err) {
> +		printf("%s: Error shutting down LF admin\n", __func__);
> +		return err;
> +	}
> +
> +	if (nix->rq_ctx_base)
> +		free(nix->rq_ctx_base);
> +	nix->rq_ctx_base = NULL;
> +	if (nix->sq_ctx_base)
> +		free(nix->sq_ctx_base);
> +	nix->sq_ctx_base = NULL;
> +	if (nix->cq_ctx_base)
> +		free(nix->cq_ctx_base);
> +	nix->cq_ctx_base = NULL;
> +
> +	for (index = 0; index < NIX_CQ_COUNT; index++)
> +		qmem_free(&nix->cq[index]);
> +
> +	debug("%s: nix lf %d reset --\n", __func__, nix->lf);
> +	return 0;
> +}
> +
> +struct nix *nix_lf_alloc(struct udevice *dev)
> +{
> +	union rvu_func_addr_s block_addr;
> +	struct nix *nix;
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct rvu_af *rvu_af = dev_get_priv(rvu->afdev);
> +	union rvu_pf_func_s pf_func;
> +	int err;
> +
> +	debug("%s(%s )\n", __func__, dev->name);
> +
> +	nix = (struct nix *)calloc(1, sizeof(*nix));
> +	if (!nix) {
> +		printf("%s: Out of memory for nix instance\n", __func__);
> +		return NULL;
> +	}
> +	nix->nix_af = rvu_af->nix_af;
> +
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
> +	nix->nix_base = rvu->pf_base + block_addr.u;
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
> +	nix->npc_base = rvu->pf_base + block_addr.u;
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_LMT;
> +	nix->lmt_base = rvu->pf_base + block_addr.u;
> +
> +	pf_func.u = 0;
> +	pf_func.s.pf = rvu->pfid;
> +	nix->pf_func = pf_func.u;
> +	nix->lf = rvu->nix_lfid;
> +	nix->pf = rvu->pfid;
> +	nix->dev = dev;
> +	nix->sq_cnt = 1;
> +	nix->rq_cnt = 1;
> +	nix->rss_grps = 1;
> +	nix->cq_cnt = 2;
> +	nix->xqe_sz = NIX_CQE_SIZE_W16;
> +
> +	nix->lmac = nix_get_cgx_lmac(nix->pf);
> +	if (!nix->lmac) {
> +		printf("%s: Error: could not find lmac for pf %d\n",
> +		       __func__, nix->pf);
> +		free(nix);
> +		return NULL;
> +	}
> +	nix->lmac->link_num =
> +		NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id,
> +				      nix->lmac->lmac_id);
> +	nix->lmac->chan_num =
> +		NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id,
> +					  nix->lmac->lmac_id, 0);
> +	/* This is rx pkind in 1:1 mapping to NIX_LINK_E */
> +	nix->lmac->pknd = nix->lmac->link_num;
> +
> +	cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd);
> +	debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name,
> +	      nix->lmac->cgx->cgx_id, nix->lmac->lmac_id);
> +	debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name,
> +	      nix->lmac->link_num, nix->lmac->chan_num, nix->lmac->pknd);
> +
> +	err = npa_lf_setup(nix);
> +	if (err)
> +		goto free_nix;
> +
> +	err = npc_lf_setup(nix);
> +	if (err)
> +		goto free_nix;
> +
> +	err = nix_lf_setup(nix);
> +	if (err)
> +		goto free_nix;
> +
> +	return nix;
> +
> +free_nix:
> +	free(nix);
> +	return NULL;
> +}
> +
> +u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id)
> +{
> +	union npa_lf_aura_op_allocx op_allocx;
> +
> +	op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base +
> +			NPA_LF_AURA_OP_ALLOCX(0), aura_id);
> +	return op_allocx.s.addr;
> +}
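
The read above is how NPA hands out buffers: the aura id is added
atomically to NPA_LF_AURA_OP_ALLOCX(0) and the hardware returns
either a buffer address or zero when the aura is empty. A minimal
caller sketch, mirroring what nix_lf_xmit() does below:

	void *buf = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
	if (!buf)
		return -ENOMEM;	/* aura exhausted */
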
> +
> +u64 nix_cq_op_status(struct nix *nix, u64 cq_id)
> +{
> +	union nixx_lf_cq_op_status op_status;
> +	s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS();
> +
> +	op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32);
> +	return op_status.u;
> +}
> +
> +/* TX */
> +static inline void nix_write_lmt(struct nix *nix, void *buffer,
> +				 int num_words)
> +{
> +	int i;
> +
> +	u64 *lmt_ptr = lmt_store_ptr(nix);
> +	u64 *ptr = buffer;
> +
> +	debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, lmt_ptr);
> +	for (i = 0; i < num_words; i++) {
> +		debug("%s data %llx lmt_ptr %p\n", __func__, ptr[i],
> +		      lmt_ptr + i);
> +		lmt_ptr[i] = ptr[i];
> +	}
> +}
> +
> +void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe)
> +{
> +	union nix_cqe_hdr_s *txcqe = (union nix_cqe_hdr_s *)cqe;
> +
> +	debug("%s: txcqe: %p\n", __func__, txcqe);
> +
> +	if (txcqe->s.cqe_type != NIX_XQE_TYPE_E_SEND) {
> +		printf("%s: Error: Unsupported CQ header type %d\n",
> +		       __func__, txcqe->s.cqe_type);
> +		return;
> +	}
> +	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
> +			 (NIX_CQ_TX << 32) | 1);
> +}
> +
> +void nix_lf_flush_tx(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	union nixx_lf_cq_op_status op_status;
> +	u32 head, tail;
> +	void *cq_tx_base = nix->cq[NIX_CQ_TX].base;
> +	union nix_cqe_hdr_s *cqe;
> +
> +	/* ack tx cqe entries */
> +	op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
> +	head = op_status.s.head;
> +	tail = op_status.s.tail;
> +	head &= (nix->cq[NIX_CQ_TX].qsize - 1);
> +	tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
> +
> +	debug("%s cq tx head %d tail %d\n", __func__, head, tail);
> +	while (head != tail) {
> +		cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz;
> +		nix_cqe_tx_pkt_handler(nix, cqe);
> +		op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
> +		head = op_status.s.head;
> +		tail = op_status.s.tail;
> +		head &= (nix->cq[NIX_CQ_TX].qsize - 1);
> +		tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
> +		debug("%s cq tx head %d tail %d\n", __func__, head, tail);
> +	}
> +}
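
The head/tail masking in the flush loop relies on the ring size
being a power of two, which the Q_SIZE_* encodings guarantee:

	/* qsize = 256: a raw head pointer of 260 wraps to
	 * 260 & (256 - 1) = 4
	 */
	u32 head = 260 & (256 - 1);
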
> +
> +int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	struct nix_tx_dr tx_dr;
> +	int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1;
> +	s64 result;
> +	void *packet;
> +
> +	nix_lf_flush_tx(dev);
> +	memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr));
> +	/* Dump TX packet in to NPA buffer */
> +	packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
> +	if (!packet) {
> +		printf("%s TX buffers unavailable\n", __func__);
> +		return -1;
> +	}
> +	memcpy(packet, pkt, pkt_len);
> +	debug("%s TX buffer %p\n", __func__, packet);
> +
> +	tx_dr.hdr.s.aura = NPA_POOL_TX;
> +	tx_dr.hdr.s.df = 0;
> +	tx_dr.hdr.s.pnc = 1;
> +	tx_dr.hdr.s.sq = 0;
> +	tx_dr.hdr.s.total = pkt_len;
> +	tx_dr.hdr.s.sizem1 = dr_sz - 2; /* FIXME - for now hdr+sg+sg1addr */
> +	debug("%s dr_sz %d\n", __func__, dr_sz);
> +
> +	tx_dr.tx_sg.s.segs = 1;
> +	tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG;
> +	tx_dr.tx_sg.s.seg1_size = pkt_len;
> +	tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT;
> +	tx_dr.sg1_addr = (dma_addr_t)packet;
> +
> +#define DEBUG_PKT
> +#ifdef DEBUG_PKT
> +	debug("TX PKT Data\n");
> +	for (int i = 0; i < pkt_len; i++) {
> +		if (i && (i % 8 == 0))
> +			debug("\n");
> +		debug("%02x ", *((u8 *)pkt + i));
> +	}
> +	debug("\n");
> +#endif
> +	do {
> +		nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2);
> +		__iowmb();
> +		result = lmt_submit((u64)(nix->nix_base +
> +					       NIXX_LF_OP_SENDX(0)));
> +		WATCHDOG_RESET();
> +	} while (result == 0);
> +
> +	return 0;
> +}
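
A note on the transmit loop: LMTST transfers on OcteonTX2 are
transactional. The descriptor is first written to the LMT region via
nix_write_lmt(), then lmt_submit() issues the submit to
NIXX_LF_OP_SENDX(0); a zero result indicates the buffered store
sequence was interrupted and the whole write-then-submit sequence
must be replayed, hence the do/while. That is my reading of the
lmt.h helpers, worth confirming against the HRM.
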
> +
> +/* RX */
> +void nix_lf_flush_rx(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	union nixx_lf_cq_op_status op_status;
> +	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
> +	struct nix_rx_dr *rx_dr;
> +	union nix_rx_parse_s *rxparse;
> +	u32 head, tail;
> +	u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz;
> +	u64 *seg;
> +
> +	/* flush rx cqe entries */
> +	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
> +	head = op_status.s.head;
> +	tail = op_status.s.tail;
> +	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +
> +	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
> +	while (head != tail) {
> +		rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz);
> +		rxparse = &rx_dr->rx_parse;
> +
> +		debug("%s: rx parse: %p\n", __func__, rxparse);
> +		debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
> +		      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
> +
> +		seg = (dma_addr_t *)(&rx_dr->rx_sg + 1);
> +
> +		st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(),
> +		      seg[0], (1ULL << 63) | NPA_POOL_RX);
> +
> +		debug("%s return %llx to NPA\n", __func__, seg[0]);
> +		nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
> +				 (NIX_CQ_RX << 32) | 1);
> +
> +		op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
> +		head = op_status.s.head;
> +		tail = op_status.s.tail;
> +		head &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +		tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +		debug("%s cq rx head %d tail %d\n", __func__, head, tail);
> +	}
> +}
> +
> +int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +
> +	/* Return rx packet to NPA */
> +	debug("%s return %p to NPA\n", __func__, pkt);
> +	st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt,
> +	      (1ULL << 63) | NPA_POOL_RX);
> +	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
> +			 (NIX_CQ_RX << 32) | 1);
> +
> +	nix_lf_flush_tx(dev);
> +	return 0;
> +}
> +
> +int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	union nixx_lf_cq_op_status op_status;
> +	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
> +	struct nix_rx_dr *rx_dr;
> +	union nix_rx_parse_s *rxparse;
> +	void *pkt, *cqe;
> +	int pkt_len = 0;
> +	u64 *addr;
> +	u32 head, tail;
> +
> +	/* fetch rx cqe entries */
> +	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
> +	head = op_status.s.head;
> +	tail = op_status.s.tail;
> +	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
> +	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
> +	if (head == tail)
> +		return -EAGAIN;
> +
> +	debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head,
> +	      nix->cq[NIX_CQ_RX].entry_sz);
> +	cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz;
> +	rx_dr = (struct nix_rx_dr *)cqe;
> +	rxparse = &rx_dr->rx_parse;
> +
> +	debug("%s: rx completion: %p\n", __func__, cqe);
> +	debug("%s: rx dr: %p\n", __func__, rx_dr);
> +	debug("%s: rx parse: %p\n", __func__, rxparse);
> +	debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
> +	      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
> +	debug("%s: rx parse: pkind %x chan %x\n",
> +	      __func__, rxparse->s.pkind, rxparse->s.chan);
> +
> +	if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) {
> +		printf("%s: Error: Unsupported CQ header type in Rx %d\n",
> +		       __func__, rx_dr->hdr.s.cqe_type);
> +		return -1;
> +	}
> +
> +	pkt_len = rxparse->s.pkt_lenm1 + 1;
> +	addr = (dma_addr_t *)(&rx_dr->rx_sg + 1);
> +	pkt = (void *)addr[0];
> +
> +	debug("%s: segs: %d (%d at 0x%llx, %d at 0x%llx, %d at 0x%llx)\n", __func__,
> +	      rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0],
> +	      rx_dr->rx_sg.s.seg2_size, addr[1],
> +	      rx_dr->rx_sg.s.seg3_size, addr[2]);
> +	if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size +
> +			rx_dr->rx_sg.s.seg3_size) {
> +		debug("%s: Error: rx buffer size too small\n", __func__);
> +		return -1;
> +	}
> +
> +	__iowmb();
> +#define DEBUG_PKT
> +#ifdef DEBUG_PKT
> +	debug("RX PKT Data\n");
> +	for (int i = 0; i < pkt_len; i++) {
> +		if (i && (i % 8 == 0))
> +			debug("\n");
> +		debug("%02x ", *((u8 *)pkt + i));
> +	}
> +	debug("\n");
> +#endif
> +
> +	*packetp = (uchar *)pkt;
> +
> +	return pkt_len;
> +}
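
For context, these hooks line up one-to-one with U-Boot's
driver-model eth_ops callbacks; the actual wiring lives in rvu_pf.c
(not in this hunk), so the struct below is only an illustrative
sketch:

	static const struct eth_ops sketch_nix_eth_ops = {
		.start		= nix_lf_init,
		.send		= nix_lf_xmit,
		.recv		= nix_lf_recv,
		.free_pkt	= nix_lf_free_pkt,
		.stop		= nix_lf_halt,
		.write_hwaddr	= nix_lf_setup_mac,
	};

The -EAGAIN from nix_lf_recv() tells the eth loop no packet is
pending, and free_pkt() returns the buffer to the NPA RX pool once
the payload has been consumed.
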
> +
> +int nix_lf_setup_mac(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	struct eth_pdata *pdata = dev_get_platdata(dev);
> +
> +	/* If the lower-level firmware fails to set a proper MAC,
> +	 * the U-Boot framework assigns a random address. Use this
> +	 * hook to update the MAC address in the CGX LMAC and run
> +	 * the MAC filter setup to install the new address.
> +	 */
> +	if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) {
> +		memcpy(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN);
> +		eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
> +					      pdata->enetaddr);
> +		cgx_lmac_mac_filter_setup(nix->lmac);
> +		/* Forward the user-provided MAC address to ATF so it
> +		 * is updated in sh_fwdata for later use by Linux.
> +		 */
> +		cgx_intf_set_macaddr(dev);
> +		debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr);
> +		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
> +	}
> +	debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr);
> +	return 0;
> +}
> +
> +void nix_lf_halt(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +
> +	cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false);
> +
> +	mdelay(1);
> +
> +	/* Flush tx and rx descriptors */
> +	nix_lf_flush_rx(dev);
> +	nix_lf_flush_tx(dev);
> +}
> +
> +int nix_lf_init(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	struct lmac *lmac = nix->lmac;
> +	int ret;
> +	u64 link_sts;
> +	u8 link, speed;
> +	u16 errcode;
> +
> +	printf("Waiting for CGX%d LMAC%d [%s] link status...",
> +	       lmac->cgx->cgx_id, lmac->lmac_id,
> +	       lmac_type_to_str[lmac->lmac_type]);
> +
> +	if (lmac->init_pend) {
> +		/* Bring up LMAC */
> +		ret = cgx_lmac_link_enable(lmac, lmac->lmac_id,
> +					   true, &link_sts);
> +		lmac->init_pend = 0;
> +	} else {
> +		ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts);
> +	}
> +
> +	if (ret) {
> +		printf(" [Down]\n");
> +		return -1;
> +	}
> +
> +	link = link_sts & 0x1;
> +	speed = (link_sts >> 2) & 0xf;
> +	errcode = (link_sts >> 6) & 0x2ff;
> +	debug("%s: link %x speed %x errcode %x\n",
> +	      __func__, link, speed, errcode);
> +
> +	/* Print link status */
> +	printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down");
> +	if (!link)
> +		return -1;
> +
> +	if (!lmac->init_pend)
> +		cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);
> +
> +	return 0;
> +}
> +
> +void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	struct lmac *lmac = nix->lmac;
> +
> +	*cgxid = lmac->cgx->cgx_id;
> +	*lmacid = lmac->lmac_id;
> +}
> +
> +void nix_print_mac_info(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	struct nix *nix = rvu->nix;
> +	struct lmac *lmac = nix->lmac;
> +
> +	printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id,
> +	       lmac_type_to_str[lmac->lmac_type]);
> +}
> +
> diff --git a/drivers/net/octeontx2/nix.h b/drivers/net/octeontx2/nix.h
> new file mode 100644
> index 0000000000..03260dddb3
> --- /dev/null
> +++ b/drivers/net/octeontx2/nix.h
> @@ -0,0 +1,353 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#ifndef __NIX_H__
> +#define	__NIX_H__
> +
> +#include <asm/arch/csrs/csrs-npa.h>
> +#include <asm/arch/csrs/csrs-nix.h>
> +#include "rvu.h"
> +
> +/** Maximum number of LMACs supported */
> +#define MAX_LMAC			12
> +
> +/* NIX RX action operation */
> +#define NIX_RX_ACTIONOP_DROP		(0x0ull)
> +#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
> +#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
> +#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
> +#define NIX_RX_ACTIONOP_RSS		(0x4ull)
> +
> +/* NIX TX action operation */
> +#define NIX_TX_ACTIONOP_DROP		(0x0ull)
> +#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
> +#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
> +#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
> +#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)
> +
> +#define NIX_INTF_RX			0
> +#define NIX_INTF_TX			1
> +
> +#define NIX_INTF_TYPE_CGX		0
> +#define NIX_INTF_TYPE_LBK		1
> +#define NIX_MAX_HW_MTU			9212
> +#define NIX_MIN_HW_MTU			40
> +#define MAX_MTU				1536
> +
> +#define NPA_POOL_COUNT			3
> +#define NPA_AURA_COUNT(x)		(1ULL << ((x) + 6))
> +#define NPA_POOL_RX			0ULL
> +#define NPA_POOL_TX			1ULL
> +#define NPA_POOL_SQB			2ULL
> +#define RQ_QLEN				Q_COUNT(Q_SIZE_1K)
> +#define SQ_QLEN				Q_COUNT(Q_SIZE_1K)
> +#define SQB_QLEN			Q_COUNT(Q_SIZE_16)
> +
> +#define NIX_CQ_RX			0ULL
> +#define NIX_CQ_TX			1ULL
> +#define NIX_CQ_COUNT			2ULL
> +#define NIX_CQE_SIZE_W16		(16 * sizeof(u64))
> +#define NIX_CQE_SIZE_W64		(64 * sizeof(u64))
> +
> +/** Size of aura hardware context */
> +#define NPA_AURA_HW_CTX_SIZE		48
> +/** Size of pool hardware context */
> +#define NPA_POOL_HW_CTX_SIZE		64
> +
> +#define NPA_DEFAULT_PF_FUNC		0xffff
> +
> +#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
> +#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
> +#define NIX_LINK_LBK(a)			(12 + (a))
> +#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
> +#define MAX_LMAC_PKIND			12
> +
> +/** Number of Admin queue entries */
> +#define AQ_RING_SIZE	Q_COUNT(Q_SIZE_16)
> +
> +/** Each completion queue contains 256 entries, see NIX_CQ_CTX_S[qsize] */
> +#define CQS_QSIZE			Q_SIZE_256
> +#define CQ_ENTRIES			Q_COUNT(CQS_QSIZE)
> +/**
> + * Each completion queue entry contains 128 bytes, see
> + * NIXX_AF_LFX_CFG[xqe_size]
> + */
> +#define CQ_ENTRY_SIZE			NIX_CQE_SIZE_W16
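
Worked numbers for the sizing above: CQ_ENTRIES is
Q_COUNT(Q_SIZE_256) = 256 and CQ_ENTRY_SIZE is 16 * 8 = 128 bytes,
so each ring allocated via qmem_alloc(&nix->cq[idx], CQ_ENTRIES,
CQ_ENTRY_SIZE) in nix_lf_setup() occupies 256 * 128 = 32 KiB.
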
> +
> +enum npa_aura_size {
> +	NPA_AURA_SZ_0,
> +	NPA_AURA_SZ_128,
> +	NPA_AURA_SZ_256,
> +	NPA_AURA_SZ_512,
> +	NPA_AURA_SZ_1K,
> +	NPA_AURA_SZ_2K,
> +	NPA_AURA_SZ_4K,
> +	NPA_AURA_SZ_8K,
> +	NPA_AURA_SZ_16K,
> +	NPA_AURA_SZ_32K,
> +	NPA_AURA_SZ_64K,
> +	NPA_AURA_SZ_128K,
> +	NPA_AURA_SZ_256K,
> +	NPA_AURA_SZ_512K,
> +	NPA_AURA_SZ_1M,
> +	NPA_AURA_SZ_MAX,
> +};
> +
> +#define NPA_AURA_SIZE_DEFAULT		NPA_AURA_SZ_128
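
A quick check of the encoding: NPA_AURA_COUNT(x) expands to
1ULL << ((x) + 6), so the default NPA_AURA_SZ_128 (enum value 1)
gives 1 << 7 = 128 auras, matching its name, and NPA_AURA_SZ_1M
(value 14) gives 1 << 20.
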
> +
> +/* NIX Transmit schedulers */
> +enum nix_scheduler {
> +	NIX_TXSCH_LVL_SMQ = 0x0,
> +	NIX_TXSCH_LVL_MDQ = 0x0,
> +	NIX_TXSCH_LVL_TL4 = 0x1,
> +	NIX_TXSCH_LVL_TL3 = 0x2,
> +	NIX_TXSCH_LVL_TL2 = 0x3,
> +	NIX_TXSCH_LVL_TL1 = 0x4,
> +	NIX_TXSCH_LVL_CNT = 0x5,
> +};
> +
> +struct cgx;
> +
> +struct nix_stats {
> +	u64	num_packets;
> +	u64	num_bytes;
> +};
> +
> +struct nix;
> +struct lmac;
> +
> +struct npa_af {
> +	void __iomem		*npa_af_base;
> +	struct admin_queue	aq;
> +	u32			aura;
> +};
> +
> +struct npa {
> +	struct npa_af		*npa_af;
> +	void __iomem		*npa_base;
> +	void __iomem		*npc_base;
> +	void __iomem		*lmt_base;
> +	/** Hardware aura context */
> +	void			*aura_ctx;
> +	/** Hardware pool context */
> +	void			*pool_ctx[NPA_POOL_COUNT];
> +	void			*pool_stack[NPA_POOL_COUNT];
> +	void			**buffers[NPA_POOL_COUNT];
> +	u32			pool_stack_pages[NPA_POOL_COUNT];
> +	u32			pool_stack_pointers;
> +	u32			q_len[NPA_POOL_COUNT];
> +	u32			buf_size[NPA_POOL_COUNT];
> +	u32			stack_pages[NPA_POOL_COUNT];
> +};
> +
> +struct nix_af {
> +	struct udevice			*dev;
> +	struct nix			*lmacs[MAX_LMAC];
> +	struct npa_af			*npa_af;
> +	void __iomem			*nix_af_base;
> +	void __iomem			*npc_af_base;
> +	struct admin_queue		aq;
> +	u8				num_lmacs;
> +	s8				index;
> +	u8				xqe_size;
> +	u32				sqb_size;
> +	u32				qints;
> +	u32				cints;
> +	u32				sq_ctx_sz;
> +	u32				rq_ctx_sz;
> +	u32				cq_ctx_sz;
> +	u32				rsse_ctx_sz;
> +	u32				cint_ctx_sz;
> +	u32				qint_ctx_sz;
> +};
> +
> +struct nix_tx_dr {
> +	union nix_send_hdr_s	hdr;
> +	union nix_send_sg_s	tx_sg;
> +	dma_addr_t			sg1_addr;
> +	dma_addr_t			sg2_addr;
> +	dma_addr_t			sg3_addr;
> +	u64				in_use;
> +};
> +
> +struct nix_rx_dr {
> +	union nix_cqe_hdr_s hdr;
> +	union nix_rx_parse_s rx_parse;
> +	union nix_rx_sg_s rx_sg;
> +};
> +
> +struct nix {
> +	struct udevice			*dev;
> +	struct eth_device		*netdev;
> +	struct nix_af			*nix_af;
> +	struct npa			*npa;
> +	struct lmac			*lmac;
> +	union nix_cint_hw_s		*cint_base;
> +	union nix_cq_ctx_s		*cq_ctx_base;
> +	union nix_qint_hw_s		*qint_base;
> +	union nix_rq_ctx_s		*rq_ctx_base;
> +	union nix_rsse_s		*rss_base;
> +	union nix_sq_ctx_s		*sq_ctx_base;
> +	void				*cqe_base;
> +	struct qmem			sq;
> +	struct qmem			cq[NIX_CQ_COUNT];
> +	struct qmem			rq;
> +	struct qmem			rss;
> +	struct qmem			cq_ints;
> +	struct qmem			qints;
> +	char				name[16];
> +	void __iomem			*nix_base;	/** PF reg base */
> +	void __iomem			*npc_base;
> +	void __iomem			*lmt_base;
> +	struct nix_stats		tx_stats;
> +	struct nix_stats		rx_stats;
> +	u32				aura;
> +	int				pknd;
> +	int				lf;
> +	int				pf;
> +	u16				pf_func;
> +	u32				rq_cnt;	/** receive queues count */
> +	u32				sq_cnt;	/** send queues count */
> +	u32				cq_cnt;	/** completion queues count */
> +	u16				rss_sz;
> +	u16				sqb_size;
> +	u8				rss_grps;
> +	u8				xqe_sz;
> +};
> +
> +struct nix_aq_cq_dis {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_cq_ctx_s	cq ALIGNED;
> +	union nix_cq_ctx_s	mcq ALIGNED;
> +};
> +
> +struct nix_aq_rq_dis {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_rq_ctx_s	rq ALIGNED;
> +	union nix_rq_ctx_s	mrq ALIGNED;
> +};
> +
> +struct nix_aq_sq_dis {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_sq_ctx_s	sq ALIGNED;
> +	union nix_sq_ctx_s	msq ALIGNED;
> +};
> +
> +struct nix_aq_cq_request {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_cq_ctx_s	cq ALIGNED;
> +};
> +
> +struct nix_aq_rq_request {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_rq_ctx_s	rq ALIGNED;
> +};
> +
> +struct nix_aq_sq_request {
> +	union nix_aq_res_s	resp ALIGNED;
> +	union nix_sq_ctx_s	sq ALIGNED;
> +};
> +
> +static inline u64 nix_af_reg_read(struct nix_af *nix_af, u64 offset)
> +{
> +	u64 val = readq(nix_af->nix_af_base + offset);
> +
> +	debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
> +	      val);
> +	return val;
> +}
> +
> +static inline void nix_af_reg_write(struct nix_af *nix_af, u64 offset,
> +				    u64 val)
> +{
> +	debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
> +	      val);
> +	writeq(val, nix_af->nix_af_base + offset);
> +}
> +
> +static inline u64 nix_pf_reg_read(struct nix *nix, u64 offset)
> +{
> +	u64 val = readq(nix->nix_base + offset);
> +
> +	debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
> +	      val);
> +	return val;
> +}
> +
> +static inline void nix_pf_reg_write(struct nix *nix, u64 offset,
> +				    u64 val)
> +{
> +	debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
> +	      val);
> +	writeq(val, nix->nix_base + offset);
> +}
> +
> +static inline u64 npa_af_reg_read(struct npa_af *npa_af, u64 offset)
> +{
> +	u64 val = readq(npa_af->npa_af_base + offset);
> +
> +	debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
> +	      val);
> +	return val;
> +}
> +
> +static inline void npa_af_reg_write(struct npa_af *npa_af, u64 offset,
> +				    u64 val)
> +{
> +	debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
> +	      val);
> +	writeq(val, npa_af->npa_af_base + offset);
> +}
> +
> +static inline u64 npc_af_reg_read(struct nix_af *nix_af, u64 offset)
> +{
> +	u64 val = readq(nix_af->npc_af_base + offset);
> +
> +	debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
> +	      val);
> +	return val;
> +}
> +
> +static inline void npc_af_reg_write(struct nix_af *nix_af, u64 offset,
> +				    u64 val)
> +{
> +	debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
> +	      val);
> +	writeq(val, nix_af->npc_af_base + offset);
> +}
> +
> +int npa_attach_aura(struct nix_af *nix_af, int lf,
> +		    const union npa_aura_s *desc, u32 aura_id);
> +int npa_attach_pool(struct nix_af *nix_af, int lf,
> +		    const union npa_pool_s *desc, u32 pool_id);
> +int npa_af_setup(struct npa_af *npa_af);
> +int npa_af_shutdown(struct npa_af *npa_af);
> +int npa_lf_setup(struct nix *nix);
> +int npa_lf_shutdown(struct nix *nix);
> +int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base);
> +int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count);
> +
> +int npc_lf_admin_setup(struct nix *nix);
> +int npc_af_shutdown(struct nix_af *nix_af);
> +
> +int nix_af_setup(struct nix_af *nix_af);
> +int nix_af_shutdown(struct nix_af *nix_af);
> +int nix_lf_setup(struct nix *nix);
> +int nix_lf_shutdown(struct nix *nix);
> +struct nix *nix_lf_alloc(struct udevice *dev);
> +int nix_lf_admin_setup(struct nix *nix);
> +int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
> +			  u32 cq_count, u32 rq_count, u32 sq_count);
> +struct rvu_af *get_af(void);
> +
> +int nix_lf_setup_mac(struct udevice *dev);
> +int nix_lf_read_rom_mac(struct udevice *dev);
> +void nix_lf_halt(struct udevice *dev);
> +int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len);
> +int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp);
> +int nix_lf_init(struct udevice *dev);
> +int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len);
> +
> +#endif /* __NIX_H__ */
> diff --git a/drivers/net/octeontx2/nix_af.c b/drivers/net/octeontx2/nix_af.c
> new file mode 100644
> index 0000000000..d513917ee7
> --- /dev/null
> +++ b/drivers/net/octeontx2/nix_af.c
> @@ -0,0 +1,1102 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <memalign.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <pci.h>
> +#include <watchdog.h>
> +#include <linux/types.h>
> +#include <linux/list.h>
> +#include <linux/log2.h>
> +#include <asm/arch/board.h>
> +#include <asm/arch/csrs/csrs-npc.h>
> +#include <asm/arch/csrs/csrs-lmt.h>
> +#include <asm/io.h>
> +
> +#include "nix.h"
> +#include "lmt.h"
> +#include "cgx.h"
> +
> +static struct nix_aq_cq_dis cq_dis ALIGNED;
> +static struct nix_aq_rq_dis rq_dis ALIGNED;
> +static struct nix_aq_sq_dis sq_dis ALIGNED;
> +
> +/***************
> + * NPA API
> + ***************/
> +int npa_attach_aura(struct nix_af *nix_af, int lf,
> +		    const union npa_aura_s *desc, u32 aura_id)
> +{
> +	struct npa_af *npa = nix_af->npa_af;
> +	union npa_aq_inst_s *inst;
> +	union npa_aq_res_s *res;
> +	union npa_af_aq_status aq_stat;
> +	union npa_aura_s *context;
> +	u64 head;
> +	ulong start;
> +
> +	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, aura_id);
> +	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
> +	head = aq_stat.s.head_ptr;
> +	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
> +	res = (union npa_aq_res_s *)(npa->aq.res.base);
> +
> +	memset(inst, 0, sizeof(*inst));
> +	inst->s.lf = lf;
> +	inst->s.doneint = 0;
> +	inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
> +	inst->s.op = NPA_AQ_INSTOP_E_INIT;
> +	inst->s.res_addr = npa->aq.res.iova;
> +	inst->s.cindex = aura_id;
> +
> +	context = (union npa_aura_s *)(npa->aq.res.base +
> +						CONFIG_SYS_CACHELINE_SIZE);
> +	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
> +	memcpy(context, desc, sizeof(union npa_aura_s));
> +	__iowmb();
> +	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
> +
> +	start = get_timer(0);
> +	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
> +	       (get_timer(start) < 1000))
> +		WATCHDOG_RESET();
> +	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
> +		printf("%s: Error: result 0x%x not good\n",
> +		       __func__, res->s.compcode);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
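
The admin-queue handshake above recurs for every context type in
this file; spelled out once:

	/* 1. read NPA_AF_AQ_STATUS to locate the current head slot
	 * 2. build an npa_aq_inst_s in aq.inst.base[head] with
	 *    res_addr pointing at the result buffer
	 * 3. copy the context to install one cacheline past the
	 *    result slot
	 * 4. __iowmb(), then ring NPA_AF_AQ_DOOR with 1
	 * 5. poll res->s.compcode out of NOTDONE (1 s cap) and
	 *    require NPA_AQ_COMP_E_GOOD
	 */
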
> +
> +int npa_attach_pool(struct nix_af *nix_af, int lf,
> +		    const union npa_pool_s *desc, u32 pool_id)
> +{
> +	union npa_aq_inst_s *inst;
> +	union npa_aq_res_s *res;
> +	union npa_af_aq_status aq_stat;
> +	struct npa_af *npa = nix_af->npa_af;
> +	union npa_pool_s *context;
> +	u64 head;
> +	ulong start;
> +
> +	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, pool_id);
> +	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
> +	head = aq_stat.s.head_ptr;
> +
> +	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
> +	res = (union npa_aq_res_s *)(npa->aq.res.base);
> +
> +	memset(inst, 0, sizeof(*inst));
> +	inst->s.cindex = pool_id;
> +	inst->s.lf = lf;
> +	inst->s.doneint = 0;
> +	inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
> +	inst->s.op = NPA_AQ_INSTOP_E_INIT;
> +	inst->s.res_addr = npa->aq.res.iova;
> +
> +	context = (union npa_pool_s *)(npa->aq.res.base +
> +						CONFIG_SYS_CACHELINE_SIZE);
> +	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
> +	memcpy(context, desc, sizeof(union npa_pool_s));
> +	__iowmb();
> +	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
> +
> +	start = get_timer(0);
> +	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
> +	       (get_timer(start) < 1000))
> +		WATCHDOG_RESET();
> +
> +	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
> +		printf("%s: Error: result 0x%x not good\n",
> +		       __func__, res->s.compcode);
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base)
> +{
> +	union npa_af_lf_rst lf_rst;
> +	union npa_af_lfx_auras_cfg auras_cfg;
> +	struct npa_af *npa_af = npa->npa_af;
> +
> +	debug("%s(%p, %d, 0x%llx)\n", __func__, npa_af, lf, aura_base);
> +	lf_rst.u = 0;
> +	lf_rst.s.exec = 1;
> +	lf_rst.s.lf = lf;
> +	npa_af_reg_write(npa_af, NPA_AF_LF_RST(), lf_rst.u);
> +
> +	do {
> +		lf_rst.u = npa_af_reg_read(npa_af, NPA_AF_LF_RST());
> +		WATCHDOG_RESET();
> +	} while (lf_rst.s.exec);
> +
> +	/* Set Aura size and enable caching of contexts */
> +	auras_cfg.u = npa_af_reg_read(npa_af, NPA_AF_LFX_AURAS_CFG(lf));
> +	auras_cfg.s.loc_aura_size = NPA_AURA_SIZE_DEFAULT; //FIXME aura_size;
> +	auras_cfg.s.caching = 1;
> +	auras_cfg.s.rmt_aura_size = 0;
> +	auras_cfg.s.rmt_aura_offset = 0;
> +	auras_cfg.s.rmt_lf = 0;
> +	npa_af_reg_write(npa_af, NPA_AF_LFX_AURAS_CFG(lf), auras_cfg.u);
> +	/* Configure aura HW context base */
> +	npa_af_reg_write(npa_af, NPA_AF_LFX_LOC_AURAS_BASE(lf),
> +			 aura_base);
> +
> +	return 0;
> +}
> +
> +int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count)
> +{
> +	int pool_id;
> +	u32 head;
> +	union npa_aq_inst_s *inst;
> +	union npa_aq_res_s *res;
> +	struct npa_aq_pool_request {
> +		union npa_aq_res_s	resp ALIGNED;
> +		union npa_pool_s p0 ALIGNED;
> +		union npa_pool_s p1 ALIGNED;
> +	} pool_req ALIGNED;
> +	struct npa_aq_aura_request {
> +		union npa_aq_res_s	resp ALIGNED;
> +		union npa_aura_s a0 ALIGNED;
> +		union npa_aura_s a1 ALIGNED;
> +	} aura_req ALIGNED;
> +	union npa_af_aq_status aq_stat;
> +	union npa_af_lf_rst lf_rst;
> +	struct npa_af *npa = nix_af->npa_af;
> +	ulong start;
> +
> +	for (pool_id = 0; pool_id < pool_count; pool_id++) {
> +		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
> +		head = aq_stat.s.head_ptr;
> +		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
> +		res = &pool_req.resp;
> +
> +		memset(inst, 0, sizeof(*inst));
> +		inst->s.cindex = pool_id;
> +		inst->s.lf = lf;
> +		inst->s.doneint = 0;
> +		inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
> +		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
> +		inst->s.res_addr = (u64)&pool_req.resp;
> +
> +		memset((void *)&pool_req, 0, sizeof(pool_req));
> +		pool_req.p0.s.ena = 0;
> +		pool_req.p1.s.ena = 1;	/* Write mask */
> +		__iowmb();
> +
> +		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
> +
> +		start = get_timer(0);
> +		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
> +		       (get_timer(start) < 1000))
> +			WATCHDOG_RESET();
> +
> +		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
> +			printf("%s: Error: result 0x%x not good for lf %d\n"
> +			       " aura id %d", __func__, res->s.compcode, lf,
> +				pool_id);
> +			return -1;
> +		}
> +		debug("%s(LF %d, pool id %d) disabled\n", __func__, lf,
> +		      pool_id);
> +	}
> +
> +	for (pool_id = 0; pool_id < pool_count; pool_id++) {
> +		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
> +		head = aq_stat.s.head_ptr;
> +		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
> +		res = &aura_req.resp;
> +
> +		memset(inst, 0, sizeof(*inst));
> +		inst->s.cindex = pool_id;
> +		inst->s.lf = lf;
> +		inst->s.doneint = 0;
> +		inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
> +		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
> +		inst->s.res_addr = (u64)&aura_req.resp;
> +
> +		memset((void *)&aura_req, 0, sizeof(aura_req));
> +		aura_req.a0.s.ena = 0;
> +		aura_req.a1.s.ena = 1;	/* Write mask */
> +		__iowmb();
> +
> +		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
> +
> +		start = get_timer(0);
> +		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
> +		       (get_timer(start) < 1000))
> +			WATCHDOG_RESET();
> +
> +		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
> +			printf("%s: Error: result 0x%x not good for lf %d\n"
> +			       " aura id %d", __func__, res->s.compcode, lf,
> +			       pool_id);
> +			return -1;
> +		}
> +		debug("%s(LF %d, aura id %d) disabled\n", __func__, lf,
> +		      pool_id);
> +	}
> +
> +	/* Reset the LF */
> +	lf_rst.u = 0;
> +	lf_rst.s.exec = 1;
> +	lf_rst.s.lf = lf;
> +	npa_af_reg_write(npa, NPA_AF_LF_RST(), lf_rst.u);
> +
> +	do {
> +		lf_rst.u = npa_af_reg_read(npa, NPA_AF_LF_RST());
> +		WATCHDOG_RESET();
> +	} while (lf_rst.s.exec);
> +
> +	return 0;
> +}
> +
> +int npa_af_setup(struct npa_af *npa_af)
> +{
> +	int err;
> +	union npa_af_gen_cfg npa_cfg;
> +	union npa_af_ndc_cfg ndc_cfg;
> +	union npa_af_aq_cfg aq_cfg;
> +	union npa_af_blk_rst blk_rst;
> +
> +	err = rvu_aq_alloc(&npa_af->aq, Q_COUNT(AQ_SIZE),
> +			   sizeof(union npa_aq_inst_s),
> +			   sizeof(union npa_aq_res_s));
> +	if (err) {
> +		printf("%s: Error %d allocating admin queue\n", __func__, err);
> +		return err;
> +	}
> +	debug("%s: NPA admin queue allocated at %p %llx\n", __func__,
> +	      npa_af->aq.inst.base, npa_af->aq.inst.iova);
> +
> +	blk_rst.u = 0;
> +	blk_rst.s.rst = 1;
> +	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u);
> +
> +	/* Wait for reset to complete */
> +	do {
> +		blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
> +		WATCHDOG_RESET();
> +	} while (blk_rst.s.busy);
> +
> +	/* Set little Endian */
> +	npa_cfg.u = npa_af_reg_read(npa_af, NPA_AF_GEN_CFG());
> +	npa_cfg.s.af_be = 0;
> +	npa_af_reg_write(npa_af, NPA_AF_GEN_CFG(), npa_cfg.u);
> +	/* Enable NDC cache */
> +	ndc_cfg.u = npa_af_reg_read(npa_af, NPA_AF_NDC_CFG());
> +	ndc_cfg.s.ndc_bypass = 0;
> +	npa_af_reg_write(npa_af, NPA_AF_NDC_CFG(), ndc_cfg.u);
> +	/* Set up queue size */
> +	aq_cfg.u = npa_af_reg_read(npa_af, NPA_AF_AQ_CFG());
> +	aq_cfg.s.qsize = AQ_SIZE;
> +	npa_af_reg_write(npa_af, NPA_AF_AQ_CFG(), aq_cfg.u);
> +	/* Set up queue base address */
> +	npa_af_reg_write(npa_af, NPA_AF_AQ_BASE(), npa_af->aq.inst.iova);
> +
> +	return 0;
> +}
> +
> +int npa_af_shutdown(struct npa_af *npa_af)
> +{
> +	union npa_af_blk_rst blk_rst;
> +
> +	blk_rst.u = 0;
> +	blk_rst.s.rst = 1;
> +	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u);
> +
> +	/* Wait for reset to complete */
> +	do {
> +		blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
> +		WATCHDOG_RESET();
> +	} while (blk_rst.s.busy);
> +
> +	rvu_aq_free(&npa_af->aq);
> +
> +	debug("%s: npa af reset --\n", __func__);
> +
> +	return 0;
> +}
> +
> +/***************
> + * NIX API
> + ***************/
> +/**
> + * Setup SMQ -> TL4 -> TL3 -> TL2 -> TL1 -> MAC mapping
> + *
> + * @param nix     Handle to setup
> + *
> + * @return 0, or negative on failure
> + */
> +static int nix_af_setup_sq(struct nix *nix)
> +{
> +	union nixx_af_tl1x_schedule tl1_sched;
> +	union nixx_af_tl2x_parent tl2_parent;
> +	union nixx_af_tl3x_parent tl3_parent;
> +	union nixx_af_tl3_tl2x_cfg tl3_tl2_cfg;
> +	union nixx_af_tl3_tl2x_linkx_cfg tl3_tl2_link_cfg;
> +	union nixx_af_tl4x_parent tl4_parent;
> +	union nixx_af_tl4x_sdp_link_cfg tl4_sdp_link_cfg;
> +	union nixx_af_smqx_cfg smq_cfg;
> +	union nixx_af_mdqx_schedule mdq_sched;
> +	union nixx_af_mdqx_parent mdq_parent;
> +	union nixx_af_rx_linkx_cfg link_cfg;
> +	int tl1_index = nix->lmac->link_num; /* NIX_LINK_E enum */
> +	int tl2_index = tl1_index;
> +	int tl3_index = tl2_index;
> +	int tl4_index = tl3_index;
> +	int smq_index = tl4_index;
> +	struct nix_af *nix_af = nix->nix_af;
> +	u64 offset = 0;
> +
> +	tl1_sched.u = nix_af_reg_read(nix_af,
> +				      NIXX_AF_TL1X_SCHEDULE(tl1_index));
> +	tl1_sched.s.rr_quantum = MAX_MTU;
> +	nix_af_reg_write(nix_af, NIXX_AF_TL1X_SCHEDULE(tl1_index),
> +			 tl1_sched.u);
> +
> +	tl2_parent.u = nix_af_reg_read(nix_af,
> +				       NIXX_AF_TL2X_PARENT(tl2_index));
> +	tl2_parent.s.parent = tl1_index;
> +	nix_af_reg_write(nix_af, NIXX_AF_TL2X_PARENT(tl2_index),
> +			 tl2_parent.u);
> +
> +	tl3_parent.u = nix_af_reg_read(nix_af,
> +				       NIXX_AF_TL3X_PARENT(tl3_index));
> +	tl3_parent.s.parent = tl2_index;
> +	nix_af_reg_write(nix_af, NIXX_AF_TL3X_PARENT(tl3_index),
> +			 tl3_parent.u);
> +	tl3_tl2_cfg.u = nix_af_reg_read(nix_af,
> +					NIXX_AF_TL3_TL2X_CFG(tl3_index));
> +	tl3_tl2_cfg.s.express = 0;
> +	nix_af_reg_write(nix_af, NIXX_AF_TL3_TL2X_CFG(tl3_index),
> +			 tl3_tl2_cfg.u);
> +
> +	offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index,
> +					    nix->lmac->link_num);
> +	tl3_tl2_link_cfg.u = nix_af_reg_read(nix_af, offset);
> +	tl3_tl2_link_cfg.s.bp_ena = 1;
> +	tl3_tl2_link_cfg.s.ena = 1;
> +	tl3_tl2_link_cfg.s.relchan = 0;
> +	nix_af_reg_write(nix_af, offset, tl3_tl2_link_cfg.u);
> +
> +	tl4_parent.u = nix_af_reg_read(nix_af,
> +				       NIXX_AF_TL4X_PARENT(tl4_index));
> +	tl4_parent.s.parent = tl3_index;
> +	nix_af_reg_write(nix_af, NIXX_AF_TL4X_PARENT(tl4_index),
> +			 tl4_parent.u);
> +
> +	offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index);
> +	tl4_sdp_link_cfg.u = nix_af_reg_read(nix_af, offset);
> +	tl4_sdp_link_cfg.s.bp_ena = 0;
> +	tl4_sdp_link_cfg.s.ena = 0;
> +	tl4_sdp_link_cfg.s.relchan = 0;
> +	nix_af_reg_write(nix_af, offset, tl4_sdp_link_cfg.u);
> +
> +	smq_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_SMQX_CFG(smq_index));
> +	smq_cfg.s.express = 0;
> +	smq_cfg.s.lf = nix->lf;
> +	smq_cfg.s.desc_shp_ctl_dis = 1;
> +	smq_cfg.s.maxlen = MAX_MTU;
> +	smq_cfg.s.minlen = NIX_MIN_HW_MTU;
> +	nix_af_reg_write(nix_af, NIXX_AF_SMQX_CFG(smq_index), smq_cfg.u);
> +
> +	mdq_sched.u = nix_af_reg_read(nix_af,
> +				      NIXX_AF_MDQX_SCHEDULE(smq_index));
> +	mdq_sched.s.rr_quantum = MAX_MTU;
> +	offset = NIXX_AF_MDQX_SCHEDULE(smq_index);
> +	nix_af_reg_write(nix_af, offset, mdq_sched.u);
> +	mdq_parent.u = nix_af_reg_read(nix_af,
> +				       NIXX_AF_MDQX_PARENT(smq_index));
> +	mdq_parent.s.parent = tl4_index;
> +	nix_af_reg_write(nix_af, NIXX_AF_MDQX_PARENT(smq_index),
> +			 mdq_parent.u);
> +
> +	link_cfg.u = 0;
> +	link_cfg.s.maxlen = NIX_MAX_HW_MTU;
> +	link_cfg.s.minlen = NIX_MIN_HW_MTU;
> +	nix_af_reg_write(nix->nix_af,
> +			 NIXX_AF_RX_LINKX_CFG(nix->lmac->link_num),
> +			 link_cfg.u);
> +
> +	return 0;
> +}
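
The resulting TX scheduling topology, with every level reusing the
same index N = nix->lmac->link_num (the 1:1 NIX_LINK_E mapping):

	SQ -> SMQ/MDQ[N] -> TL4[N] -> TL3[N] -> TL2[N] -> TL1[N] -> link N
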
> +
> +/**
> + * Issue a command to the NIX AF Admin Queue
> + *
> + * @param nix_af nix AF handle
> + * @param lf     Logical function number for command
> + * @param op     Operation
> + * @param ctype  Context type
> + * @param cindex Context index
> + * @param resp   Result pointer
> + *
> + * @return	0 for success, -EBUSY on failure
> + */
> +static int nix_aq_issue_command(struct nix_af *nix_af,
> +				int lf,
> +				int op,
> +				int ctype,
> +				int cindex, union nix_aq_res_s *resp)
> +{
> +	union nixx_af_aq_status aq_status;
> +	union nix_aq_inst_s *aq_inst;
> +	union nix_aq_res_s *result = resp;
> +	ulong start;
> +
> +	debug("%s(%p, 0x%x, 0x%x, 0x%x, 0x%x, %p)\n", __func__, nix_af, lf,
> +	      op, ctype, cindex, resp);
> +	aq_status.u = nix_af_reg_read(nix_af, NIXX_AF_AQ_STATUS());
> +	aq_inst = (union nix_aq_inst_s *)(nix_af->aq.inst.base) +
> +						aq_status.s.head_ptr;
> +	aq_inst->u[0] = 0;
> +	aq_inst->u[1] = 0;
> +	aq_inst->s.op = op;
> +	aq_inst->s.ctype = ctype;
> +	aq_inst->s.lf = lf;
> +	aq_inst->s.cindex = cindex;
> +	aq_inst->s.doneint = 0;
> +	aq_inst->s.res_addr = (u64)resp;
> +	debug("%s: inst@%p: 0x%llx 0x%llx\n", __func__, aq_inst,
> +	      aq_inst->u[0], aq_inst->u[1]);
> +	__iowmb();
> +
> +	/* Ring doorbell and wait for result */
> +	nix_af_reg_write(nix_af, NIXX_AF_AQ_DOOR(), 1);
> +
> +	start = get_timer(0);
> +	/* Wait for completion */
> +	do {
> +		WATCHDOG_RESET();
> +		dsb();
> +	} while (result->s.compcode == 0 && get_timer(start) < 2);
> +
> +	if (result->s.compcode != NIX_AQ_COMP_E_GOOD) {
> +		printf("NIX:AQ fail or time out with code %d after %ld ms\n",
> +		       result->s.compcode, get_timer(start));
> +		return -EBUSY;
> +	}
> +	return 0;
> +}
> +
> +static int nix_attach_receive_queue(struct nix_af *nix_af, int lf)
> +{
> +	struct nix_aq_rq_request rq_req ALIGNED;
> +	int err;
> +
> +	debug("%s(%p, %d)\n", __func__, nix_af, lf);
> +
> +	memset(&rq_req, 0, sizeof(struct nix_aq_rq_request));
> +
> +	rq_req.rq.s.ena = 1;
> +	rq_req.rq.s.spb_ena = 1;
> +	rq_req.rq.s.ipsech_ena = 0;
> +	rq_req.rq.s.ena_wqwd = 0;
> +	rq_req.rq.s.cq = NIX_CQ_RX;
> +	rq_req.rq.s.substream = 0;	/* FIXME: Substream IDs? */
> +	rq_req.rq.s.wqe_aura = -1;	/* No WQE aura */
> +	rq_req.rq.s.spb_aura = NPA_POOL_RX;
> +	rq_req.rq.s.lpb_aura = NPA_POOL_RX;
> +	/* U-Boot doesn't use WQE group for anything */
> +	rq_req.rq.s.pb_caching = 1;
> +	rq_req.rq.s.xqe_drop_ena = 0;	/* Disable RED dropping */
> +	rq_req.rq.s.spb_drop_ena = 0;
> +	rq_req.rq.s.lpb_drop_ena = 0;
> +	rq_req.rq.s.spb_sizem1 = (MAX_MTU / (3 * 8)) - 1; /* 512 bytes */
> +	rq_req.rq.s.lpb_sizem1 = (MAX_MTU / 8) - 1;
> +	rq_req.rq.s.first_skip = 0;
> +	rq_req.rq.s.later_skip = 0;
> +	rq_req.rq.s.xqe_imm_copy = 0;
> +	rq_req.rq.s.xqe_hdr_split = 0;
> +	rq_req.rq.s.xqe_drop = 0;
> +	rq_req.rq.s.xqe_pass = 0;
> +	rq_req.rq.s.wqe_pool_drop = 0;	/* No WQE pool */
> +	rq_req.rq.s.wqe_pool_pass = 0;	/* No WQE pool */
> +	rq_req.rq.s.spb_aura_drop = 255;
> +	rq_req.rq.s.spb_aura_pass = 255;
> +	rq_req.rq.s.spb_pool_drop = 0;
> +	rq_req.rq.s.spb_pool_pass = 0;
> +	rq_req.rq.s.lpb_aura_drop = 255;
> +	rq_req.rq.s.lpb_aura_pass = 255;
> +	rq_req.rq.s.lpb_pool_drop = 0;
> +	rq_req.rq.s.lpb_pool_pass = 0;
> +	rq_req.rq.s.qint_idx = 0;
> +
> +	err = nix_aq_issue_command(nix_af, lf,
> +				   NIX_AQ_INSTOP_E_INIT,
> +				   NIX_AQ_CTYPE_E_RQ,
> +				   0, &rq_req.resp);
> +	if (err) {
> +		printf("%s: Error requesting send queue\n", __func__);
> +		return err;
> +	}
> +
> +	return 0;
> +}
> +
> +static int nix_attach_send_queue(struct nix *nix)
> +{
> +	struct nix_af *nix_af = nix->nix_af;
> +	struct nix_aq_sq_request sq_req ALIGNED;
> +	int err;
> +
> +	debug("%s(%p)\n", __func__, nix_af);
> +	err = nix_af_setup_sq(nix);
> +	if (err) {
> +		printf("%s: Error setting up SQ scheduling\n", __func__);
> +		return err;
> +	}
> +
> +	memset(&sq_req, 0, sizeof(sq_req));
> +
> +	sq_req.sq.s.ena = 1;
> +	sq_req.sq.s.cq_ena = 1;
> +	sq_req.sq.s.max_sqe_size = NIX_MAXSQESZ_E_W16;
> +	sq_req.sq.s.substream = 0; // FIXME: Substream IDs?
> +	sq_req.sq.s.sdp_mcast = 0;
> +	sq_req.sq.s.cq = NIX_CQ_TX;
> +	sq_req.sq.s.cq_limit = 0;
> +	sq_req.sq.s.smq = nix->lmac->link_num; // scheduling index
> +	sq_req.sq.s.sso_ena = 0;
> +	sq_req.sq.s.smq_rr_quantum = MAX_MTU / 4;
> +	sq_req.sq.s.default_chan = nix->lmac->chan_num;
> +	sq_req.sq.s.sqe_stype = NIX_STYPE_E_STP;
> +	sq_req.sq.s.qint_idx = 0;
> +	sq_req.sq.s.sqb_aura = NPA_POOL_SQB;
> +
> +	err = nix_aq_issue_command(nix_af, nix->lf,
> +				   NIX_AQ_INSTOP_E_INIT,
> +				   NIX_AQ_CTYPE_E_SQ,
> +				   0, &sq_req.resp);
> +	if (err) {
> +		printf("%s: Error requesting send queue\n", __func__);
> +		return err;
> +	}
> +
> +	return 0;
> +}
> +
> +static int nix_attach_completion_queue(struct nix *nix, int cq_idx)
> +{
> +	struct nix_af *nix_af = nix->nix_af;
> +	struct nix_aq_cq_request cq_req ALIGNED;
> +	int err;
> +
> +	debug("%s(%p)\n", __func__, nix_af);
> +	memset(&cq_req, 0, sizeof(cq_req));
> +	cq_req.cq.s.ena = 1;
> +	cq_req.cq.s.bpid = nix->lmac->pknd;
> +	cq_req.cq.s.substream = 0;	/* FIXME: Substream IDs? */
> +	cq_req.cq.s.drop_ena = 0;
> +	cq_req.cq.s.caching = 1;
> +	cq_req.cq.s.qsize = CQS_QSIZE;
> +	cq_req.cq.s.drop = 255 * 7 / 8;
> +	cq_req.cq.s.qint_idx = 0;
> +	cq_req.cq.s.cint_idx = 0;
> +	cq_req.cq.s.base = nix->cq[cq_idx].iova;
> +	debug("%s: CQ(%d)  base %p\n", __func__, cq_idx,
> +	      nix->cq[cq_idx].base);
> +
> +	err = nix_aq_issue_command(nix_af, nix->lf,
> +				   NIX_AQ_INSTOP_E_INIT,
> +				   NIX_AQ_CTYPE_E_CQ,
> +				   cq_idx, &cq_req.resp);
> +	if (err) {
> +		printf("%s: Error requesting completion queue\n", __func__);
> +		return err;
> +	}
> +	debug("%s: CQ(%d) allocated, base %p\n", __func__, cq_idx,
> +	      nix->cq[cq_idx].base);
> +
> +	return 0;
> +}
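
Worked numbers for the CQ init above, assuming NIX_CQ_CTX_S[drop]
is expressed in 1/256ths of the ring: drop = 255 * 7 / 8 = 223, a
threshold at roughly 7/8 of the 256-entry ring (224 entries), though
with drop_ena = 0 dropping stays disabled here anyway.
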
> +
> +int nix_lf_admin_setup(struct nix *nix)
> +{
> +	union nixx_af_lfx_rqs_cfg rqs_cfg;
> +	union nixx_af_lfx_sqs_cfg sqs_cfg;
> +	union nixx_af_lfx_cqs_cfg cqs_cfg;
> +	union nixx_af_lfx_rss_cfg rss_cfg;
> +	union nixx_af_lfx_cints_cfg cints_cfg;
> +	union nixx_af_lfx_qints_cfg qints_cfg;
> +	union nixx_af_lfx_rss_grpx rss_grp;
> +	union nixx_af_lfx_tx_cfg2 tx_cfg2;
> +	union nixx_af_lfx_cfg lfx_cfg;
> +	union nixx_af_lf_rst lf_rst;
> +	u32 index;
> +	struct nix_af *nix_af = nix->nix_af;
> +	int err;
> +
> +	/* Reset the LF */
> +	lf_rst.u = 0;
> +	lf_rst.s.lf = nix->lf;
> +	lf_rst.s.exec = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);
> +
> +	do {
> +		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
> +		WATCHDOG_RESET();
> +	} while (lf_rst.s.exec);
> +
> +	/* Config NIX RQ HW context and base*/
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_BASE(nix->lf),
> +			 (u64)nix->rq_ctx_base);
> +	/* Set caching and queue count in HW */
> +	rqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf));
> +	rqs_cfg.s.caching = 1;
> +	rqs_cfg.s.max_queuesm1 = nix->rq_cnt - 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf), rqs_cfg.u);
> +
> +	/* Config NIX SQ HW context and base*/
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_BASE(nix->lf),
> +			 (u64)nix->sq_ctx_base);
> +	sqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf));
> +	sqs_cfg.s.caching = 1;
> +	sqs_cfg.s.max_queuesm1 = nix->sq_cnt - 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf), sqs_cfg.u);
> +
> +	/* Config NIX CQ HW context and base*/
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_BASE(nix->lf),
> +			 (u64)nix->cq_ctx_base);
> +	cqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf));
> +	cqs_cfg.s.caching = 1;
> +	cqs_cfg.s.max_queuesm1 = nix->cq_cnt - 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf), cqs_cfg.u);
> +
> +	/* Config NIX RSS HW context and base */
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_BASE(nix->lf),
> +			 (u64)nix->rss_base);
> +	rss_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf));
> +	rss_cfg.s.ena = 1;
> +	rss_cfg.s.size = ilog2(nix->rss_sz) / 256;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf), rss_cfg.u);
> +
> +	for (index = 0; index < nix->rss_grps; index++) {
> +		rss_grp.u = 0;
> +		rss_grp.s.sizem1 = 0x7;
> +		rss_grp.s.offset = nix->rss_sz * index;
> +		nix_af_reg_write(nix_af,
> +				 NIXX_AF_LFX_RSS_GRPX(nix->lf, index),
> +				 rss_grp.u);
> +	}
> +
> +	/* Config CQints HW contexts and base */
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_BASE(nix->lf),
> +			 (u64)nix->cint_base);
> +	cints_cfg.u = nix_af_reg_read(nix_af,
> +				      NIXX_AF_LFX_CINTS_CFG(nix->lf));
> +	cints_cfg.s.caching = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_CFG(nix->lf),
> +			 cints_cfg.u);
> +
> +	/* Config Qints HW context and base */
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_BASE(nix->lf),
> +			 (u64)nix->qint_base);
> +	qints_cfg.u = nix_af_reg_read(nix_af,
> +				      NIXX_AF_LFX_QINTS_CFG(nix->lf));
> +	qints_cfg.s.caching = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_CFG(nix->lf),
> +			 qints_cfg.u);
> +
> +	debug("%s(%p, %d, %d)\n", __func__, nix_af, nix->lf, nix->pf);
> +
> +	/* Enable LMTST for this NIX LF */
> +	tx_cfg2.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf));
> +	tx_cfg2.s.lmt_ena = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf), tx_cfg2.u);
> +
> +	/* Use 16-word XQEs, write the npa pf_func number only */
> +	lfx_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CFG(nix->lf));
> +	lfx_cfg.s.xqe_size = NIX_XQESZ_E_W16;
> +	lfx_cfg.s.npa_pf_func = nix->pf_func;
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_CFG(nix->lf), lfx_cfg.u);
> +
> +	nix_af_reg_write(nix_af, NIXX_AF_LFX_RX_CFG(nix->lf), 0);
> +
> +	for (index = 0; index < nix->cq_cnt; index++) {
> +		err = nix_attach_completion_queue(nix, index);
> +		if (err) {
> +			printf("%s: Error attaching completion queue %d\n",
> +			       __func__, index);
> +			return err;
> +		}
> +	}
> +
> +	for (index = 0; index < nix->rq_cnt; index++) {
> +		err = nix_attach_receive_queue(nix_af, nix->lf);
> +		if (err) {
> +			printf("%s: Error attaching receive queue %d\n",
> +			       __func__, index);
> +			return err;
> +		}
> +	}
> +
> +	for (index = 0; index < nix->sq_cnt; index++) {
> +		err = nix_attach_send_queue(nix);
> +		if (err) {
> +			printf("%s: Error attaching send queue %d\n",
> +			       __func__, index);
> +			return err;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
> +			  u32 cq_count, u32 rq_count, u32 sq_count)
> +{
> +	union nixx_af_rx_sw_sync sw_sync;
> +	union nixx_af_lf_rst lf_rst;
> +	int index, err;
> +
> +	/* Flush all tx packets */
> +	sw_sync.u = 0;
> +	sw_sync.s.ena = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_RX_SW_SYNC(), sw_sync.u);
> +
> +	do {
> +		sw_sync.u = nix_af_reg_read(nix_af, NIXX_AF_RX_SW_SYNC());
> +		WATCHDOG_RESET();
> +	} while (sw_sync.s.ena);
> +
> +	for (index = 0; index < rq_count; index++) {
> +		memset((void *)&rq_dis, 0, sizeof(rq_dis));
> +		rq_dis.rq.s.ena = 0;	/* Context */
> +		rq_dis.mrq.s.ena = 1;	/* Mask */
> +		__iowmb();
> +
> +		err = nix_aq_issue_command(nix_af, lf,
> +					   NIX_AQ_INSTOP_E_WRITE,
> +					   NIX_AQ_CTYPE_E_RQ,
> +					   index, &rq_dis.resp);
> +		if (err) {
> +			printf("%s: Error disabling LF %d RQ(%d)\n",
> +			       __func__, lf, index);
> +			return err;
> +		}
> +		debug("%s: LF %d RQ(%d) disabled\n", __func__, lf, index);
> +	}
> +
> +	for (index = 0; index < sq_count; index++) {
> +		memset((void *)&sq_dis, 0, sizeof(sq_dis));
> +		sq_dis.sq.s.ena = 0;	/* Context */
> +		sq_dis.msq.s.ena = 1;	/* Mask */
> +		__iowmb();
> +
> +		err = nix_aq_issue_command(nix_af, lf,
> +					   NIX_AQ_INSTOP_E_WRITE,
> +					   NIX_AQ_CTYPE_E_SQ,
> +					   index, &sq_dis.resp);
> +		if (err) {
> +			printf("%s: Error disabling LF %d SQ(%d)\n",
> +			       __func__, lf, index);
> +			return err;
> +		}
> +		debug("%s: LF %d SQ(%d) disabled\n", __func__, lf, index);
> +	}
> +
> +	for (index = 0; index < cq_count; index++) {
> +		memset((void *)&cq_dis, 0, sizeof(cq_dis));
> +		cq_dis.cq.s.ena = 0;	/* Context */
> +		cq_dis.mcq.s.ena = 1;	/* Mask */
> +		__iowmb();
> +
> +		err = nix_aq_issue_command(nix_af, lf,
> +					   NIX_AQ_INSTOP_E_WRITE,
> +					   NIX_AQ_CTYPE_E_CQ,
> +					   index, &cq_dis.resp);
> +		if (err) {
> +			printf("%s: Error disabling LF %d CQ(%d)\n",
> +			       __func__, lf, index);
> +			return err;
> +		}
> +		debug("%s: LF %d CQ(%d) disabled\n", __func__, lf, index);
> +	}
> +
> +	/* Reset the LF */
> +	lf_rst.u = 0;
> +	lf_rst.s.lf = lf;
> +	lf_rst.s.exec = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);
> +
> +	do {
> +		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
> +		WATCHDOG_RESET();
> +	} while (lf_rst.s.exec);
> +
> +	return 0;
> +}
> +
> +int npc_lf_admin_setup(struct nix *nix)
> +{
> +	union npc_af_const af_const;
> +	union npc_af_pkindx_action0 action0;
> +	union npc_af_pkindx_action1 action1;
> +	union npc_af_intfx_kex_cfg kex_cfg;
> +	union npc_af_intfx_miss_stat_act intfx_stat_act;
> +	union npc_af_mcamex_bankx_camx_intf camx_intf;
> +	union npc_af_mcamex_bankx_camx_w0 camx_w0;
> +	union npc_af_mcamex_bankx_cfg bankx_cfg;
> +	union npc_af_mcamex_bankx_stat_act mcamex_stat_act;
> +
> +	union nix_rx_action_s rx_action;
> +	union nix_tx_action_s tx_action;
> +
> +	struct nix_af *nix_af = nix->nix_af;
> +	u32 kpus;
> +	int pkind = nix->lmac->link_num;
> +	int index;
> +	u64 offset;
> +
> +	debug("%s(%p, pkind 0x%x)\n", __func__, nix_af, pkind);
> +	af_const.u = npc_af_reg_read(nix_af, NPC_AF_CONST());
> +	kpus = af_const.s.kpus;
> +
> +	action0.u = 0;
> +	action0.s.parse_done = 1;
> +	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION0(pkind), action0.u);
> +
> +	action1.u = 0;
> +	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION1(pkind), action1.u);
> +
> +	kex_cfg.u = 0;
> +	kex_cfg.s.keyw = NPC_MCAMKEYW_E_X1;
> +	kex_cfg.s.parse_nibble_ena = 0x7;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_RX(0)),
> +			 kex_cfg.u);
> +
> +	/* HW Issue */
> +	kex_cfg.u = 0;
> +	kex_cfg.s.parse_nibble_ena = 0x7;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_TX(0)),
> +			 kex_cfg.u);
> +
> +	camx_intf.u = 0;
> +	camx_intf.s.intf = ~NPC_INTF_E_NIXX_RX(0);
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 0),
> +			 camx_intf.u);
> +
> +	camx_intf.u = 0;
> +	camx_intf.s.intf = NPC_INTF_E_NIXX_RX(0);
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 1),
> +			 camx_intf.u);
> +
> +	camx_w0.u = 0;
> +	camx_w0.s.md = ~(nix->lmac->chan_num) & (~((~0x0ull) << 12));
> +	debug("NPC LF ADMIN camx_w0.u %llx\n", camx_w0.u);
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 0),
> +			 camx_w0.u);
> +
> +	camx_w0.u = 0;
> +	camx_w0.s.md = nix->lmac->chan_num;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 1),
> +			 camx_w0.u);
> +
> +	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 0),
> +			 0);
> +
> +	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 1),
> +			 0);
> +
> +	/* Enable stats for NPC INTF RX */
> +	mcamex_stat_act.u = 0;
> +	mcamex_stat_act.s.ena = 1;
> +	mcamex_stat_act.s.stat_sel = pkind;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_MCAMEX_BANKX_STAT_ACT(pkind, 0),
> +			 mcamex_stat_act.u);
> +	intfx_stat_act.u = 0;
> +	intfx_stat_act.s.ena = 1;
> +	intfx_stat_act.s.stat_sel = 16;
> +	offset = NPC_AF_INTFX_MISS_STAT_ACT(NPC_INTF_E_NIXX_RX(0));
> +	npc_af_reg_write(nix_af, offset, intfx_stat_act.u);
> +	rx_action.u = 0;
> +	rx_action.s.pf_func = nix->pf_func;
> +	rx_action.s.op = NIX_RX_ACTIONOP_E_UCAST;
> +	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_ACTION(pkind, 0),
> +			 rx_action.u);
> +
> +	for (index = 0; index < kpus; index++)
> +		npc_af_reg_write(nix_af, NPC_AF_KPUX_CFG(index), 0);
> +
> +	rx_action.u = 0;
> +	rx_action.s.pf_func = nix->pf_func;
> +	rx_action.s.op = NIX_RX_ACTIONOP_E_DROP;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_RX(0)),
> +			 rx_action.u);
> +	bankx_cfg.u = 0;
> +	bankx_cfg.s.ena = 1;
> +	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CFG(pkind, 0),
> +			 bankx_cfg.u);
> +
> +	tx_action.u = 0;
> +	tx_action.s.op = NIX_TX_ACTIONOP_E_UCAST_DEFAULT;
> +	npc_af_reg_write(nix_af,
> +			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_TX(0)),
> +			 tx_action.u);
> +
> +#ifdef DEBUG
> +	/* Enable debug capture on RX intf */
> +	npc_af_reg_write(nix_af, NPC_AF_DBG_CTL(), 0x4);
> +#endif
> +
> +	return 0;
> +}
> +
> +int npc_af_shutdown(struct nix_af *nix_af)
> +{
> +	union npc_af_blk_rst blk_rst;
> +
> +	blk_rst.u = 0;
> +	blk_rst.s.rst = 1;
> +	npc_af_reg_write(nix_af, NPC_AF_BLK_RST(), blk_rst.u);
> +
> +	/* Wait for reset to complete */
> +	do {
> +		blk_rst.u = npc_af_reg_read(nix_af, NPC_AF_BLK_RST());
> +		WATCHDOG_RESET();
> +	} while (blk_rst.s.busy);
> +
> +	debug("%s: npc af reset --\n", __func__);
> +
> +	return 0;
> +}
> +
> +int nix_af_setup(struct nix_af *nix_af)
> +{
> +	int err;
> +	union nixx_af_const2 af_const2;
> +	union nixx_af_const3 af_const3;
> +	union nixx_af_sq_const sq_const;
> +	union nixx_af_cfg af_cfg;
> +	union nixx_af_status af_status;
> +	union nixx_af_ndc_cfg ndc_cfg;
> +	union nixx_af_aq_cfg aq_cfg;
> +	union nixx_af_blk_rst blk_rst;
> +
> +	debug("%s(%p)\n", __func__, nix_af);
> +	err = rvu_aq_alloc(&nix_af->aq, Q_COUNT(AQ_SIZE),
> +			   sizeof(union nix_aq_inst_s),
> +			   sizeof(union nix_aq_res_s));
> +	if (err) {
> +		printf("%s: Error allocating nix admin queue\n", __func__);
> +		return err;
> +	}
> +
> +	blk_rst.u = 0;
> +	blk_rst.s.rst = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u);
> +
> +	/* Wait for reset to complete */
> +	do {
> +		blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
> +		WATCHDOG_RESET();
> +	} while (blk_rst.s.busy);
> +
> +	/* Put in LE mode */
> +	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
> +	if (af_cfg.s.force_cond_clk_en || af_cfg.s.calibrate_x2p ||
> +	    af_cfg.s.force_intf_clk_en) {
> +		printf("%s: Error: Invalid NIX_AF_CFG value 0x%llx\n",
> +		       __func__, af_cfg.u);
> +		return -1;
> +	}
> +	af_cfg.s.af_be = 0;
> +	af_cfg.u |= 0x5E;	/* HW Issue */
> +	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
> +
> +	/* Perform Calibration */
> +	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
> +	af_cfg.s.calibrate_x2p = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
> +
> +	/* Wait for calibration to complete */
> +	do {
> +		af_status.u = nix_af_reg_read(nix_af, NIXX_AF_STATUS());
> +		WATCHDOG_RESET();
> +	} while (af_status.s.calibrate_done == 0);
> +
> +	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
> +	af_cfg.s.calibrate_x2p = 0;
> +	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
> +
> +	/* Enable NDC cache */
> +	ndc_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_NDC_CFG());
> +	ndc_cfg.s.ndc_ign_pois = 0;
> +	ndc_cfg.s.byp_sq = 0;
> +	ndc_cfg.s.byp_sqb = 0;
> +	ndc_cfg.s.byp_cqs = 0;
> +	ndc_cfg.s.byp_cints = 0;
> +	ndc_cfg.s.byp_dyno = 0;
> +	ndc_cfg.s.byp_mce = 0;
> +	ndc_cfg.s.byp_rqc = 0;
> +	ndc_cfg.s.byp_rsse = 0;
> +	ndc_cfg.s.byp_mc_data = 0;
> +	ndc_cfg.s.byp_mc_wqe = 0;
> +	ndc_cfg.s.byp_mr_data = 0;
> +	ndc_cfg.s.byp_mr_wqe = 0;
> +	ndc_cfg.s.byp_qints = 0;
> +	nix_af_reg_write(nix_af, NIXX_AF_NDC_CFG(), ndc_cfg.u);
> +
> +	/* Set up queue size */
> +	aq_cfg.u = 0;
> +	aq_cfg.s.qsize = AQ_SIZE;
> +	nix_af_reg_write(nix_af, NIXX_AF_AQ_CFG(), aq_cfg.u);
> +
> +	/* Set up queue base address */
> +	nix_af_reg_write(nix_af, NIXX_AF_AQ_BASE(), nix_af->aq.inst.iova);
> +
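> +	/* Cache the context sizes and queue limits reported by hardware */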
> +	af_const3.u = nix_af_reg_read(nix_af, NIXX_AF_CONST3());
> +	af_const2.u = nix_af_reg_read(nix_af, NIXX_AF_CONST2());
> +	sq_const.u = nix_af_reg_read(nix_af, NIXX_AF_SQ_CONST());
> +	nix_af->rq_ctx_sz = 1ULL << af_const3.s.rq_ctx_log2bytes;
> +	nix_af->sq_ctx_sz = 1ULL << af_const3.s.sq_ctx_log2bytes;
> +	nix_af->cq_ctx_sz = 1ULL << af_const3.s.cq_ctx_log2bytes;
> +	nix_af->rsse_ctx_sz = 1ULL << af_const3.s.rsse_log2bytes;
> +	nix_af->qints = af_const2.s.qints;
> +	nix_af->cints = af_const2.s.cints;
> +	nix_af->cint_ctx_sz = 1ULL << af_const3.s.cint_log2bytes;
> +	nix_af->qint_ctx_sz = 1ULL << af_const3.s.qint_log2bytes;
> +	nix_af->sqb_size = sq_const.s.sqb_size;
> +
> +	return 0;
> +}
> +
> +int nix_af_shutdown(struct nix_af *nix_af)
> +{
> +	union nixx_af_blk_rst blk_rst;
> +
> +	blk_rst.u = 0;
> +	blk_rst.s.rst = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u);
> +
> +	/* Wait for reset to complete */
> +	do {
> +		blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
> +		WATCHDOG_RESET();
> +	} while (blk_rst.s.busy);
> +
> +	rvu_aq_free(&nix_af->aq);
> +
> +	debug("%s: nix af reset --\n", __func__);
> +
> +	return 0;
> +}
> diff --git a/drivers/net/octeontx2/npc.h b/drivers/net/octeontx2/npc.h
> new file mode 100644
> index 0000000000..6e645cd32e
> --- /dev/null
> +++ b/drivers/net/octeontx2/npc.h
> @@ -0,0 +1,90 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#ifndef __NPC_H__
> +#define __NPC_H__
> +
> +#define RSVD_MCAM_ENTRIES_PER_PF	2	/** Ucast and Bcast */
> +#define RSVD_MCAM_ENTRIES_PER_NIXLF	1	/** Ucast for VFs */
> +
> +struct npc_kpu_profile_cam {
> +	u8 state;
> +	u8 state_mask;
> +	u16 dp0;
> +	u16 dp0_mask;
> +	u16 dp1;
> +	u16 dp1_mask;
> +	u16 dp2;
> +	u16 dp2_mask;
> +};
> +
> +struct npc_kpu_profile_action {
> +	u8 errlev;
> +	u8 errcode;
> +	u8 dp0_offset;
> +	u8 dp1_offset;
> +	u8 dp2_offset;
> +	u8 bypass_count;
> +	u8 parse_done;
> +	u8 next_state;
> +	u8 ptr_advance;
> +	u8 cap_ena;
> +	u8 lid;
> +	u8 ltype;
> +	u8 flags;
> +	u8 offset;
> +	u8 mask;
> +	u8 right;
> +	u8 shift;
> +};
> +
> +struct npc_kpu_profile {
> +	int cam_entries;
> +	int action_entries;
> +	struct npc_kpu_profile_cam *cam;
> +	struct npc_kpu_profile_action *action;
> +};
> +
> +struct npc_pkind {
> +	struct rsrc_bmap rsrc;
> +	u32	*pfchan_map;
> +};
> +
> +struct npc_mcam {
> +	struct rsrc_bmap rsrc;
> +	u16	*pfvf_map;
> +	u16	total_entries; /* Total number of MCAM entries */
> +	u16	entries;  /* Total - reserved for NIX LFs */
> +	u8	banks_per_entry;  /* Number of keywords in key */
> +	u8	keysize;
> +	u8	banks;    /* Number of MCAM banks */
> +	u16	banksize; /* Number of MCAM entries in each bank */
> +	u16	counters; /* Number of match counters */
> +	u16	nixlf_offset;
> +	u16	pf_offset;
> +};
> +
> +struct nix_af_handle;
> +struct nix_handle;
> +struct rvu_hwinfo;
> +
> +struct npc_af {
> +	struct nix_af_handle	*nix_af;
> +	struct npc_pkind	pkind;
> +	void __iomem		*npc_af_base;
> +	u8			npc_kpus;	/** Number of parser units */
> +	struct npc_mcam		mcam;
> +	struct rvu_block	block;
> +	struct rvu_hwinfo	*hw;
> +};
> +
> +struct npc {
> +	struct npc_af		*npc_af;
> +	void __iomem		*npc_base;
> +	struct nix_handle	*nix;
> +};
> +
> +#endif /* __NPC_H__ */
> diff --git a/drivers/net/octeontx2/rvu.h b/drivers/net/octeontx2/rvu.h
> new file mode 100644
> index 0000000000..f455260a6f
> --- /dev/null
> +++ b/drivers/net/octeontx2/rvu.h
> @@ -0,0 +1,119 @@
> +/* SPDX-License-Identifier:    GPL-2.0
> + *
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#ifndef __RVU_H__
> +#define __RVU_H__
> +
> +#include <asm/arch/csrs/csrs-rvu.h>
> +
> +#define ALIGNED		__aligned(CONFIG_SYS_CACHELINE_SIZE)
> +
> +#define Q_SIZE_16		0ULL /* 16 entries */
> +#define Q_SIZE_64		1ULL /* 64 entries */
> +#define Q_SIZE_256		2ULL
> +#define Q_SIZE_1K		3ULL
> +#define Q_SIZE_4K		4ULL
> +#define Q_SIZE_16K		5ULL
> +#define Q_SIZE_64K		6ULL
> +#define Q_SIZE_256K		7ULL
> +#define Q_SIZE_1M		8ULL /* Million entries */
> +#define Q_SIZE_MIN		Q_SIZE_16
> +#define Q_SIZE_MAX		Q_SIZE_1M
> +
> +#define Q_COUNT(x)		(16ULL << (2 * (x)))
> +#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
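> +/*
> + * Q_COUNT() maps the size encoding to an entry count, e.g.
> + * Q_COUNT(Q_SIZE_64) = 64. Q_SIZE(count, 4) is the inverse
> + * (ilog2(16) = 4): Q_SIZE(64, 4) = (6 - 4) / 2 = Q_SIZE_64.
> + */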
> +
> +/* Admin queue info */
> +
> +/* Since we only ever add one instruction at a time,
> + * keep the queue size at its minimum.
> + */
> +#define AQ_SIZE			Q_SIZE_16
> +/* HW head & tail pointer mask */
> +#define AQ_PTR_MASK		0xFFFFF
> +
> +struct qmem {
> +	void		*base;
> +	dma_addr_t	iova;
> +	size_t		alloc_sz;
> +	u32		qsize;
> +	u8		entry_sz;
> +};
> +
> +struct admin_queue {
> +	struct qmem inst;
> +	struct qmem res;
> +};
> +
> +struct rvu_af {
> +	struct udevice *dev;
> +	void __iomem *af_base;
> +	struct nix_af *nix_af;
> +};
> +
> +struct rvu_pf {
> +	struct udevice *dev;
> +	struct udevice *afdev;
> +	void __iomem *pf_base;
> +	struct nix *nix;
> +	u8 pfid;
> +	int nix_lfid;
> +	int npa_lfid;
> +};
> +
> +/**
> + * Store 128 bit value
> + *
> + * @param[out]	dest	pointer to destination address
> + * @param	val0	first 64 bits to write
> + * @param	val1	second 64 bits to write
> + */
> +static inline void st128(void *dest, u64 val0, u64 val1)
> +{
> +	__asm__ __volatile__("stp %x[x0], %x[x1], [%[pm]]" :
> +		: [x0]"r"(val0), [x1]"r"(val1), [pm]"r"(dest)
> +		: "memory");
> +}
> +
> +/**
> + * Load 128 bit value
> + *
> + * @param[in]	src		pointer to 128 bits of data to load
> + * @param[out]	val0		first 64 bits of data
> + * @param[out]	val1		second 64 bits of data
> + */
> +static inline void ld128(const u64 *src, u64 *val0, u64 *val1)
> +{
> +	__asm__ __volatile__ ("ldp %x[x0], %x[x1], [%[pm]]"
> +		: [x0]"=r"(*val0), [x1]"=r"(*val1)
> +		: [pm]"r"(src));
> +}
> +
> +void qmem_free(struct qmem *q);
> +int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz);
> +
> +/**
> + * Allocates an admin queue for instructions and results
> + *
> + * @param	aq	admin queue to allocate for
> + * @param	qsize	Number of entries in the queue
> + * @param	inst_size	Size of each instruction
> + * @param	res_size	Size of each result
> + *
> + * @return	-ENOMEM on error, 0 on success
> + */
> +int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
> +		 size_t inst_size, size_t res_size);
> +
> +/**
> + * Frees an admin queue
> + *
> + * @param	aq	Admin queue to free
> + */
> +void rvu_aq_free(struct admin_queue *aq);
> +
> +void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid);
> +
> +#endif /* __RVU_H__ */
> diff --git a/drivers/net/octeontx2/rvu_af.c b/drivers/net/octeontx2/rvu_af.c
> new file mode 100644
> index 0000000000..7750089a20
> --- /dev/null
> +++ b/drivers/net/octeontx2/rvu_af.c
> @@ -0,0 +1,171 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <pci_ids.h>
> +#include <linux/list.h>
> +#include <asm/io.h>
> +#include <asm/arch/board.h>
> +#include <asm/arch/csrs/csrs-npa.h>
> +
> +#include "nix.h"
> +
> +struct udevice *rvu_af_dev;
> +
> +inline struct rvu_af *get_af(void)
> +{
> +	return rvu_af_dev ? dev_get_priv(rvu_af_dev) : NULL;
> +}
> +
> +void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid)
> +{
> +	union nixx_af_rvu_lf_cfg_debug nix_lf_dbg;
> +	union npa_af_rvu_lf_cfg_debug npa_lf_dbg;
> +	union rvu_pf_func_s pf_func;
> +	struct rvu_af *af = dev_get_priv(rvu_af_dev);
> +	struct nix_af *nix_af = af->nix_af;
> +
> +	pf_func.u = 0;
> +	pf_func.s.pf = pf;
> +
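> +	/*
> +	 * Query the AF for the LFs attached to this PF: write the
> +	 * PF_FUNC with EXEC set, poll until hardware clears EXEC, then
> +	 * read back the LF number if one is valid.
> +	 */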
> +	nix_lf_dbg.u = 0;
> +	nix_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
> +	nix_lf_dbg.s.exec = 1;
> +	nix_af_reg_write(nix_af, NIXX_AF_RVU_LF_CFG_DEBUG(),
> +			 nix_lf_dbg.u);
> +	do {
> +		nix_lf_dbg.u = nix_af_reg_read(nix_af,
> +					       NIXX_AF_RVU_LF_CFG_DEBUG());
> +	} while (nix_lf_dbg.s.exec);
> +
> +	if (nix_lf_dbg.s.lf_valid)
> +		*nixid = nix_lf_dbg.s.lf;
> +
> +	debug("%s: nix lf_valid %d lf %d nixid %d\n", __func__,
> +	      nix_lf_dbg.s.lf_valid, nix_lf_dbg.s.lf, *nixid);
> +
> +	npa_lf_dbg.u = 0;
> +	npa_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
> +	npa_lf_dbg.s.exec = 1;
> +	npa_af_reg_write(nix_af->npa_af, NPA_AF_RVU_LF_CFG_DEBUG(),
> +			 npa_lf_dbg.u);
> +	do {
> +		npa_lf_dbg.u = npa_af_reg_read(nix_af->npa_af,
> +					       NPA_AF_RVU_LF_CFG_DEBUG());
> +	} while (npa_lf_dbg.s.exec);
> +
> +	if (npa_lf_dbg.s.lf_valid)
> +		*npaid = npa_lf_dbg.s.lf;
> +	debug("%s: npa lf_valid %d lf %d npaid %d\n", __func__,
> +	      npa_lf_dbg.s.lf_valid, npa_lf_dbg.s.lf, *npaid);
> +}
> +
> +struct nix_af *rvu_af_init(struct rvu_af *rvu_af)
> +{
> +	struct nix_af *nix_af;
> +	union rvu_af_addr_s block_addr;
> +	int err;
> +
> +	nix_af = (struct nix_af *)calloc(1, sizeof(struct nix_af));
> +	if (!nix_af) {
> +		printf("%s: out of memory\n", __func__);
> +		goto error;
> +	}
> +
> +	nix_af->dev = rvu_af->dev;
> +
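> +	/*
> +	 * The CSRs of each RVU block sit at a fixed offset from BAR0,
> +	 * selected by the block number encoded in the address.
> +	 */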
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
> +	nix_af->nix_af_base = rvu_af->af_base + block_addr.u;
> +
> +	nix_af->npa_af = (struct npa_af *)calloc(1, sizeof(struct npa_af));
> +	if (!nix_af->npa_af) {
> +		printf("%s: out of memory\n", __func__);
> +		goto error;
> +	}
> +
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
> +	nix_af->npa_af->npa_af_base = rvu_af->af_base + block_addr.u;
> +
> +	block_addr.u = 0;
> +	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
> +	nix_af->npc_af_base = rvu_af->af_base + block_addr.u;
> +
> +	debug("%s: Setting up npa admin\n", __func__);
> +	err = npa_af_setup(nix_af->npa_af);
> +	if (err) {
> +		printf("%s: Error %d setting up NPA admin\n", __func__, err);
> +		goto error;
> +	}
> +	debug("%s: Setting up nix af\n", __func__);
> +	err = nix_af_setup(nix_af);
> +	if (err) {
> +		printf("%s: Error %d setting up NIX admin\n", __func__, err);
> +		goto error;
> +	}
> +	debug("%s: nix_af: %p\n", __func__, nix_af);
> +	return nix_af;
> +
> +error:
> +	if (nix_af) {
> +		free(nix_af->npa_af);
> +		free(nix_af);
> +	}
> +	return NULL;
> +}
> +
> +int rvu_af_probe(struct udevice *dev)
> +{
> +	struct rvu_af *af_ptr = dev_get_priv(dev);
> +
> +	af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
> +					 PCI_REGION_MEM);
> +	debug("%s RVU AF BAR %p\n", __func__, af_ptr->af_base);
> +	af_ptr->dev = dev;
> +	rvu_af_dev = dev;
> +
> +	af_ptr->nix_af = rvu_af_init(af_ptr);
> +	if (!af_ptr->nix_af) {
> +		printf("%s: Error: could not initialize NIX AF\n", __func__);
> +		return -1;
> +	}
> +	debug("%s: Done\n", __func__);
> +
> +	return 0;
> +}
> +
> +int rvu_af_remove(struct udevice *dev)
> +{
> +	struct rvu_af *rvu_af = dev_get_priv(dev);
> +
> +	nix_af_shutdown(rvu_af->nix_af);
> +	npa_af_shutdown(rvu_af->nix_af->npa_af);
> +	npc_af_shutdown(rvu_af->nix_af);
> +
> +	debug("%s: rvu af down --\n", __func__);
> +	return 0;
> +}
> +
> +U_BOOT_DRIVER(rvu_af) = {
> +	.name   = "rvu_af",
> +	.id     = UCLASS_MISC,
> +	.probe  = rvu_af_probe,
> +	.remove = rvu_af_remove,
> +	.priv_auto_alloc_size = sizeof(struct rvu_af),
> +};
> +
> +static struct pci_device_id rvu_af_supported[] = {
> +	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_AF) },
> +	{}
> +};
> +
> +U_BOOT_PCI_DEVICE(rvu_af, rvu_af_supported);
> diff --git a/drivers/net/octeontx2/rvu_common.c b/drivers/net/octeontx2/rvu_common.c
> new file mode 100644
> index 0000000000..173b28ba4b
> --- /dev/null
> +++ b/drivers/net/octeontx2/rvu_common.c
> @@ -0,0 +1,71 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <asm/io.h>
> +
> +#include "rvu.h"
> +
> +int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz)
> +{
> +	q->base = memalign(CONFIG_SYS_CACHELINE_SIZE, qsize * entry_sz);
> +	if (!q->base)
> +		return -ENOMEM;
> +	q->entry_sz = entry_sz;
> +	q->qsize = qsize;
> +	q->alloc_sz = (size_t)qsize * entry_sz;
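> +	/*
> +	 * U-Boot runs with a 1:1 mapping here, so the CPU address
> +	 * doubles as the DMA address (IOVA).
> +	 */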
> +	q->iova = (dma_addr_t)(q->base);
> +	debug("NIX: qmem alloc for (%d * %d = %ld bytes) at %p\n",
> +	      q->qsize, q->entry_sz, q->alloc_sz, q->base);
> +	return 0;
> +}
> +
> +void qmem_free(struct qmem *q)
> +{
> +	if (q->base)
> +		free(q->base);
> +	memset(q, 0, sizeof(*q));
> +}
> +
> +/**
> + * Allocates an admin queue for instructions and results
> + *
> + * @param	aq	admin queue to allocate for
> + * @param	qsize	Number of entries in the queue
> + * @param	inst_size	Size of each instruction
> + * @param	res_size	Size of each result
> + *
> + * @return	-ENOMEM on error, 0 on success
> + */
> +int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
> +		 size_t inst_size, size_t res_size)
> +{
> +	int err;
> +
> +	err = qmem_alloc(&aq->inst, qsize, inst_size);
> +	if (err)
> +		return err;
> +	err = qmem_alloc(&aq->res, qsize, res_size);
> +	if (err)
> +		qmem_free(&aq->inst);
> +
> +	return err;
> +}
> +
> +/**
> + * Frees an admin queue
> + *
> + * @param	aq	Admin queue to free
> + */
> +void rvu_aq_free(struct admin_queue *aq)
> +{
> +	qmem_free(&aq->inst);
> +	qmem_free(&aq->res);
> +	memset(aq, 0, sizeof(*aq));
> +}
> diff --git a/drivers/net/octeontx2/rvu_pf.c b/drivers/net/octeontx2/rvu_pf.c
> new file mode 100644
> index 0000000000..201ecf2c16
> --- /dev/null
> +++ b/drivers/net/octeontx2/rvu_pf.c
> @@ -0,0 +1,116 @@
> +// SPDX-License-Identifier:    GPL-2.0
> +/*
> + * Copyright (C) 2018 Marvell International Ltd.
> + */
> +
> +#include <dm.h>
> +#include <errno.h>
> +#include <malloc.h>
> +#include <misc.h>
> +#include <net.h>
> +#include <pci_ids.h>
> +#include <asm/io.h>
> +#include <asm/types.h>
> +#include <asm/arch/board.h>
> +#include "cgx.h"
> +#include "nix.h"
> +
> +extern struct udevice *rvu_af_dev;
> +
> +int rvu_pf_init(struct rvu_pf *rvu)
> +{
> +	struct nix *nix;
> +	struct eth_pdata *pdata = dev_get_platdata(rvu->dev);
> +
> +	debug("%s: Allocating nix lf\n", __func__);
> +	nix = nix_lf_alloc(rvu->dev);
> +	if (!nix) {
> +		printf("%s: Error allocating lf for pf %d\n",
> +		       __func__, rvu->pfid);
> +		return -1;
> +	}
> +	rvu->nix = nix;
> +
> +	/* Set a valid MAC address so the eth uclass post_probe check passes */
> +	if (is_valid_ethaddr(nix->lmac->mac_addr)) {
> +		memcpy(pdata->enetaddr, nix->lmac->mac_addr, 6);
> +		eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
> +					      pdata->enetaddr);
> +	}
> +
> +	return 0;
> +}
> +
> +static const struct eth_ops nix_eth_ops = {
> +	.start			= nix_lf_init,
> +	.send			= nix_lf_xmit,
> +	.recv			= nix_lf_recv,
> +	.free_pkt		= nix_lf_free_pkt,
> +	.stop			= nix_lf_halt,
> +	.write_hwaddr		= nix_lf_setup_mac,
> +};
> +
> +int rvu_pf_probe(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +	int err;
> +	char name[16];
> +
> +	debug("%s: name: %s\n", __func__, dev->name);
> +
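> +	/* The per-PF RVU CSRs are exposed through BAR2 */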
> +	rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
> +	rvu->pfid = dev->seq + 1;	/* RVU PFs start from 1 */
> +	rvu->dev = dev;
> +	if (!rvu_af_dev) {
> +		printf("%s: Error: Could not find RVU AF device\n",
> +		       __func__);
> +		return -1;
> +	}
> +	rvu->afdev = rvu_af_dev;
> +
> +	debug("RVU PF %u BAR2 %p\n", rvu->pfid, rvu->pf_base);
> +
> +	rvu_get_lfid_for_pf(rvu->pfid, &rvu->nix_lfid, &rvu->npa_lfid);
> +
> +	err = rvu_pf_init(rvu);
> +	if (err)
> +		printf("%s: Error %d adding nix\n", __func__, err);
> +
> +	/*
> +	 * Modify the device name to include the index/sequence number,
> +	 * for readability; this maps 1:1 to the eth0/eth1/... names.
> +	 */
> +	sprintf(name, "rvu_pf#%d", dev->seq);
> +	device_set_name(dev, name);
> +	debug("%s: name: %s\n", __func__, dev->name);
> +	return err;
> +}
> +
> +int rvu_pf_remove(struct udevice *dev)
> +{
> +	struct rvu_pf *rvu = dev_get_priv(dev);
> +
> +	nix_lf_shutdown(rvu->nix);
> +	npa_lf_shutdown(rvu->nix);
> +
> +	debug("%s: rvu pf%d down --\n", __func__,  rvu->pfid);
> +
> +	return 0;
> +}
> +
> +U_BOOT_DRIVER(rvu_pf) = {
> +	.name   = "rvu_pf",
> +	.id     = UCLASS_ETH,
> +	.probe	= rvu_pf_probe,
> +	.remove = rvu_pf_remove,
> +	.ops    = &nix_eth_ops,
> +	.priv_auto_alloc_size = sizeof(struct rvu_pf),
> +	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
> +};
> +
> +static struct pci_device_id rvu_pf_supported[] = {
> +	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_PF) },
> +	{}
> +};
> +
> +U_BOOT_PCI_DEVICE(rvu_pf, rvu_pf_supported);
> 


Best regards,
Stefan

-- 
DENX Software Engineering GmbH,      Managing Director: Wolfgang Denk
HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany
Phone: (+49)-8142-66989-51 Fax: (+49)-8142-66989-80 Email: sr at denx.de

