[U-Boot] [PATCH v2] GbE controller driver support for Kirkwood SoCs
Prafulla Wadaskar
prafulla at marvell.com
Mon May 4 19:56:57 CEST 2009
Contributors:
Yotam Admon <yotam at marvell.com>
Michael Blostein <michaelbl at marvell.com>
Reviewed by: Ronen Shitrit <rshitrit at marvell.com>
Signed-off-by: Prafulla Wadaskar <prafulla at marvell.com>
---
Change log:
v2: complete rewrite/restructure of v1
used shorter variable/function names
readl/writel used to access SoC registers
SoC registers accessed through pointers held in the net device struct
miiphy registration done for external SMI read/write access
miiphy_link used to detect PHY link presence
cosmetic cleanup
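
Note: for reference, a board enables this driver from its config header
roughly as below. This is only a sketch using the config names introduced
by this patch; the port mask and PHY base address shown are illustrative
and board specific (PHY_BASE_ADR falls back to 0x08 if not defined):

  #define CONFIG_KIRKWOOD_EGIGA          /* enable the GbE driver */
  #define CONFIG_KIRKWOOD_EGIGA_PORTS    {1, 0}  /* use port 0 only */
  #define CONFIG_PHY_BASE_ADR    0x08    /* SMI address of first PHY */
  #define CONFIG_MII                     /* register with miiphy layer */
  #define CONFIG_CMD_MII                 /* optional: 'mii' command */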
cpu/arm926ejs/kirkwood/cpu.c | 8 +
drivers/net/Makefile | 1 +
drivers/net/kirkwood_egiga.c | 1669 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/kirkwood_egiga.h | 828 +++++++++++++++++++++
include/netdev.h | 1 +
5 files changed, 2507 insertions(+), 0 deletions(-)
create mode 100644 drivers/net/kirkwood_egiga.c
create mode 100644 drivers/net/kirkwood_egiga.h
diff --git a/cpu/arm926ejs/kirkwood/cpu.c b/cpu/arm926ejs/kirkwood/cpu.c
index bfbbe05..8ed184c 100644
--- a/cpu/arm926ejs/kirkwood/cpu.c
+++ b/cpu/arm926ejs/kirkwood/cpu.c
@@ -293,3 +293,11 @@ int arch_misc_init(void)
return 0;
}
#endif /* CONFIG_ARCH_MISC_INIT */
+
+#ifdef CONFIG_KIRKWOOD_EGIGA
+int cpu_eth_init(bd_t *bis)
+{
+ kirkwood_egiga_initialize(bis);
+ return 0;
+}
+#endif
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a360a50..f0c5654 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,6 +40,7 @@ COBJS-$(CONFIG_ENC28J60) += enc28j60.o
COBJS-$(CONFIG_FSLDMAFEC) += fsl_mcdmafec.o mcfmii.o
COBJS-$(CONFIG_GRETH) += greth.o
COBJS-$(CONFIG_INCA_IP_SWITCH) += inca-ip_sw.o
+COBJS-$(CONFIG_KIRKWOOD_EGIGA) += kirkwood_egiga.o
COBJS-$(CONFIG_DRIVER_KS8695ETH) += ks8695eth.o
COBJS-$(CONFIG_DRIVER_LAN91C96) += lan91c96.o
COBJS-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/kirkwood_egiga.c b/drivers/net/kirkwood_egiga.c
new file mode 100644
index 0000000..d9b295a
--- /dev/null
+++ b/drivers/net/kirkwood_egiga.c
@@ -0,0 +1,1669 @@
+/*
+ * (C) Copyright 2009
+ * Marvell Semiconductor <www.marvell.com>
+ * Prafulla Wadaskar <prafulla at marvell.com>
+ *
+ * (C) Copyright 2003
+ * Ingo Assmus <ingo.assmus at keymile.com>
+ *
+ * based on - Driver for MV64360X ethernet ports
+ * Copyright (C) 2002 rabeeh at galileo.co.il
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <common.h>
+#include <net.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <asm/errno.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+/* In case SRAM is cache coherent or non-cacheable */
+#define CONFIG_NOT_COHERENT_CACHE
+#define D_CACHE_FLUSH_LINE(addr, offset) ;
+#define CPU_PIPE_FLUSH { __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop");}
+
+#include "kirkwood_egiga.h"
+
+/*
+ * smi_reg_read - Read from ethernet phy register.
+ *
+ * INPUT:
+ * @phy_adr - Phy address.
+ * @reg_ofs - Phy register offset.
+ *
+ * This function reads an ethernet PHY register.
+ * Returns 0 and stores the 16bit register value in *data, or -EFAULT on error.
+ */
+int smi_reg_read(char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
+{
+ struct eth_device *dev = eth_get_dev_by_name(devname);
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_registers *regs = dkwgbe->regs;
+ u32 smi_reg;
+ volatile u32 timeout;
+
+ /* special case: read back the programmed PHY address */
+ if (phy_adr == 0xEE && reg_ofs == 0xEE) {
+ *data = (u16) (0x00ff & KWGBEREG_RD(regs->phyadr));
+ return 0;
+ }
+ /* check parameters */
+ if ((phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS) &
+ ~KWGBE_PHY_SMI_DEV_ADDR_MASK) {
+ printf("Illegal PHY device address %d\n", phy_adr);
+ return -EFAULT;
+ }
+ if ((reg_ofs << KWGBE_SMI_REG_ADDR_OFFS) & ~KWGBE_SMI_REG_ADDR_MASK) {
+ printf("Illegal PHY register offset %d\n", reg_ofs);
+ return -EFAULT;
+ }
+
+ timeout = KWGBE_PHY_SMI_TIMEOUT;
+ /* wait till the SMI is not busy */
+ do {
+ /* read smi register */
+ smi_reg = KWGBEREG_RD(regs->smi);
+ if (timeout-- == 0) {
+ printf("SMI busy timeout\n");
+ return -EFAULT;
+ }
+ } while (smi_reg & KWGBE_PHY_SMI_BUSY_MASK);
+
+ /* fill the phy address and register offset and read opcode */
+ smi_reg =
+ (phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS) | (reg_ofs <<
+ KWGBE_SMI_REG_ADDR_OFFS)
+ | KWGBE_PHY_SMI_OPCODE_READ;
+
+ /* write the smi register */
+ KWGBEREG_WR(regs->smi, smi_reg);
+
+ /* wait till the read value is ready */
+ timeout = KWGBE_PHY_SMI_TIMEOUT;
+ do {
+ /* read smi register */
+ smi_reg = KWGBEREG_RD(regs->smi);
+ if (timeout-- == 0) {
+ printf("SMI read-valid timeout\n");
+ return -EFAULT;
+ }
+ } while (!(smi_reg & KWGBE_PHY_SMI_READ_VALID_MASK));
+
+ /* Wait for the data to update in the SMI register */
+ for (timeout = 0; timeout < KWGBE_PHY_SMI_TIMEOUT; timeout++) ;
+
+ *data = (u16) (KWGBEREG_RD(regs->smi) & KWGBE_PHY_SMI_DATA_MASK);
+
+ debug("Reg(phyadr %d, off %d) Phy-value = %04x\n", phy_adr,
+ reg_ofs, *data);
+
+ return 0;
+}
+
+/*
+ * smi_reg_write - Write to ethernet phy register.
+ *
+ * @phy_adr - Phy address.
+ * @reg_ofs - Phy register offset.
+ * @data - 16bit data.
+ *
+ * This function writes to an ethernet PHY register.
+ * Returns 0 if the write succeeded, -EINVAL on bad parameters,
+ * -ETIME on timeout.
+ */
+int smi_reg_write(char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
+{
+ struct eth_device *dev = eth_get_dev_by_name(devname);
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_registers *regs = dkwgbe->regs;
+ u32 smi_reg;
+ volatile u32 timeout;
+
+ /* check parameters */
+ if ((phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS) &
+ ~KWGBE_PHY_SMI_DEV_ADDR_MASK) {
+ printf("Illegal phy address\n");
+ return -EINVAL;
+ }
+ if ((reg_ofs << KWGBE_SMI_REG_ADDR_OFFS) & ~KWGBE_SMI_REG_ADDR_MASK) {
+ printf("Illegal register offset\n");
+ return -EINVAL;
+ }
+
+ /* wait till the SMI is not busy */
+ timeout = KWGBE_PHY_SMI_TIMEOUT;
+ do {
+ /* read smi register */
+ smi_reg = KWGBEREG_RD(regs->smi);
+ if (timeout-- == 0) {
+ printf("SMI busy timeout\n");
+ return -ETIME;
+ }
+ } while (smi_reg & KWGBE_PHY_SMI_BUSY_MASK);
+
+ /* fill the phy address and register offset and write opcode and data */
+ smi_reg = (data << KWGBE_PHY_SMI_DATA_OFFS);
+ smi_reg |=
+ (phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS) | (reg_ofs <<
+ KWGBE_SMI_REG_ADDR_OFFS);
+ smi_reg &= ~KWGBE_PHY_SMI_OPCODE_READ;
+
+ /* write the smi register */
+ KWGBEREG_WR(regs->smi, smi_reg);
+
+ return 0;
+}
+
+static int egiga_free_tx_rings(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_portinfo *portinfo = &dkwgbe->portinfo;
+ struct kwgbe_registers *regs = dkwgbe->regs;
+
+ u32 queue;
+ volatile struct kwgbe_tx_dscr *p_tx_curr_desc;
+
+ /* Stop Tx Queues */
+ KWGBEREG_WR(regs->tqc, 0x0000ff00);
+
+ /* Free TX rings */
+ debug("Clearing previously allocated TX queues... \n");
+ for (queue = 0; queue < KW_TXQ_NO; queue++) {
+ /* Free on TX rings */
+ for (p_tx_curr_desc = portinfo->p_tx_desc_area_base[queue];
+ ((u32) p_tx_curr_desc <=
+ (u32) portinfo->p_tx_desc_area_base[queue] +
+ portinfo->tx_desc_area_size[queue]);
+ p_tx_curr_desc =
+ (struct kwgbe_tx_dscr *)((u32) p_tx_curr_desc +
+ TX_DESC_ALIGNED_SIZE)) {
+ /* this is inside for loop */
+ if (p_tx_curr_desc->retinfo != 0) {
+ p_tx_curr_desc->retinfo = 0;
+ debug("freed\n");
+ }
+ }
+ debug("Done\n");
+ }
+ return 0;
+}
+
+static int egiga_free_rx_rings(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_portinfo *portinfo = &dkwgbe->portinfo;
+ struct kwgbe_registers *regs = dkwgbe->regs;
+
+ u32 queue;
+ volatile struct kwgbe_rx_dscr *p_rx_curr_desc;
+
+ /* Stop RX Queues */
+ KWGBEREG_WR(regs->rqc, 0x0000ff00);
+
+ /* Free RX rings */
+ debug("Clearing previously allocated RX queues...\n");
+ for (queue = 0; queue < KW_RXQ_NO; queue++) {
+ /* Free preallocated skb's on RX rings */
+ for (p_rx_curr_desc = portinfo->p_rx_desc_area_base[queue];
+ (((u32) p_rx_curr_desc <
+ ((u32) portinfo->p_rx_desc_area_base[queue] +
+ portinfo->rx_desc_area_size[queue])));
+ p_rx_curr_desc =
+ (struct kwgbe_rx_dscr *)((u32) p_rx_curr_desc +
+ RX_DESC_ALIGNED_SIZE)) {
+ if (p_rx_curr_desc->retinfo != 0) {
+ p_rx_curr_desc->retinfo = 0;
+ debug("freed\n");
+ }
+ }
+ debug("Done\n");
+ }
+ return 0;
+}
+
+static void phy_set_addr(struct kwgbe_registers *regs, int phy_addr)
+{
+ KWGBEREG_WR(regs->phyadr, phy_addr);
+}
+
+#ifdef UPDATE_STATS_BY_SOFTWARE
+/*
+ * egiga_print_stat
+ *
+ * Print the statistics kept in the private data structure
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void egiga_print_stat(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+ struct net_device_stats *stats = &portpriv->stats;
+
+ /* Print the accumulated statistics */
+ printf("\n### Network statistics: ###\n");
+ printf("--------------------------\n");
+ printf(" Packets received: %d\n", stats->rx_packets);
+ printf(" Packets send: %d\n", stats->tx_packets);
+ printf(" Received bytes: %lld\n", stats->rx_bytes);
+ printf(" Send bytes: %lld\n", stats->tx_bytes);
+ if (stats->rx_errors != 0)
+ printf(" Rx Errors: %d\n",
+ stats->rx_errors);
+ if (stats->rx_dropped != 0)
+ printf(" Rx dropped (CRC Errors): %d\n",
+ stats->rx_dropped);
+ if (stats->multicast != 0)
+ printf(" Rx mulicast frames: %d\n",
+ stats->multicast);
+ if (stats->collisions != 0)
+ printf(" No. of collisions: %d\n",
+ stats->collisions);
+ if (stats->rx_length_errors != 0)
+ printf(" Rx length errors: %d\n",
+ stats->rx_length_errors);
+}
+#endif
+
+/*
+ * kwgbe_stop
+ *
+ * This function is used when closing the network device.
+ * It updates the hardware, releases all memory that holds buffers and
+ * descriptors, and releases the IRQ.
+ * Input : a pointer to the device structure
+ * Output : zero on success, nonzero on failure
+ */
+int kwgbe_stop(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_registers *regs = dkwgbe->regs;
+
+ /* Disable all gigE address decoder */
+ KWGBEREG_WR(regs->bare, 0x3f);
+
+ egiga_free_tx_rings(dev);
+ egiga_free_rx_rings(dev);
+
+ port_reset(regs);
+ /* Disable ethernet port interrupts */
+ KWGBEREG_WR(regs->ic, 0);
+ KWGBEREG_WR(regs->ice, 0);
+ /* Mask RX buffer and TX end interrupt */
+ KWGBEREG_WR(regs->pim, 0);
+ /* Mask phy and link status changes interrupts */
+ KWGBEREG_WR(regs->peim, 0);
+
+ /* Print Network statistics */
+#ifdef UPDATE_STATS_BY_SOFTWARE
+ /*
+ * Print statistics (only if ethernet is running),
+ * then zero all the stats fields in memory
+ */
+ if (dkwgbe->portpriv.running == MAGIC_KWGBE_RUNNING) {
+ dkwgbe->portpriv.running = 0;
+ egiga_print_stat(dev);
+ }
+ memset(&dkwgbe->portpriv.stats, 0, sizeof(struct net_device_stats));
+#endif
+ debug("Ethernet stopped ...\n");
+ return 0;
+}
+
+/*
+ * clear_mib_counters - Clear all MIB counters
+ *
+ * This function clears all MIB counters of a specific ethernet port.
+ * A read from the MIB counter will reset the counter.
+ *
+ * OUTPUT: After reading all MIB counters, the counters are reset.
+ * RETURN: None.
+ */
+static void clear_mib_counters(u32 mibc)
+{
+ u32 i = mibc + 0x80;
+ u32 dummy;
+
+ /* Perform dummy reads from MIB counters */
+ for (; mibc < i; mibc += 4) {
+ dummy = readl(mibc);
+ }
+}
+
+/*
+ * egiga_update_stat
+ *
+ * Update the statistics structure in the private data structure
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void egiga_update_stat(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+ struct net_device_stats *stats = &portpriv->stats;
+ struct mib_counters *mibc = &dkwgbe->regs->mibc;
+
+ /* Accumulate MIB counters into the software stats */
+ stats->rx_packets += KWGBEREG_RD(mibc->good_frames_received);
+ stats->tx_packets += KWGBEREG_RD(mibc->good_frames_sent);
+ stats->rx_bytes += KWGBEREG_RD(mibc->good_octets_received);
+ stats->tx_bytes += KWGBEREG_RD(mibc->good_octets_sent);
+ stats->rx_errors += KWGBEREG_RD(mibc->mac_receive_error);
+ /* Rx dropped is for received packet with CRC error */
+ stats->rx_dropped += KWGBEREG_RD(mibc->bad_crc_event);
+ stats->multicast += KWGBEREG_RD(mibc->multicast_frames_received);
+ stats->collisions +=
+ KWGBEREG_RD(mibc->collision) + KWGBEREG_RD(mibc->late_collision);
+ stats->rx_length_errors +=
+ KWGBEREG_RD(mibc->undersize_received) +
+ KWGBEREG_RD(mibc->oversize_received);
+}
+
+/*
+ * egiga_get_stats
+ *
+ * Returns a pointer to the interface statistics.
+ *
+ * Input : dev - a pointer to the required interface
+ * Output : a pointer to the interface's statistics
+ */
+static struct net_device_stats *egiga_get_stats(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+
+ egiga_update_stat(dev);
+ return &portpriv->stats;
+}
+
+/*
+ * set_access_control - Config address decode parameters for Ethernet unit
+ *
+ * This function configures the address decode parameters for the Gigabit
+ * Ethernet Controller according to the given parameters struct.
+ *
+ * @regs Register struct pointer.
+ * @param Address decode parameter struct.
+ */
+static void set_access_control(struct kwgbe_registers *regs,
+ struct kwgbe_winparam *param)
+{
+ u32 access_prot_reg;
+
+ /* Set access control register */
+ access_prot_reg = KWGBEREG_RD(regs->epap);
+ access_prot_reg &= (~(3 << (param->win * 2))); /* clear window permission */
+ access_prot_reg |= (param->access_ctrl << (param->win * 2));
+ KWGBEREG_WR(regs->epap, access_prot_reg);
+
+ /* Set window Size reg (SR) */
+ KWGBEREG_WR(regs->barsz[param->win].size,
+ (((param->size / 0x10000) - 1) << 16));
+
+ /* Set window Base address reg (BA) */
+ KWGBEREG_WR(regs->barsz[param->win].bar,
+ (param->target | param->attrib | param->base_addr));
+ /* High address remap reg (HARR) */
+ if (param->win < 4)
+ KWGBEREG_WR(regs->ha_remap[param->win], param->high_addr);
+
+ /* Base address enable reg (BARER) */
+ if (param->enable == 1)
+ KWGBEREG_BITS_RESET(regs->bare, (1 << param->win));
+ else
+ KWGBEREG_BITS_SET(regs->bare, (1 << param->win));
+}
+
+static void set_dram_access(struct kwgbe_registers *regs)
+{
+ struct kwgbe_winparam win_param;
+ int i;
+
+ for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+ /* Set access parameters for DRAM bank i */
+ win_param.win = i; /* Use Ethernet window i */
+ win_param.target = KWGBE_TARGET_DRAM; /* Window target - DDR */
+ win_param.access_ctrl = EWIN_ACCESS_FULL; /* Enable full access */
+ win_param.high_addr = 0;
+ /* Get bank base */
+ win_param.base_addr = kw_sdram_bar(i);
+ win_param.size = kw_sdram_bs(i); /* Get bank size */
+ if (win_param.size == 0)
+ win_param.enable = 0;
+ else
+ win_param.enable = 1; /* Enable the access */
+ switch (i) {
+ case 0:
+ /* Enable DRAM bank 0 */
+ win_param.attrib = EBAR_ATTR_DRAM_CS0;
+ break;
+ case 1:
+ /* Enable DRAM bank 1 */
+ win_param.attrib = EBAR_ATTR_DRAM_CS1;
+ break;
+ case 2:
+ /* Enable DRAM bank 2 */
+ win_param.attrib = EBAR_ATTR_DRAM_CS2;
+ break;
+ case 3:
+ /* Enable DRAM bank 3 */
+ win_param.attrib = EBAR_ATTR_DRAM_CS3;
+ break;
+ default:
+ /* invalid bank, disable access */
+ win_param.enable = 0;
+ win_param.attrib = 0;
+ break;
+ }
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ win_param.attrib |= EBAR_ATTR_DRAM_CACHE_COHERENCY_WB;
+#endif
+ /* Set the access control for address window (EPAPR) READ & WRITE */
+ set_access_control(regs, &win_param);
+ }
+}
+
+/*
+ * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
+ *
+ * Go through all the DA filter tables (Unicast, Special Multicast & Other
+ * Multicast) and set each entry to 0.
+ *
+ */
+static void port_init_mac_tables(struct kwgbe_registers *regs)
+{
+ int table_index;
+
+ /* Clear DA filter unicast table (Ex_dFUT) */
+ for (table_index = 0; table_index < 4; ++table_index)
+ KWGBEREG_WR(regs->dfut[table_index], 0);
+
+ for (table_index = 0; table_index < 64; ++table_index) {
+ /* Clear DA filter special multicast table (Ex_dFSMT) */
+ KWGBEREG_WR(regs->dfsmt[table_index], 0);
+ /* Clear DA filter other multicast table (Ex_dFOMT) */
+ KWGBEREG_WR(regs->dfomt[table_index], 0);
+ }
+}
+
+/*
+ * port_init - Initialize the Ethernet port driver
+ *
+ * @p_gbe_pctrl - ptr to Ethernet port control struct
+ *
+ * This function prepares the ethernet port to start its activity:
+ * 1) Completes the ethernet port driver struct initialization ahead of the
+ * port start routine.
+ * 2) Resets the device to a quiescent state in case of warm reboot.
+ * 3) Enables SDMA access to all four DRAM banks as well as internal SRAM.
+ * 4) Cleans the MAC tables. The reset status of those tables is unknown.
+ * 5) Sets the PHY address.
+ * Note: Call this routine prior to port_start routine and after setting
+ * user values in the user fields of Ethernet port control struct (i.e.
+ * port_phy_addr).
+ *
+ */
+static void port_init(struct kwgbe_device *dkwgbe)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ struct kwgbe_registers *regs = dkwgbe->regs;
+ int queue;
+
+ p_gbe_pctrl->port_config = PRT_CFG_VAL;
+ p_gbe_pctrl->port_config_extend = PORT_CFG_EXTEND_VALUE;
+ p_gbe_pctrl->port_sdma_config = PORT_SDMA_CFG_VALUE;
+ p_gbe_pctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
+
+ p_gbe_pctrl->port_rx_queue_command = 0;
+ p_gbe_pctrl->port_tx_queue_command = 0;
+
+ /* Stop RX Queues */
+ KWGBEREG_WR(regs->rqc, 0x0000ff00);
+
+ /* Clear the ethernet port interrupts */
+ KWGBEREG_WR(regs->ic, 0);
+ KWGBEREG_WR(regs->ice, 0);
+
+ /* Unmask RX buffer and TX end interrupt */
+ KWGBEREG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
+
+ /* Unmask phy and link status changes interrupts */
+ KWGBEREG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);
+
+ /* Zero out SW structs */
+ for (queue = 0; queue < MAX_RXQ_NO; queue++) {
+ CURR_RFD_SET((struct kwgbe_rx_dscr *)0x00000000, queue);
+ USED_RFD_SET((struct kwgbe_rx_dscr *)0x00000000, queue);
+ p_gbe_pctrl->rx_resource_err[queue] = 0;
+ }
+
+ for (queue = 0; queue < MAX_TXQ_NO; queue++) {
+ CURR_TFD_SET((struct kwgbe_tx_dscr *)0x00000000, queue);
+ USED_TFD_SET((struct kwgbe_tx_dscr *)0x00000000, queue);
+ FIRST_TFD_SET((struct kwgbe_tx_dscr *)0x00000000, queue);
+ p_gbe_pctrl->tx_resource_err[queue] = 0;
+ }
+
+ port_reset(regs);
+ set_dram_access(regs);
+ port_init_mac_tables(regs);
+}
+
+/*
+ * port_start - Start the Ethernet port activity.
+ *
+ * This routine prepares the Ethernet port for Rx and Tx activity:
+ * 1. Initialize the Tx and Rx Current Descriptor Pointer for each queue
+ * whose descriptor ring has been initialized (using kwgbe_init_tx_desc_ring
+ * for Tx and kwgbe_init_rx_desc_ring for Rx)
+ * 2. Initialize and enable the Ethernet configuration port by writing to
+ * the port's configuration and command registers.
+ * 3. Initialize and enable the SDMA by writing to the SDMA's
+ * configuration and command registers.
+ * After completing these steps, the ethernet port SDMA can start to
+ * perform Rx and Tx activities.
+ *
+ * Note: Each Rx and Tx queue descriptor's list must be initialized prior
+ * to calling this function (use kwgbe_init_tx_desc_ring for Tx queues and
+ * kwgbe_init_rx_desc_ring for Rx queues).
+ *
+ * OUTPUT: Ethernet port is ready to receive and transmit.
+ *
+ * RETURN: None.
+ */
+static void port_start(struct kwgbe_device *dkwgbe)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ struct kwgbe_registers *regs = dkwgbe->regs;
+ int queue;
+ volatile struct kwgbe_tx_dscr *p_tx_curr_desc;
+ volatile struct kwgbe_rx_dscr *p_rx_curr_desc;
+
+ /* Assignment of Tx CTRP of given queue */
+ for (queue = 0; queue < MAX_TXQ_NO; queue++) {
+ CURR_TFD_GET(p_tx_curr_desc, queue);
+ KWGBEREG_WR(regs->tcqdp[queue], (u32) p_tx_curr_desc);
+ }
+
+ /* Assignment of Rx CRDB of given queue */
+ for (queue = 0; queue < MAX_RXQ_NO; queue++) {
+ CURR_RFD_GET(p_rx_curr_desc, queue);
+ KWGBEREG_WR(regs->crdp[queue], (u32) p_rx_curr_desc);
+
+ if (p_rx_curr_desc != NULL)
+ /* Add the assigned Ethernet address to the port's address table */
+ port_uc_addr_set(regs, p_gbe_pctrl->port_mac_addr,
+ queue);
+ }
+
+ /* Assign port configuration and command. */
+ KWGBEREG_WR(regs->pxc, p_gbe_pctrl->port_config);
+ KWGBEREG_WR(regs->pxcx, p_gbe_pctrl->port_config_extend);
+ KWGBEREG_WR(regs->psc0, p_gbe_pctrl->port_serial_control);
+ KWGBEREG_BITS_SET(regs->psc0, KWGBE_SERIAL_PORT_EN);
+
+ /* Assign port SDMA configuration */
+ KWGBEREG_WR(regs->sdc, p_gbe_pctrl->port_sdma_config);
+ KWGBEREG_WR(regs->tqx[0].qxttbc, 0x3fffffff);
+ KWGBEREG_WR(regs->tqx[0].tqxtbc, 0x03fffcff);
+ /* Turn off the port/queue bandwidth limitation */
+ KWGBEREG_WR(regs->pmtu, 0x0);
+
+ /* Enable port Rx. */
+ KWGBEREG_WR(regs->rqc, p_gbe_pctrl->port_rx_queue_command);
+
+ /* Set maximum receive buffer to 9700 bytes */
+ KWGBEREG_WR(regs->psc0,
+ (0x5 << 17) | (KWGBEREG_RD(regs->psc0) & 0xfff1ffff));
+
+ /*
+ * Set ethernet MTU for leaky bucket mechanism to 0 - this will
+ * disable the leaky bucket mechanism .
+ */
+ KWGBEREG_WR(regs->pmtu, 0);
+}
+
+/*
+ * port_uc_addr - This function sets the port unicast address table
+ *
+ * This function locates the proper entry in the Unicast table for the
+ * specified MAC nibble and sets its properties according to function
+ * parameters.
+ * This function adds/removes MAC addresses from the port unicast address
+ * table.
+ *
+ * @uc_nibble Unicast MAC Address last nibble.
+ * @queue Rx queue number for this MAC address.
+ * @option 0 = Add, 1 = remove address.
+ *
+ * RETURN: 1 on success, 0 if the option parameter is invalid.
+ */
+static int port_uc_addr(struct kwgbe_registers *regs, u8 uc_nibble,
+ enum kwgbe_q queue, int option)
+{
+ u32 unicast_reg;
+ u32 tbl_offset;
+ u32 reg_offset;
+
+ /* Locate the Unicast table entry */
+ uc_nibble = (0xf & uc_nibble);
+ tbl_offset = (uc_nibble / 4); /* Register offset from unicast table base */
+ reg_offset = uc_nibble % 4; /* Entry offset within the above register */
+
+ switch (option) {
+ case REJECT_MAC_ADDR:
+ /* Clear accepts frame bit at specified unicast DA table entry */
+ unicast_reg = KWGBEREG_RD(regs->dfut[tbl_offset]);
+ unicast_reg &= (0xFF << (8 * reg_offset));
+ KWGBEREG_WR(regs->dfut[tbl_offset], unicast_reg);
+ break;
+ case ACCEPT_MAC_ADDR:
+ /* Set accepts frame bit at unicast DA filter table entry */
+ unicast_reg = KWGBEREG_RD(regs->dfut[tbl_offset]);
+ unicast_reg &= (0xFF << (8 * reg_offset));
+ unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ KWGBEREG_WR(regs->dfut[tbl_offset], unicast_reg);
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * port_uc_addr_set - This function sets the port unicast address.
+ *
+ * This function sets the port Ethernet MAC address.
+ *
+ * @p_addr Address to be set
+ * @queue Rx queue number for this MAC address.
+ *
+ * OUTPUT: Sets MAC address low and high registers. Also calls
+ * port_uc_addr() to set the unicast table with the proper info.
+ */
+static void port_uc_addr_set(struct kwgbe_registers *regs, u8 * p_addr,
+ enum kwgbe_q queue)
+{
+ u32 mac_h;
+ u32 mac_l;
+
+ mac_l = (p_addr[4] << 8) | (p_addr[5]);
+ mac_h =
+ (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
+ (p_addr[3] << 0);
+
+ KWGBEREG_WR(regs->macal, mac_l);
+ KWGBEREG_WR(regs->macah, mac_h);
+
+ /* Accept frames of this address */
+ port_uc_addr(regs, p_addr[5], queue, ACCEPT_MAC_ADDR);
+}
+
+/*
+ * port_reset - Reset Ethernet port
+ *
+ * This routine resets the chip by aborting any SDMA engine activity and
+ * clearing the MIB counters. The Receiver and the Transmit unit are in
+ * idle state after this command is performed and the port is disabled.
+ *
+ */
+static void port_reset(struct kwgbe_registers *regs)
+{
+ u32 reg_data, i;
+
+ /* Stop Tx port activity. Check port Tx activity. */
+ reg_data = KWGBEREG_RD(regs->tqc);
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ KWGBEREG_WR(regs->tqc, (reg_data << 8));
+
+ /* Wait for all Tx activity to terminate. */
+ do {
+ /* Check port cause register that all Tx queues are stopped */
+ reg_data = KWGBEREG_RD(regs->tqc);
+ }
+ while (reg_data & 0xFF);
+ }
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ reg_data = KWGBEREG_RD(regs->rqc);
+
+ if (reg_data & 0xFF) {
+ /* Issue stop command for active channels only */
+ KWGBEREG_WR(regs->rqc, (reg_data << 8));
+
+ /* Wait for all Rx activity to terminate. */
+ do {
+ /* Check port cause register that all Rx queues are stopped */
+ reg_data = KWGBEREG_RD(regs->rqc);
+ }
+ while (reg_data & 0xFF);
+ }
+
+ /* Clear all MIB counters */
+ clear_mib_counters((u32) & regs->mibc);
+
+ /* Reset the Enable bit in the Configuration Register */
+ KWGBEREG_BITS_RESET(regs->psc0, KWGBE_SERIAL_PORT_EN);
+ /* Set port of active in the Configuration Register */
+ KWGBEREG_BITS_RESET(regs->psc1, 1 << 4);
+#ifdef CONFIG_SYS_MII_MODE
+ /* Set MII interface up */
+ KWGBEREG_BITS_RESET(regs->psc1, 1 << 3);
+#endif
+ for (i = 0; i < 4000; i++) ;
+ return;
+}
+
+/*
+ * kwgbe_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
+ *
+ * This function prepares a Rx chained list of descriptors and packet
+ * buffers in a form of a ring. The routine must be called after port
+ * initialization routine and before port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * @rx_queue Number of Rx queue.
+ * @rx_desc_num Number of Rx descriptors
+ * @rx_buff_size Size of Rx buffer
+ * @rx_desc_base_addr Rx descriptors memory area base addr.
+ * @rx_buff_base_addr Rx buffer memory area base addr.
+ *
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Rx descriptors and buffers.
+ *
+ * RETURN:
+ * 0 if the given descriptors memory area is not aligned according to
+ * Ethernet SDMA specifications.
+ * 1 otherwise.
+ */
+static int kwgbe_init_rx_desc_ring(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q rx_queue,
+ int rx_desc_num,
+ int rx_buff_size,
+ u32 rx_desc_base_addr, u32 rx_buff_base_addr)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ struct kwgbe_rx_dscr *p_rx_desc;
+ /* pointer to link with the last descriptor */
+ struct kwgbe_rx_dscr *p_rx_prev_desc;
+ u32 buffer_addr;
+ int ix; /* a counter */
+
+ p_rx_desc = (struct kwgbe_rx_dscr *)rx_desc_base_addr;
+ p_rx_prev_desc = p_rx_desc;
+ buffer_addr = rx_buff_base_addr;
+
+ /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
+ if (rx_buff_base_addr & 0xF)
+ return 0;
+
+ /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */
+ if ((rx_buff_size < 8) || (rx_buff_size > RX_BUF_MXSZ))
+ return 0;
+
+ /* Rx buffers must be 64-bit aligned. */
+ if ((rx_buff_base_addr + rx_buff_size) & 0x7)
+ return 0;
+
+ /* initialize the Rx descriptors ring */
+ for (ix = 0; ix < rx_desc_num; ix++) {
+ p_rx_desc->buf_size = rx_buff_size;
+ p_rx_desc->byte_cnt = 0x0000;
+ p_rx_desc->cmd_sts =
+ KWGBE_BUFFER_OWNED_BY_DMA | KWGBE_RX_EN_INTERRUPT;
+ p_rx_desc->nxtdesc_p = ((u32) p_rx_desc) + RX_DESC_ALIGNED_SIZE;
+ p_rx_desc->buf_ptr = buffer_addr;
+ p_rx_desc->retinfo = 0x00000000;
+ D_CACHE_FLUSH_LINE(p_rx_desc, 0);
+ buffer_addr += rx_buff_size;
+ p_rx_prev_desc = p_rx_desc;
+ p_rx_desc = (struct kwgbe_rx_dscr *)
+ ((u32) p_rx_desc + RX_DESC_ALIGNED_SIZE);
+ }
+
+ /* Closing Rx descriptors ring */
+ p_rx_prev_desc->nxtdesc_p = (rx_desc_base_addr);
+ D_CACHE_FLUSH_LINE(p_rx_prev_desc, 0);
+
+ /* Save Rx desc pointer to driver struct. */
+ CURR_RFD_SET((struct kwgbe_rx_dscr *)rx_desc_base_addr, rx_queue);
+ USED_RFD_SET((struct kwgbe_rx_dscr *)rx_desc_base_addr, rx_queue);
+
+ p_gbe_pctrl->p_rx_desc_area_base[rx_queue] =
+ (struct kwgbe_rx_dscr *)rx_desc_base_addr;
+ p_gbe_pctrl->rx_desc_area_size[rx_queue] =
+ rx_desc_num * RX_DESC_ALIGNED_SIZE;
+
+ p_gbe_pctrl->port_rx_queue_command |= (1 << rx_queue);
+
+ return 1;
+}
+
+/*
+ * kwgbe_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory.
+ *
+ * This function prepares a Tx chained list of descriptors and packet
+ * buffers in a form of a ring. The routine must be called after port
+ * initialization routine and before port start routine.
+ * The Ethernet SDMA engine uses CPU bus addresses to access the various
+ * devices in the system (i.e. DRAM). This function uses the ethernet
+ * struct 'virtual to physical' routine (set by the user) to set the ring
+ * with physical addresses.
+ *
+ * @p_gbe_pctrl Ethernet Port Control struct ptr.
+ * @tx_queue Number of Tx queue.
+ * @tx_desc_num Number of Tx descriptors
+ * @tx_buff_size Size of Tx buffer
+ * @tx_desc_base_addr Tx descriptors memory area base addr.
+ * @tx_buff_base_addr Tx buffer memory area base addr.
+ *
+ * The routine updates the Ethernet port control struct with information
+ * regarding the Tx descriptors and buffers.
+ *
+ * RETURN:
+ * 0 if the given descriptors memory area is not aligned according to
+ * Ethernet SDMA specifications.
+ * 1 otherwise.
+ */
+static int kwgbe_init_tx_desc_ring(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q tx_queue,
+ int tx_desc_num,
+ int tx_buff_size,
+ u32 tx_desc_base_addr, u32 tx_buff_base_addr)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ struct kwgbe_tx_dscr *p_tx_desc;
+ struct kwgbe_tx_dscr *p_tx_prev_desc;
+ u32 buffer_addr;
+ int ix; /* a counter */
+
+ /* save the first desc pointer to link with the last descriptor */
+ p_tx_desc = (struct kwgbe_tx_dscr *)tx_desc_base_addr;
+ p_tx_prev_desc = p_tx_desc;
+ buffer_addr = tx_buff_base_addr;
+
+ /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
+ if (tx_buff_base_addr & 0xF)
+ return 0;
+
+ /* Tx buffers are limited to 64K bytes and Minimum size is 8 bytes */
+ if ((tx_buff_size > TX_BUF_MXSZ)
+ || (tx_buff_size < TX_BUF_MNSZ))
+ return 0;
+
+ /* Initialize the Tx descriptors ring */
+ for (ix = 0; ix < tx_desc_num; ix++) {
+ p_tx_desc->byte_cnt = 0x0000;
+ p_tx_desc->l4i_chk = 0x0000;
+ p_tx_desc->cmd_sts = 0x00000000;
+ p_tx_desc->nxtdesc_p = ((u32) p_tx_desc) + TX_DESC_ALIGNED_SIZE;
+
+ p_tx_desc->buf_ptr = buffer_addr;
+ p_tx_desc->retinfo = 0x00000000;
+ D_CACHE_FLUSH_LINE(p_tx_desc, 0);
+ buffer_addr += tx_buff_size;
+ p_tx_prev_desc = p_tx_desc;
+ p_tx_desc = (struct kwgbe_tx_dscr *)
+ ((u32) p_tx_desc + TX_DESC_ALIGNED_SIZE);
+
+ }
+ /* Closing Tx descriptors ring */
+ p_tx_prev_desc->nxtdesc_p = tx_desc_base_addr;
+ D_CACHE_FLUSH_LINE(p_tx_prev_desc, 0);
+ /* Set Tx desc pointer in driver struct. */
+ CURR_TFD_SET((struct kwgbe_tx_dscr *)tx_desc_base_addr, tx_queue);
+ USED_TFD_SET((struct kwgbe_tx_dscr *)tx_desc_base_addr, tx_queue);
+
+ /* Init Tx ring base and size parameters */
+ p_gbe_pctrl->p_tx_desc_area_base[tx_queue] =
+ (struct kwgbe_tx_dscr *)tx_desc_base_addr;
+ p_gbe_pctrl->tx_desc_area_size[tx_queue] =
+ (tx_desc_num * TX_DESC_ALIGNED_SIZE);
+
+ /* Add the queue to the list of Tx queues of this port */
+ p_gbe_pctrl->port_tx_queue_command |= (1 << tx_queue);
+
+ return 1;
+}
+
+/*
+ * b_copy - Copy bytes from source to destination
+ *
+ * This function supports the eight bytes limitation on Tx buffer size.
+ * The routine will zero eight bytes starting from the destination address
+ * followed by copying bytes from the source address to the destination.
+ *
+ * @src_addr 32 bit source address.
+ * @dst_addr 32 bit destination address.
+ * @byte_count Number of bytes to copy.
+ */
+static void b_copy(u32 src_addr, u32 dst_addr, int byte_count)
+{
+ /* Zero the dst_addr area */
+ *(u32 *) dst_addr = 0x0;
+
+ while (byte_count != 0) {
+ *(char *)dst_addr = *(char *)src_addr;
+ dst_addr++;
+ src_addr++;
+ byte_count--;
+ }
+}
+
+/*
+ * port_send - Send an Ethernet packet
+ *
+ * This routine sends a given packet described by the p_pkt_info parameter.
+ * It supports transmitting a packet spanned over multiple buffers. The
+ * routine updates 'curr' and 'first' indexes according to the packet
+ * segment passed to the routine. In case the packet segment is first,
+ * the 'first' index is updated. In any case, the 'curr' index is updated.
+ * If the routine gets into a Tx resource error it assigns 'curr' index as
+ * 'first'. This way the function can abort Tx process of multiple
+ * descriptors per packet.
+ *
+ * @tx_queue Number of Tx queue.
+ * @p_pkt_info User packet buffer ptr.
+ *
+ * OUTPUT: Tx ring 'curr' and 'first' indexes are updated.
+ *
+ * RETURN:
+ * KWGBE_QFULL in case of Tx resource error.
+ * KWGBE_ERROR in case the routine can not access Tx desc ring.
+ * KWGBE_QLAST_RESO if the routine uses the last Tx resource.
+ * KWGBE_OK otherwise.
+ */
+static enum kwgbe_fret_sts port_send(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q tx_queue,
+ struct kwgbe_pktinf *p_pkt_info)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ struct kwgbe_registers *regs = dkwgbe->regs;
+ volatile struct kwgbe_tx_dscr *p_tx_desc_first;
+ volatile struct kwgbe_tx_dscr *p_tx_desc_curr;
+ volatile struct kwgbe_tx_dscr *p_tx_next_desc_curr;
+ volatile struct kwgbe_tx_dscr *p_tx_desc_used;
+ u32 command_status;
+
+#ifdef CONFIG_TX_PKT_DISPLAY
+ {
+ u16 pcnt = p_pkt_info->byte_cnt;
+ u8 *prnt = (u8 *) p_pkt_info->buf_ptr;
+ printf("cnt=%d,", pcnt);
+ while (pcnt) {
+ printf("%02x,", prnt[0]);
+ prnt++;
+ pcnt--;
+ }
+ printf(" pckend\n");
+ }
+#endif
+ /* Do not process Tx ring in case of Tx ring resource error */
+ if (p_gbe_pctrl->tx_resource_err[tx_queue] == 1)
+ return KWGBE_QFULL;
+
+ /* Get the Tx Desc ring indexes */
+ CURR_TFD_GET(p_tx_desc_curr, tx_queue);
+ USED_TFD_GET(p_tx_desc_used, tx_queue);
+
+ if (p_tx_desc_curr == NULL)
+ return KWGBE_ERROR;
+
+ /* The following parameters are used to save readings from memory */
+ p_tx_next_desc_curr = TX_NEXT_DESC_PTR(p_tx_desc_curr, tx_queue);
+ command_status =
+ p_pkt_info->cmd_sts | KWGBE_ZERO_PADDING | KWGBE_GEN_CRC;
+
+ if (command_status & (KWGBE_TX_FIRST_DESC)) {
+ /* Update first desc */
+ FIRST_TFD_SET(p_tx_desc_curr, tx_queue);
+ p_tx_desc_first = p_tx_desc_curr;
+ } else {
+ FIRST_TFD_GET(p_tx_desc_first, tx_queue);
+ command_status |= KWGBE_BUFFER_OWNED_BY_DMA;
+ }
+
+ /*
+ * Buffers with a payload smaller than 8 bytes must be aligned to 64-bit
+ * boundary. We use the memory allocated for the Tx descriptor. This memory
+ * is located at offset TX_BUF_OFFSET_IN_DESC within the Tx descriptor.
+ */
+ if (p_pkt_info->byte_cnt <= 8) {
+ printf("You have failed in the < 8 bytes errata - fixme\n");
+ return KWGBE_ERROR;
+
+ p_tx_desc_curr->buf_ptr =
+ (u32) p_tx_desc_curr + TX_BUF_OFFSET_IN_DESC;
+ b_copy(p_pkt_info->buf_ptr, p_tx_desc_curr->buf_ptr,
+ p_pkt_info->byte_cnt);
+ } else
+ p_tx_desc_curr->buf_ptr = p_pkt_info->buf_ptr;
+
+ p_tx_desc_curr->byte_cnt = p_pkt_info->byte_cnt;
+ p_tx_desc_curr->retinfo = p_pkt_info->retinfo;
+
+ if (p_pkt_info->cmd_sts & (KWGBE_TX_LAST_DESC)) {
+ /* Set last desc with DMA ownership and interrupt enable. */
+ p_tx_desc_curr->cmd_sts = command_status |
+ KWGBE_BUFFER_OWNED_BY_DMA | KWGBE_TX_EN_INTERRUPT;
+
+ if (p_tx_desc_curr != p_tx_desc_first)
+ p_tx_desc_first->cmd_sts |= KWGBE_BUFFER_OWNED_BY_DMA;
+
+ /* Flush CPU pipe */
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_curr, 0);
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_first, 0);
+ CPU_PIPE_FLUSH;
+
+ /* Apply send command */
+ KWGBEREG_WR(regs->tqc, (1 << tx_queue));
+ debug("packet xmitted\n");
+
+ /*
+ * Finish Tx packet.
+ * Update first desc in case of Tx resource error
+ */
+ p_tx_desc_first = p_tx_next_desc_curr;
+ FIRST_TFD_SET(p_tx_desc_first, tx_queue);
+
+ } else {
+ p_tx_desc_curr->cmd_sts = command_status;
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_curr, 0);
+ }
+
+ /* Check for ring index overlap in the Tx desc ring */
+ if (p_tx_next_desc_curr == p_tx_desc_used) {
+ /* Update the current descriptor */
+ CURR_TFD_SET(p_tx_desc_first, tx_queue);
+
+ p_gbe_pctrl->tx_resource_err[tx_queue] = 1;
+ return KWGBE_QLAST_RESO;
+ } else {
+ /* Update the current descriptor */
+ CURR_TFD_SET(p_tx_next_desc_curr, tx_queue);
+ return KWGBE_OK;
+ }
+}
+
+/*
+ * tx_return_desc - Free all used Tx descriptors
+ *
+ * This routine returns the transmitted packet information to the caller.
+ * It uses the 'first' index to support Tx desc return in case a transmit
+ * of a packet spanned over multiple buffers is still in process.
+ * In case the Tx queue was in "resource error" condition, where there are
+ * no available Tx resources, the function resets the resource error flag.
+ *
+ * @p_gbe_pctrl Ethernet Port Control struct ptr.
+ * @tx_queue Number of Tx queue.
+ * @p_pkt_info User packet buffer ptr.
+ *
+ * OUTPUT:Tx ring 'first' and 'used' indexes are updated.
+ *
+ * RETURN:
+ * KWGBE_ERROR in case the routine can not access Tx desc ring.
+ * KWGBE_RETRY in case there is transmission in process.
+ * KWGBE_END_OF_JOB if the routine has nothing to release.
+ * KWGBE_OK otherwise.
+ */
+static enum kwgbe_fret_sts tx_return_desc(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q tx_queue,
+ struct kwgbe_pktinf *p_pkt_info)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ volatile struct kwgbe_tx_dscr *p_tx_desc_used = NULL;
+ volatile struct kwgbe_tx_dscr *p_tx_desc_first = NULL;
+ u32 command_status;
+
+ /* Get the Tx Desc ring indexes */
+ USED_TFD_GET(p_tx_desc_used, tx_queue);
+ FIRST_TFD_GET(p_tx_desc_first, tx_queue);
+
+ /* Sanity check */
+ if (p_tx_desc_used == NULL)
+ return KWGBE_ERROR;
+
+ command_status = p_tx_desc_used->cmd_sts;
+
+ /* Still transmitting... */
+ if (command_status & (KWGBE_BUFFER_OWNED_BY_DMA)) {
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_used, 0);
+ return KWGBE_RETRY;
+ }
+
+ /* Stop release. About to overlap the current available Tx descriptor */
+ if ((p_tx_desc_used == p_tx_desc_first) &&
+ (p_gbe_pctrl->tx_resource_err[tx_queue] == 0)) {
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_used, 0);
+ return KWGBE_END_OF_JOB;
+ }
+
+ /* Pass the packet information to the caller */
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->retinfo = p_tx_desc_used->retinfo;
+ p_tx_desc_used->retinfo = 0;
+
+ /* Update the next descriptor to release. */
+ USED_TFD_SET(TX_NEXT_DESC_PTR(p_tx_desc_used, tx_queue), tx_queue);
+
+ /* Any Tx return cancels the Tx resource error status */
+ if (p_gbe_pctrl->tx_resource_err[tx_queue] == 1)
+ p_gbe_pctrl->tx_resource_err[tx_queue] = 0;
+
+ D_CACHE_FLUSH_LINE((u32) p_tx_desc_used, 0);
+
+ return KWGBE_OK;
+}
+
+/*
+ * port_receive - Get received information from Rx ring.
+ *
+ * This routine returns the received data to the caller. There is no
+ * data copying during routine operation. All information is returned
+ * using pointer to packet information struct passed from the caller.
+ * If the routine exhausts Rx ring resources then the resource error flag
+ * is set.
+ *
+ * @p_gbe_pctrl Ethernet Port Control struct ptr.
+ * @rx_queue Number of Rx queue.
+ * @p_pkt_info User packet buffer.
+ *
+ * OUTPUT: Rx ring current and used indexes are updated.
+ *
+ * RETURN:
+ * KWGBE_ERROR in case the routine can not access Rx desc ring.
+ * KWGBE_QFULL if Rx ring resources are exhausted.
+ * KWGBE_END_OF_JOB if there is no received data.
+ * KWGBE_OK otherwise.
+ */
+static enum kwgbe_fret_sts port_receive(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q rx_queue,
+ struct kwgbe_pktinf *p_pkt_info)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ volatile struct kwgbe_rx_dscr *p_rx_curr_desc;
+ volatile struct kwgbe_rx_dscr *p_rx_next_curr_desc;
+ volatile struct kwgbe_rx_dscr *p_rx_used_desc;
+ u32 command_status;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (p_gbe_pctrl->rx_resource_err[rx_queue] == 1) {
+ printf("Rx Queue is full ...\n");
+ return KWGBE_QFULL;
+ }
+
+ /* Get the Rx Desc ring 'curr and 'used' indexes */
+ CURR_RFD_GET(p_rx_curr_desc, rx_queue);
+ USED_RFD_GET(p_rx_used_desc, rx_queue);
+
+ /* Sanity check */
+ if (p_rx_curr_desc == NULL)
+ return KWGBE_ERROR;
+
+ /* The following parameters are used to save readings from memory */
+ p_rx_next_curr_desc = RX_NEXT_DESC_PTR(p_rx_curr_desc, rx_queue);
+ command_status = p_rx_curr_desc->cmd_sts;
+
+ /* Nothing to receive... */
+ if (command_status & (KWGBE_BUFFER_OWNED_BY_DMA)) {
+ /*debug("Rx: command_status: %08x\n", command_status); */
+ D_CACHE_FLUSH_LINE((u32) p_rx_curr_desc, 0);
+ /*debug("KWGBE_END_OF_JOB ...\n"); */
+ return KWGBE_END_OF_JOB;
+ }
+
+ p_pkt_info->byte_cnt = (p_rx_curr_desc->byte_cnt) - RX_BUF_OFFSET;
+ p_pkt_info->cmd_sts = command_status;
+ p_pkt_info->buf_ptr = (p_rx_curr_desc->buf_ptr) + RX_BUF_OFFSET;
+ p_pkt_info->retinfo = p_rx_curr_desc->retinfo;
+ p_pkt_info->l4i_chk = p_rx_curr_desc->buf_size; /* IP fragment indicator */
+
+ /* Clean the return info field to indicate that the packet has been */
+ /* moved to the upper layers */
+ p_rx_curr_desc->retinfo = 0;
+
+ /* Update 'curr' in data structure */
+ CURR_RFD_SET(p_rx_next_curr_desc, rx_queue);
+
+ /* Rx descriptors resource exhausted. Set the Rx ring resource error flag */
+ if (p_rx_next_curr_desc == p_rx_used_desc)
+ p_gbe_pctrl->rx_resource_err[rx_queue] = 1;
+
+ D_CACHE_FLUSH_LINE((u32) p_rx_curr_desc, 0);
+ CPU_PIPE_FLUSH;
+ return KWGBE_OK;
+}
+
+/*
+ * rx_return_buff - Returns a Rx buffer back to the Rx ring.
+ *
+ * This routine returns a Rx buffer back to the Rx ring. It retrieves the
+ * next 'used' descriptor and attaches the returned buffer to it.
+ * In case the Rx ring was in "resource error" condition, where there are
+ * no available Rx resources, the function resets the resource error flag.
+ *
+ * @p_gbe_pctrl Ethernet Port Control struct ptr.
+ * @rx_queue Number of Rx queue.
+ * @p_pkt_info Information on the returned buffer ptr.
+ *
+ * OUTPUT: New available Rx resource in Rx descriptor ring.
+ *
+ * RETURN:
+ * KWGBE_ERROR in case the routine can not access Rx desc ring.
+ * KWGBE_OK otherwise.
+ */
+static enum kwgbe_fret_sts rx_return_buff(struct kwgbe_device *dkwgbe,
+ enum kwgbe_q rx_queue,
+ struct kwgbe_pktinf *p_pkt_info)
+{
+ struct kwgbe_portinfo *p_gbe_pctrl = &dkwgbe->portinfo;
+ volatile struct kwgbe_rx_dscr *p_used_rx_desc; /* Where to return Rx resource */
+
+ /* Get 'used' Rx descriptor */
+ USED_RFD_GET(p_used_rx_desc, rx_queue);
+
+ /* Sanity check */
+ if (p_used_rx_desc == NULL)
+ return KWGBE_ERROR;
+
+ p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
+ p_used_rx_desc->retinfo = p_pkt_info->retinfo;
+ p_used_rx_desc->byte_cnt = p_pkt_info->byte_cnt;
+ p_used_rx_desc->buf_size = KW_RX_BUF_SZ; /* Reset Buffer size */
+
+ /* Flush the write pipe */
+ CPU_PIPE_FLUSH;
+
+ /* Return the descriptor to DMA ownership */
+ p_used_rx_desc->cmd_sts =
+ KWGBE_BUFFER_OWNED_BY_DMA | KWGBE_RX_EN_INTERRUPT;
+
+ /* Flush descriptor and CPU pipe */
+ D_CACHE_FLUSH_LINE((u32) p_used_rx_desc, 0);
+ CPU_PIPE_FLUSH;
+
+ /* Move the used descriptor pointer to the next descriptor */
+ USED_RFD_SET(RX_NEXT_DESC_PTR(p_used_rx_desc, rx_queue), rx_queue);
+
+ /* Any Rx return cancels the Rx resource error status */
+ if (p_gbe_pctrl->rx_resource_err[rx_queue] == 1)
+ p_gbe_pctrl->rx_resource_err[rx_queue] = 0;
+
+ return KWGBE_OK;
+}
+
+/*
+ * init function
+ */
+static int kwgbe_init(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct kwgbe_portinfo *portinfo = &dkwgbe->portinfo;
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+
+ u32 queue;
+
+ /*
+ * ronen - when we update the MAC env params we only update dev->enetaddr
+ * see ./net/eth.c set_enetaddr()
+ */
+ memcpy(portinfo->port_mac_addr, dev->enetaddr, 6);
+
+ /* Activate the DMA channels etc */
+ port_init(dkwgbe);
+
+ /* "Allocate" setup TX rings */
+ for (queue = 0; queue < KW_TXQ_NO; queue++) {
+ u32 size;
+
+ portpriv->tx_ring_size[queue] = KW_TXQ_SZ;
+ /*size = no of DESCs times DESC-size */
+ size = (portpriv->tx_ring_size[queue] * TX_DESC_ALIGNED_SIZE);
+ portinfo->tx_desc_area_size[queue] = size;
+
+ /* first clear desc area completely */
+ memset((void *)portinfo->p_tx_desc_area_base[queue],
+ 0, portinfo->tx_desc_area_size[queue]);
+
+ /* initialize tx desc ring with low level driver */
+ if (kwgbe_init_tx_desc_ring
+ (dkwgbe, KWGBE_Q0,
+ portpriv->tx_ring_size[queue],
+ KW_TX_BUF_SZ /* Each Buffer is 1600 Byte */ ,
+ (u32) portinfo->
+ p_tx_desc_area_base[queue],
+ (u32) portinfo->p_tx_buffer_base[queue]) == 0)
+ printf("### initializing TX Ring\n");
+ }
+
+ /* "Allocate" setup RX rings */
+ for (queue = 0; queue < KW_RXQ_NO; queue++) {
+ u32 size;
+
+ /* Meantime RX Ring are fixed - but must be configurable by user */
+ portpriv->rx_ring_size[queue] = KW_RXQ_SZ;
+ size = (portpriv->rx_ring_size[queue] * RX_DESC_ALIGNED_SIZE);
+ portinfo->rx_desc_area_size[queue] = size;
+
+ /* first clear desc area completely */
+ memset((void *)portinfo->p_rx_desc_area_base[queue],
+ 0, portinfo->rx_desc_area_size[queue]);
+ if ((kwgbe_init_rx_desc_ring
+ (dkwgbe, KWGBE_Q0,
+ portpriv->rx_ring_size[queue],
+ KW_RX_BUF_SZ /* Each Buffer is 1600 Byte */ ,
+ (u32) portinfo->
+ p_rx_desc_area_base[queue],
+ (u32) portinfo->p_rx_buffer_base[queue])) == 0)
+ printf("### initializing RX Ring\n");
+ }
+
+ port_start(dkwgbe);
+
+#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
+ && defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
+ if (!miiphy_link(dev->name, portinfo->port_phy_addr)) {
+ printf("No link on %s\n", dev->name);
+ return 0;
+ }
+#endif
+ portpriv->running = MAGIC_KWGBE_RUNNING;
+ return 1;
+}
+
+/*
+ * kwgbe_xmit
+ *
+ * This function queues a packet in the Tx descriptor ring of the
+ * required port.
+ *
+ * Output : zero upon success
+ */
+int kwgbe_xmit(struct eth_device *dev, volatile void *dataPtr, int dataSize)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+ struct net_device_stats *stats = &portpriv->stats;
+
+ struct kwgbe_pktinf pkt_info;
+ enum kwgbe_fret_sts status;
+ enum kwgbe_fret_sts release_result;
+
+ /* Update packet info data structure */
+ pkt_info.cmd_sts = KWGBE_TX_FIRST_DESC | KWGBE_TX_LAST_DESC; /* first last */
+ pkt_info.cmd_sts |= KWGBE_BUFFER_OWNED_BY_DMA; /* DMA owned */
+
+ pkt_info.byte_cnt = dataSize;
+ pkt_info.buf_ptr = (u32) dataPtr;
+ pkt_info.retinfo = 0;
+
+ status = port_send(dkwgbe, KWGBE_Q0, &pkt_info);
+ if ((status == KWGBE_ERROR) || (status == KWGBE_QFULL)) {
+ printf("On transmitting packet ..\n");
+ if (status == KWGBE_QFULL)
+ printf("ETH Queue is full.\n");
+ if (status == KWGBE_QLAST_RESO)
+ printf("ETH Queue: using last available resource.\n");
+ goto error;
+ }
+
+ /* Update statistics and start of transmission time */
+ stats->tx_bytes += dataSize;
+ stats->tx_packets++;
+
+ /* Check if packet(s) is(are) transmitted correctly (release everything) */
+ do {
+ release_result = tx_return_desc(dkwgbe, KWGBE_Q0, &pkt_info);
+ switch (release_result) {
+ case KWGBE_OK:
+ debug("descriptor released\n");
+ if (pkt_info.cmd_sts & 1) {
+ printf("in TX\n");
+ stats->tx_errors++;
+ }
+ break;
+ case KWGBE_RETRY:
+ debug("transmission still in process\n");
+ break;
+ case KWGBE_ERROR:
+ printf("routine can not access Tx desc ring\n");
+ break;
+ case KWGBE_END_OF_JOB:
+ debug("the routine has nothing to release\n");
+ break;
+ default: /* should not happen */
+ break;
+ }
+ } while (release_result == KWGBE_OK);
+
+ return 0; /* success */
+ error:
+ return 1; /* Failed - higher layers will free the skb */
+}
+
+/*
+ * kwgbe_receive
+ *
+ * This function forwards packets received from the port's queues
+ * to the upper layer (NetReceive).
+ *
+ * @dev a pointer to the required interface
+ *
+ * Output : number of served packets
+ */
+int kwgbe_receive(struct eth_device *dev)
+{
+ struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
+ struct egiga_priv *portpriv = &dkwgbe->portpriv;
+ struct net_device_stats *stats = &portpriv->stats;
+ struct kwgbe_pktinf pkt_info;
+
+ while ((port_receive(dkwgbe, KWGBE_Q0, &pkt_info) == KWGBE_OK)) {
+
+ if (pkt_info.byte_cnt != 0) {
+ debug("%s: Received %d byte Packet @ 0x%x",
+ __FUNCTION__, pkt_info.byte_cnt,
+ pkt_info.buf_ptr);
+ }
+ /* Update statistics. Note byte count includes 4 byte CRC count */
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_info.byte_cnt;
+
+ /*
+ * In case a packet is received without the first / last bits on, OR
+ * the error summary bit is on, the packet needs to be dropped.
+ */
+ if (((pkt_info.
+ cmd_sts & (KWGBE_RX_FIRST_DESC | KWGBE_RX_LAST_DESC)) !=
+ (KWGBE_RX_FIRST_DESC | KWGBE_RX_LAST_DESC))
+ || (pkt_info.cmd_sts & KWGBE_ERROR_SUMMARY)) {
+ stats->rx_dropped++;
+
+ printf
+ ("Received packet spread on multiple descriptors\n");
+
+ /* Is this caused by an error ? */
+ if (pkt_info.cmd_sts & KWGBE_ERROR_SUMMARY) {
+ stats->rx_errors++;
+ }
+
+ /* free these descriptors again without forwarding them to the higher layers */
+ pkt_info.buf_ptr &= ~0x7; /* realign buffer again */
+ pkt_info.byte_cnt = 0x0000; /* Reset Byte count */
+
+ if (rx_return_buff(dkwgbe, KWGBE_Q0, &pkt_info) !=
+ KWGBE_OK) {
+ printf
+ ("Error while returning the RX Desc to Ring\n");
+ } else {
+ debug("RX Desc returned to Ring");
+ }
+ /* free these descriptors again */
+ } else {
+
+ /* !!! call higher layer processing */
+ debug
+ ("\nNow send it to upper layer protocols (NetReceive) ...");
+
+ /* let the upper layer handle the packet */
+ NetReceive((uchar *) pkt_info.buf_ptr,
+ (int)pkt_info.byte_cnt);
+
+ /* free descriptor */
+ pkt_info.buf_ptr &= ~0x7; /* realign buffer again */
+ pkt_info.byte_cnt = 0x0000; /* Reset Byte count */
+ debug("RX: pkt_info.buf_ptr = %x", pkt_info.buf_ptr);
+ if (rx_return_buff(dkwgbe, KWGBE_Q0, &pkt_info) !=
+ KWGBE_OK) {
+ printf("while returning the RX Desc to Ring");
+ } else {
+ debug("RX Desc returned to Ring");
+ }
+ /* free descriptor code end */
+ }
+ }
+ egiga_get_stats(dev); /* update statistics */
+ return 1;
+}
+
+/*
+ * Called from net/eth.c
+ */
+int kirkwood_egiga_initialize(bd_t * bis)
+{
+ struct kwgbe_device *dkwgbe;
+ struct eth_device *dev;
+ struct kwgbe_portinfo *portinfo;
+ struct egiga_priv *portpriv;
+ int devnum, x, temp;
+ char *s, *e, buf[64];
+ u8 used_ports[MAX_KWGBE_DEVS] = CONFIG_KIRKWOOD_EGIGA_PORTS;
+
+ for (devnum = 0; devnum < MAX_KWGBE_DEVS; devnum++) {
+ /*skip if port is configured not to use */
+ if (used_ports[devnum] == 0)
+ continue;
+
+ dkwgbe = malloc(sizeof(struct kwgbe_device));
+ if (!dkwgbe) {
+ printf("Error: Failed to allocate memory for kwgbe%d\n",
+ devnum);
+ return -1;
+ }
+ memset(dkwgbe, 0, sizeof(struct kwgbe_device));
+
+ dev = &dkwgbe->dev;
+ portinfo = &dkwgbe->portinfo;
+ portpriv = &dkwgbe->portpriv;
+
+ /* must be less than NAMESIZE (16) */
+ sprintf(dev->name, "egiga%d", devnum);
+ debug("Initializing %s", dev->name);
+
+ /* Extract the MAC address from the environment */
+ switch (devnum) {
+ case 0:
+ dkwgbe->regs = (void *)KW_EGIGA0_BASE;
+ s = "ethaddr";
+ break;
+ case 1:
+ dkwgbe->regs = (void *)KW_EGIGA1_BASE;
+ s = "eth1addr";
+ break;
+ default: /* this should never happen */
+ printf("%s: Invalid device number %d\n",
+ __FUNCTION__, devnum);
+ return -1;
+ }
+
+ e = getenv(s);
+ if (!e) {
+ /* Generate a random MAC address if not set */
+ sprintf(buf, "00:50:43:%02x:%02x:%02x",
+ get_random_hex(), get_random_hex(),
+ get_random_hex());
+ setenv(s, buf);
+ }
+
+ temp = getenv_r(s, buf, sizeof(buf));
+ s = (temp > 0) ? buf : NULL;
+
+ debug("Setting MAC %d to %s", devnum, s);
+ /* on POR port is pre-powered up, so assume it is up */
+
+ for (x = 0; x < 6; ++x) {
+ dev->enetaddr[x] = s ? simple_strtoul(s, &e, 16) : 0;
+ if (s)
+ s = (*e) ? e + 1 : e;
+ }
+ /* set the MAC addr in the HW */
+ memcpy(portinfo->port_mac_addr, dev->enetaddr, 6);
+ port_uc_addr_set(dkwgbe->regs, dev->enetaddr, 0);
+
+ dev->init = (void *)kwgbe_init;
+ dev->halt = (void *)kwgbe_stop;
+ dev->send = (void *)kwgbe_xmit;
+ dev->recv = (void *)kwgbe_receive;
+
+ /*
+ * Read MIB counter on the GT in order to reset them,
+ * then zero all the stats fields in memory
+ */
+ egiga_update_stat(dev);
+ memset(&portpriv->stats, 0, sizeof(struct net_device_stats));
+
+ debug("Allocating descriptor and buffer rings");
+
+ portinfo->p_rx_desc_area_base[0] =
+ (struct kwgbe_rx_dscr *)memalign(16,
+ RX_DESC_ALIGNED_SIZE *
+ KW_RXQ_SZ + 1);
+ portinfo->p_tx_desc_area_base[0] =
+ (struct kwgbe_tx_dscr *)memalign(16,
+ TX_DESC_ALIGNED_SIZE *
+ KW_TXQ_SZ + 1);
+
+ portinfo->p_rx_buffer_base[0] =
+ (char *)memalign(16, KW_RXQ_SZ * KW_RX_BUF_SZ + 1);
+ portinfo->p_tx_buffer_base[0] =
+ (char *)memalign(16, KW_TXQ_SZ * KW_TX_BUF_SZ + 1);
+
+ eth_register(dev);
+
+ /* Set phy address of the port */
+ portinfo->port_phy_addr = PHY_BASE_ADR + devnum;
+ phy_set_addr(dkwgbe->regs, portinfo->port_phy_addr);
+
+#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
+ miiphy_register(dev->name, smi_reg_read, smi_reg_write);
+#endif
+ }
+ return 0;
+}
diff --git a/drivers/net/kirkwood_egiga.h b/drivers/net/kirkwood_egiga.h
new file mode 100644
index 0000000..3a5a306
--- /dev/null
+++ b/drivers/net/kirkwood_egiga.h
@@ -0,0 +1,828 @@
+/*
+ * (C) Copyright 2009
+ * Marvell Semiconductor <www.marvell.com>
+ * Prafulla Wadaskar <prafulla at marvell.com>
+ *
+ * based on - Driver for MV64360X ethernet ports
+ * Copyright (C) 2002 rabeeh at galileo.co.il
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#ifndef __EGIGA_H__
+#define __EGIGA_H__
+
+#define MAX_KWGBE_DEVS 2 /*controller has two ports */
+
+/* PHY_BASE_ADR is board specific and can be configured */
+#if defined (CONFIG_PHY_BASE_ADR)
+#define PHY_BASE_ADR CONFIG_PHY_BASE_ADR
+#else
+#define PHY_BASE_ADR 0x08 /* default phy base addr */
+#endif
+
+/* Egiga driver statistics can be displayed optionally */
+#ifdef CONFIG_EGIGA_STATS_BY_SOFTWARE
+#define UPDATE_STATS_BY_SOFTWARE
+#else
+#undef UPDATE_STATS_BY_SOFTWARE
+#endif
+
+/* Constants */
+#define MAGIC_KWGBE_RUNNING 8031971
+#define INT_CAUSE_UNMASK_ALL 0x0007ffff
+#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
+
+/* Port attrib */
+/* The controller supports up to 8 queues, but only 1 is configured here */
+#define MAX_RXQ_NO 1
+#define MAX_TXQ_NO 1
+/* Use one TX queue and one RX queue */
+#define KW_TXQ_NO 1
+#define KW_RXQ_NO 1
+
+/*
+ * Number of RX / TX descriptors on RX / TX rings.
+ * Note that allocating RX descriptors is done by allocating the RX
+ * ring AND a preallocated RX buffer for each descriptor.
+ * Allocating TX descriptors only allocates the TX descriptor ring,
+ * with no preallocated TX buffers (buffers are allocated by higher layers).
+ */
+
+/* TX/RX parameters can be passed externally */
+#ifdef CONFIG_KWGBE_TXQ_SZ
+#define KW_TXQ_SZ CONFIG_KWGBE_TXQ_SZ
+#else
+#define KW_TXQ_SZ 4
+#endif
+
+#ifdef CONFIG_KWGBE_RXQ_SZ
+#define KW_RXQ_SZ CONFIG_KWGBE_RXQ_SZ
+#else
+#define KW_RXQ_SZ 4
+#endif
+
+#ifdef CONFIG_KWGBE_RXBUF_SZ
+#define KW_RX_BUF_SZ CONFIG_KWGBE_RXBUF_SZ
+#else
+#define KW_RX_BUF_SZ 1600
+#endif
+
+#ifdef CONFIG_KWGBE_TXBUF_SZ
+#define KW_TX_BUF_SZ CONFIG_KWGBE_TXBUF_SZ
+#else
+#define KW_TX_BUF_SZ 1600
+#endif
+
+#define RX_BUF_MXSZ 0xFFFF
+#define TX_BUF_MXSZ 0xFFFF /* Buffers are limited to 64k */
+#define RX_BUF_MNSZ 0x8
+#define TX_BUF_MNSZ 0x8
+
+/*
+ * Network device statistics. Akin to the 2.0 ether stats but
+ * with byte counters.
+ */
+struct net_device_stats {
+ u32 rx_packets; /* total packets received */
+ u32 tx_packets; /* total packets transmitted */
+ u64 rx_bytes; /* total bytes received */
+ u64 tx_bytes; /* total bytes transmitted */
+ u32 rx_errors; /* bad packets received */
+ u32 tx_errors; /* packet transmit problems */
+ u32 rx_dropped; /* no space in linux buffers */
+ u32 tx_dropped; /* no space available in linux */
+ u32 multicast; /* multicast packets received */
+ u32 collisions;
+ /* detailed rx_errors: */
+ u32 rx_length_errors;
+ u32 rx_over_errors; /* receiver ring buff overflow */
+ u32 rx_crc_errors; /* recved pkt with crc error */
+ u32 rx_frame_errors; /* recv'd frame alignment error */
+ u32 rx_fifo_errors; /* recv'r fifo overrun */
+ u32 rx_missed_errors; /* receiver missed packet */
+ /* detailed tx_errors */
+ u32 tx_aborted_errors;
+ u32 tx_carrier_errors;
+ u32 tx_fifo_errors;
+ u32 tx_heartbeat_errors;
+ u32 tx_window_errors;
+ /* for cslip etc */
+ u32 rx_compressed;
+ u32 tx_compressed;
+};
+
+/* Private data structure used for ethernet device */
+struct egiga_priv {
+ struct net_device_stats stats;
+ /* Pointers to aligned Tx/Rx buffer areas */
+ char *p_tx_buffer[KW_TXQ_SZ + 1];
+ char *p_rx_buffer[KW_RXQ_SZ + 1];
+ /* Size of Tx Ring per queue */
+ u32 tx_ring_size[MAX_TXQ_NO];
+ /* Size of Rx Ring per queue */
+ u32 rx_ring_size[MAX_RXQ_NO];
+ /* Magic Number for Ethernet running */
+ u32 running;
+};
+
+/* Chip Registers Structures */
+struct mib_counters {
+ u64 good_octets_received;
+ u32 bad_octets_received;
+ u32 internal_mac_transmit_err;
+ u32 good_frames_received;
+ u32 bad_frames_received;
+ u32 broadcast_frames_received;
+ u32 multicast_frames_received;
+ u32 frames_64_octets;
+ u32 frames_65_to_127_octets;
+ u32 frames_128_to_255_octets;
+ u32 frames_256_to_511_octets;
+ u32 frames_512_to_1023_octets;
+ u32 frames_1024_to_max_octets;
+ u64 good_octets_sent;
+ u32 good_frames_sent;
+ u32 excessive_collision;
+ u32 multicast_frames_sent;
+ u32 broadcast_frames_sent;
+ u32 unrec_mac_control_received;
+ u32 fc_sent;
+ u32 good_fc_received;
+ u32 bad_fc_received;
+ u32 undersize_received;
+ u32 fragments_received;
+ u32 oversize_received;
+ u32 jabber_received;
+ u32 mac_receive_error;
+ u32 bad_crc_event;
+ u32 collision;
+ u32 late_collision;
+};
+
+struct kwgbe_barsz {
+ u32 bar;
+ u32 size;
+};
+
+struct kwgbe_crdp {
+ u32 crdp;
+ u32 crdp_pad[3];
+};
+
+struct kwgbe_tqx {
+ u32 qxttbc;
+ u32 tqxtbc;
+ u32 tqxac;
+ u32 tqxpad;
+};
+
+struct kwgbe_registers {
+ u32 phyadr;
+ u32 smi;
+ u32 euda;
+ u32 eudid;
+ u8 pad1[0x080 - 0x00c - 4];
+ u32 euic;
+ u32 euim;
+ u8 pad2[0x094 - 0x084 - 4];
+ u32 euea;
+ u32 euiae;
+ u8 pad3[0x0b0 - 0x098 - 4];
+ u32 euc;
+ u8 pad3a[0x200 - 0x0b0 - 4];
+ struct kwgbe_barsz barsz[6];
+ u8 pad4[0x280 - 0x22c - 4];
+ u32 ha_remap[4];
+ u32 bare;
+ u32 epap;
+ u8 pad5[0x400 - 0x294 - 4];
+ u32 pxc;
+ u32 pxcx;
+ u32 mii_ser_params;
+ u8 pad6[0x410 - 0x408 - 4];
+ u32 evlane;
+ u32 macal;
+ u32 macah;
+ u32 sdc;
+ u32 dscp[7];
+ u32 psc0;
+ u32 vpt2p;
+ u32 ps0;
+ u32 tqc;
+ u32 psc1;
+ u32 ps1;
+ u32 mrvl_header;
+ u8 pad7[0x460 - 0x454 - 4];
+ u32 ic;
+ u32 ice;
+ u32 pim;
+ u32 peim;
+ u8 pad8[0x474 - 0x46c - 4];
+ u32 pxtfut;
+ u32 pad9;
+ u32 pxmfs;
+ u32 pad10;
+ u32 pxdfc;
+ u32 pxofc;
+ u8 pad11[0x494 - 0x488 - 4];
+ u32 peuiae;
+ u8 pad12[0x4bc - 0x494 - 4];
+ u32 eth_type_prio;
+ u8 pad13[0x4dc - 0x4bc - 4];
+ u32 tqfpc;
+ u32 pttbrc;
+ u32 tqc1;
+ u32 pmtu;
+ u32 pmtbs;
+ u8 pad14[0x60c - 0x4ec - 4];
+ struct kwgbe_crdp crdp[7];
+ u32 crdp7;
+ u32 rqc;
+ u32 cur_servd_dscr_ptr;
+ u8 pad15[0x6c0 - 0x684 - 4];
+ u32 tcqdp[8];
+ u8 pad16[0x700 - 0x6dc - 4];
+ struct kwgbe_tqx tqx[8];
+ u32 pttbc;
+ u8 pad17[0x7a8 - 0x780 - 4];
+ u32 tqxipg0;
+ u32 pad18[3];
+ u32 tqxipg1;
+ u8 pad19[0x7c0 - 0x7b8 - 4];
+ u32 hitkninlopkt;
+ u32 hitkninasyncpkt;
+ u32 lotkninasyncpkt;
+ u32 pad20;
+ u32 ts;
+ u8 pad21[0x3000 - 0x27d0 - 4];
+ struct mib_counters mibc;
+ u8 pad22[0x3400 - 0x3000 - sizeof(struct mib_counters)];
+ u32 dfsmt[64];
+ u32 dfomt[64];
+ u32 dfut[4];
+ u8 pad23[0xe20c0 - 0x7360c - 4];
+ u32 pmbus_top_arbiter;
+};
+
+#define to_dkwgbe(_kd) container_of(_kd, struct kwgbe_device, dev)
+#define KWGBEREG_WR(adr, val) writel(val, (u32)&adr)
+#define KWGBEREG_RD(adr) readl((u32)&adr)
+#define KWGBEREG_BITS_RESET(adr, val) writel(readl((u32)&adr) & ~(val), (u32)&adr)
+#define KWGBEREG_BITS_SET(adr, val) writel(readl((u32)&adr) | (val), (u32)&adr)
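+
+/*
+ * Usage sketch (illustration only): given struct kwgbe_registers *regs,
+ * KWGBEREG_BITS_SET(regs->psc0, KWGBE_SERIAL_PORT_EN) ORs the port enable
+ * bit into the serial control register without disturbing the other bits.
+ */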
+
+/* Default port configuration value */
+#define PRT_CFG_VAL \
+ KWGBE_UCAST_MOD_NRML | \
+ KWGBE_DFLT_RX_Q0 | \
+ KWGBE_DFLT_RX_ARPQ0 | \
+ KWGBE_RX_BC_IF_NOT_IP_OR_ARP | \
+ KWGBE_RX_BC_IF_IP | \
+ KWGBE_RX_BC_IF_ARP | \
+ KWGBE_CPTR_TCP_FRMS_DIS | \
+ KWGBE_CPTR_UDP_FRMS_DIS | \
+ KWGBE_DFLT_RX_TCPQ0 | \
+ KWGBE_DFLT_RX_UDPQ0 | \
+ KWGBE_DFLT_RX_BPDU_Q_0
+
+/* Default port extend configuration value */
+#define PORT_CFG_EXTEND_VALUE \
+ KWGBE_SPAN_BPDU_PACKETS_AS_NORMAL | \
+ KWGBE_PARTITION_DIS | \
+ KWGBE_TX_CRC_GENERATION_EN
+
+/* Default sdma control value */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define PORT_SDMA_CFG_VALUE \
+ KWGBE_RX_BURST_SIZE_16_64BIT | \
+ KWGBE_BLM_RX_NO_SWAP | \
+ KWGBE_BLM_TX_NO_SWAP | \
+ GT_KWGBE_IPG_INT_RX(0) | \
+ KWGBE_TX_BURST_SIZE_16_64BIT
+#else
+#define PORT_SDMA_CFG_VALUE \
+ KWGBE_RX_BURST_SIZE_4_64BIT | \
+ GT_KWGBE_IPG_INT_RX(0) | \
+ KWGBE_TX_BURST_SIZE_4_64BIT
+#endif
+
+#define GT_KWGBE_IPG_INT_RX(value) (((value) & 0x3fff) << 8)
+
+/* Default port serial control value */
+#define PORT_SERIAL_CONTROL_VALUE_TMP \
+ KWGBE_FORCE_LINK_PASS | \
+ KWGBE_DIS_AUTO_NEG_FOR_DUPLX | \
+ KWGBE_DIS_AUTO_NEG_FOR_FLOW_CTRL | \
+ KWGBE_ADV_NO_FLOW_CTRL | \
+ KWGBE_FORCE_FC_MODE_NO_PAUSE_DIS_TX | \
+ KWGBE_FORCE_BP_MODE_NO_JAM | \
+ (1 << 9) | \
+ KWGBE_DO_NOT_FORCE_LINK_FAIL | \
+ KWGBE_DIS_AUTO_NEG_SPEED_GMII | \
+ KWGBE_DTE_ADV_0 | \
+ KWGBE_MIIPHY_MAC_MODE | \
+ KWGBE_AUTO_NEG_NO_CHANGE | \
+ KWGBE_MAX_RX_PACKET_1552BYTE | \
+ KWGBE_CLR_EXT_LOOPBACK | \
+ KWGBE_SET_FULL_DUPLEX_MODE | \
+ KWGBE_DIS_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX | \
+ KWGBE_SET_MII_SPEED_TO_100
+
+#ifdef CONFIG_SYS_MII_MODE
+#define PORT_SERIAL_CONTROL_VALUE \
+ PORT_SERIAL_CONTROL_VALUE_TMP | \
+ KWGBE_SET_GMII_SPEED_TO_10_100
+#else
+#define PORT_SERIAL_CONTROL_VALUE \
+ PORT_SERIAL_CONTROL_VALUE_TMP | \
+ KWGBE_SET_GMII_SPEED_TO_1000
+#endif
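+
+/*
+ * In short: both variants force the link up with auto-negotiation and
+ * flow control disabled and full duplex set; CONFIG_SYS_MII_MODE merely
+ * selects 10/100 MII speed instead of 1000 Mbps GMII.
+ */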
+
+/* Tx WRR configuration macros */
+#define PORT_MAX_TRAN_UNIT 0x24 /* MTU register (default) 9KByte */
+#define PORT_MAX_TOKEN_BUCKET_SIZE 0xFFFF /* PMTBS reg (default) */
+#define PORT_TOKEN_RATE 1023 /* PTTBRC reg (default) */
+/* MAC accept/reject macros */
+#define ACCEPT_MAC_ADDR 0
+#define REJECT_MAC_ADDR 1
+/* Size of a Tx/Rx descriptor used in chain list data structure */
+#define RX_DESC_ALIGNED_SIZE 0x20
+#define TX_DESC_ALIGNED_SIZE 0x20
+/* An offset in Tx descriptors to store data for buffers less than 8 Bytes */
+#define TX_BUF_OFFSET_IN_DESC 0x18
+/* Buffer offset from buffer pointer */
+#define RX_BUF_OFFSET 0x2
+
+/* Port serial status reg (PSR) */
+#define KWGBE_INTERFACE_GMII_MII 0
+#define KWGBE_INTERFACE_PCM 1
+#define KWGBE_LINK_IS_DOWN 0
+#define KWGBE_LINK_IS_UP (1 << 1)
+#define KWGBE_PORT_AT_HALF_DUPLEX 0
+#define KWGBE_PORT_AT_FULL_DUPLEX (1 << 2)
+#define KWGBE_RX_FLOW_CTRL_DISD 0
+#define KWGBE_RX_FLOW_CTRL_ENBALED (1 << 3)
+#define KWGBE_GMII_SPEED_100_10 0
+#define KWGBE_GMII_SPEED_1000 (1 << 4)
+#define KWGBE_MII_SPEED_10 0
+#define KWGBE_MII_SPEED_100 (1 << 5)
+#define KWGBE_NO_TX 0
+#define KWGBE_TX_IN_PROGRESS (1 << 7)
+#define KWGBE_BYPASS_NO_ACTIVE 0
+#define KWGBE_BYPASS_ACTIVE (1 << 8)
+#define KWGBE_PORT_NOT_AT_PARTN_STT 0
+#define KWGBE_PORT_AT_PARTN_STT (1 << 9)
+#define KWGBE_PORT_TX_FIFO_NOT_EMPTY 0
+#define KWGBE_PORT_TX_FIFO_EMPTY (1 << 10)
+
+/* These macros describe the Port configuration reg (Px_cR) bits */
+#define KWGBE_UCAST_MOD_NRML 0
+#define KWGBE_UNICAST_PROMISCUOUS_MODE 1
+#define KWGBE_DFLT_RX_Q0 0
+#define KWGBE_DFLT_RX_Q1 (1 << 1)
+#define KWGBE_DFLT_RX_Q2 (1 << 2)
+#define KWGBE_DFLT_RX_Q3 ((1 << 2) | (1 << 1))
+#define KWGBE_DFLT_RX_Q4 (1 << 3)
+#define KWGBE_DFLT_RX_Q5 ((1 << 3) | (1 << 1))
+#define KWGBE_DFLT_RX_Q6 ((1 << 3) | (1 << 2))
+#define KWGBE_DFLT_RX_Q7 ((1 << 3) | (1 << 2) | (1 << 1))
+#define KWGBE_DFLT_RX_ARPQ0 0
+#define KWGBE_DFLT_RX_ARPQ1 (1 << 4)
+#define KWGBE_DFLT_RX_ARPQ2 (1 << 5)
+#define KWGBE_DFLT_RX_ARPQ3 ((1 << 5) | (1 << 4))
+#define KWGBE_DFLT_RX_ARPQ4 (1 << 6)
+#define KWGBE_DFLT_RX_ARPQ5 ((1 << 6) | (1 << 4))
+#define KWGBE_DFLT_RX_ARPQ6 ((1 << 6) | (1 << 5))
+#define KWGBE_DFLT_RX_ARPQ7 ((1 << 6) | (1 << 5) | (1 << 4))
+#define KWGBE_RX_BC_IF_NOT_IP_OR_ARP 0
+#define KWGBE_REJECT_BC_IF_NOT_IP_OR_ARP (1 << 7)
+#define KWGBE_RX_BC_IF_IP 0
+#define KWGBE_REJECT_BC_IF_IP (1 << 8)
+#define KWGBE_RX_BC_IF_ARP 0
+#define KWGBE_REJECT_BC_IF_ARP (1 << 9)
+#define KWGBE_TX_AM_NO_UPDATE_ERR_SMRY (1 << 12)
+#define KWGBE_CPTR_TCP_FRMS_DIS 0
+#define KWGBE_CPTR_TCP_FRMS_EN (1 << 14)
+#define KWGBE_CPTR_UDP_FRMS_DIS 0
+#define KWGBE_CPTR_UDP_FRMS_EN (1 << 15)
+#define KWGBE_DFLT_RX_TCPQ0 0
+#define KWGBE_DFLT_RX_TCPQ1 (1 << 16)
+#define KWGBE_DFLT_RX_TCPQ2 (1 << 17)
+#define KWGBE_DFLT_RX_TCPQ3 ((1 << 17) | (1 << 16))
+#define KWGBE_DFLT_RX_TCPQ4 (1 << 18)
+#define KWGBE_DFLT_RX_TCPQ5 ((1 << 18) | (1 << 16))
+#define KWGBE_DFLT_RX_TCPQ6 ((1 << 18) | (1 << 17))
+#define KWGBE_DFLT_RX_TCPQ7 ((1 << 18) | (1 << 17) | (1 << 16))
+#define KWGBE_DFLT_RX_UDPQ0 0
+#define KWGBE_DFLT_RX_UDPQ1 (1 << 19)
+#define KWGBE_DFLT_RX_UDPQ2 (1 << 20)
+#define KWGBE_DFLT_RX_UDPQ3 ((1 << 20) | (1 << 19))
+#define KWGBE_DFLT_RX_UDPQ4 (1 << 21)
+#define KWGBE_DFLT_RX_UDPQ5 ((1 << 21) | (1 << 19))
+#define KWGBE_DFLT_RX_UDPQ6 ((1 << 21) | (1 << 20))
+#define KWGBE_DFLT_RX_UDPQ7 ((1 << 21) | (1 << 20) | (1 << 19))
+#define KWGBE_DFLT_RX_BPDU_Q_0 0
+#define KWGBE_DFLT_RX_BPDU_Q_1 (1 << 22)
+#define KWGBE_DFLT_RX_BPDU_Q_2 (1 << 23)
+#define KWGBE_DFLT_RX_BPDU_Q_3 ((1 << 23) | (1 << 22))
+#define KWGBE_DFLT_RX_BPDU_Q_4 (1 << 24)
+#define KWGBE_DFLT_RX_BPDU_Q_5 ((1 << 24) | (1 << 22))
+#define KWGBE_DFLT_RX_BPDU_Q_6 ((1 << 24) | (1 << 23))
+#define KWGBE_DFLT_RX_BPDU_Q_7 ((1 << 24) | (1 << 23) | (1 << 22))
+#define KWGBE_DFLT_RX_TCP_CHKSUM_MODE (1 << 25)
+
+/* These macros describe the Port configuration extend reg (Px_cXR) bits */
+#define KWGBE_CLASSIFY_EN 1
+#define KWGBE_SPAN_BPDU_PACKETS_AS_NORMAL 0
+#define KWGBE_SPAN_BPDU_PACKETS_TO_RX_Q7 (1 << 1)
+#define KWGBE_PARTITION_DIS 0
+#define KWGBE_PARTITION_EN (1 << 2)
+#define KWGBE_TX_CRC_GENERATION_EN 0
+#define KWGBE_TX_CRC_GENERATION_DIS (1 << 3)
+
+/* Tx/Rx queue command reg (RQCR/TQCR)*/
+#define KWGBE_Q_0_EN 1
+#define KWGBE_Q_1_EN (1 << 1)
+#define KWGBE_Q_2_EN (1 << 2)
+#define KWGBE_Q_3_EN (1 << 3)
+#define KWGBE_Q_4_EN (1 << 4)
+#define KWGBE_Q_5_EN (1 << 5)
+#define KWGBE_Q_6_EN (1 << 6)
+#define KWGBE_Q_7_EN (1 << 7)
+#define KWGBE_Q_0_DIS (1 << 8)
+#define KWGBE_Q_1_DIS (1 << 9)
+#define KWGBE_Q_2_DIS (1 << 10)
+#define KWGBE_Q_3_DIS (1 << 11)
+#define KWGBE_Q_4_DIS (1 << 12)
+#define KWGBE_Q_5_DIS (1 << 13)
+#define KWGBE_Q_6_DIS (1 << 14)
+#define KWGBE_Q_7_DIS (1 << 15)
+
+/* These macros describe the Port Sdma configuration reg (SDCR) bits */
+#define KWGBE_RIFB 1
+#define KWGBE_RX_BURST_SIZE_1_64BIT 0
+#define KWGBE_RX_BURST_SIZE_2_64BIT (1 << 1)
+#define KWGBE_RX_BURST_SIZE_4_64BIT (1 << 2)
+#define KWGBE_RX_BURST_SIZE_8_64BIT ((1 << 2) | (1 << 1))
+#define KWGBE_RX_BURST_SIZE_16_64BIT (1 << 3)
+#define KWGBE_BLM_RX_NO_SWAP (1 << 4)
+#define KWGBE_BLM_RX_BYTE_SWAP 0
+#define KWGBE_BLM_TX_NO_SWAP (1 << 5)
+#define KWGBE_BLM_TX_BYTE_SWAP 0
+#define KWGBE_DESCRIPTORS_BYTE_SWAP (1 << 6)
+#define KWGBE_DESCRIPTORS_NO_SWAP 0
+#define KWGBE_TX_BURST_SIZE_1_64BIT 0
+#define KWGBE_TX_BURST_SIZE_2_64BIT (1 << 22)
+#define KWGBE_TX_BURST_SIZE_4_64BIT (1 << 23)
+#define KWGBE_TX_BURST_SIZE_8_64BIT ((1 << 23) | (1 << 22))
+#define KWGBE_TX_BURST_SIZE_16_64BIT (1 << 24)
+
+/* These macros describe the Port serial control reg (PSCR) bits */
+#define KWGBE_SERIAL_PORT_DIS 0
+#define KWGBE_SERIAL_PORT_EN 1
+#define KWGBE_FORCE_LINK_PASS (1 << 1)
+#define KWGBE_DO_NOT_FORCE_LINK_PASS 0
+#define KWGBE_EN_AUTO_NEG_FOR_DUPLX 0
+#define KWGBE_DIS_AUTO_NEG_FOR_DUPLX (1 << 2)
+#define KWGBE_EN_AUTO_NEG_FOR_FLOW_CTRL 0
+#define KWGBE_DIS_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
+#define KWGBE_ADV_NO_FLOW_CTRL 0
+#define KWGBE_ADV_SYMMETRIC_FLOW_CTRL (1 << 4)
+#define KWGBE_FORCE_FC_MODE_NO_PAUSE_DIS_TX 0
+#define KWGBE_FORCE_FC_MODE_TX_PAUSE_DIS (1 << 5)
+#define KWGBE_FORCE_BP_MODE_NO_JAM 0
+#define KWGBE_FORCE_BP_MODE_JAM_TX (1 << 7)
+#define KWGBE_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1 << 8)
+#define KWGBE_FORCE_LINK_FAIL 0
+#define KWGBE_DO_NOT_FORCE_LINK_FAIL (1 << 10)
+#define KWGBE_DIS_AUTO_NEG_SPEED_GMII (1 << 13)
+#define KWGBE_EN_AUTO_NEG_SPEED_GMII 0
+#define KWGBE_DTE_ADV_0 0
+#define KWGBE_DTE_ADV_1 (1 << 14)
+#define KWGBE_MIIPHY_MAC_MODE 0
+#define KWGBE_MIIPHY_PHY_MODE (1 << 15)
+#define KWGBE_AUTO_NEG_NO_CHANGE 0
+#define KWGBE_RESTART_AUTO_NEG (1 << 16)
+#define KWGBE_MAX_RX_PACKET_1518BYTE 0
+#define KWGBE_MAX_RX_PACKET_1522BYTE (1 << 17)
+#define KWGBE_MAX_RX_PACKET_1552BYTE (1 << 18)
+#define KWGBE_MAX_RX_PACKET_9022BYTE ((1 << 18) | (1 << 17))
+#define KWGBE_MAX_RX_PACKET_9192BYTE (1 << 19)
+#define KWGBE_MAX_RX_PACKET_9700BYTE ((1 << 19) | (1 << 17))
+#define KWGBE_SET_EXT_LOOPBACK (1 << 20)
+#define KWGBE_CLR_EXT_LOOPBACK 0
+#define KWGBE_SET_FULL_DUPLEX_MODE (1 << 21)
+#define KWGBE_SET_HALF_DUPLEX_MODE 0
+#define KWGBE_EN_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22)
+#define KWGBE_DIS_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
+#define KWGBE_SET_GMII_SPEED_TO_10_100 0
+#define KWGBE_SET_GMII_SPEED_TO_1000 (1 << 23)
+#define KWGBE_SET_MII_SPEED_TO_10 0
+#define KWGBE_SET_MII_SPEED_TO_100 (1 << 24)
+
+/* SMI register fields (KWGBE_SMI_REG) */
+#define KWGBE_PHY_SMI_TIMEOUT 10000
+#define KWGBE_PHY_SMI_DATA_OFFS 0 /* Data */
+#define KWGBE_PHY_SMI_DATA_MASK (0xffff << KWGBE_PHY_SMI_DATA_OFFS)
+#define KWGBE_PHY_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
+#define KWGBE_PHY_SMI_DEV_ADDR_MASK (0x1f << KWGBE_PHY_SMI_DEV_ADDR_OFFS)
+#define KWGBE_SMI_REG_ADDR_OFFS 21 /* PHY device register address */
+#define KWGBE_SMI_REG_ADDR_MASK (0x1f << KWGBE_SMI_REG_ADDR_OFFS)
+#define KWGBE_PHY_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
+#define KWGBE_PHY_SMI_OPCODE_MASK (3 << KWGBE_PHY_SMI_OPCODE_OFFS)
+#define KWGBE_PHY_SMI_OPCODE_WRITE (0 << KWGBE_PHY_SMI_OPCODE_OFFS)
+#define KWGBE_PHY_SMI_OPCODE_READ (1 << KWGBE_PHY_SMI_OPCODE_OFFS)
+#define KWGBE_PHY_SMI_READ_VALID_MASK (1 << 27) /* Read Valid */
+#define KWGBE_PHY_SMI_BUSY_MASK (1 << 28) /* Busy */
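+
+/*
+ * Illustration only: an SMI read command word is composed from the fields
+ * above, e.g. reading PHY register 1 of the PHY at address phy_adr:
+ *   (phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS) |
+ *   (1 << KWGBE_SMI_REG_ADDR_OFFS) | KWGBE_PHY_SMI_OPCODE_READ
+ * written to the smi register once KWGBE_PHY_SMI_BUSY_MASK is clear; the
+ * returned data is valid when KWGBE_PHY_SMI_READ_VALID_MASK is set.
+ */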
+
+/* SDMA command status fields macros */
+/* Tx & Rx descriptors status */
+#define KWGBE_ERROR_SUMMARY 1
+
+/* Tx & Rx descriptors command */
+#define KWGBE_BUFFER_OWNED_BY_DMA (1 << 31)
+
+/* Tx descriptors status */
+#define KWGBE_LC_ERROR 0
+#define KWGBE_UR_ERROR (1 << 1)
+#define KWGBE_RL_ERROR (1 << 2)
+#define KWGBE_LLC_SNAP_FORMAT (1 << 9)
+
+/* Rx descriptors status */
+#define KWGBE_CRC_ERROR 0
+#define KWGBE_OVERRUN_ERROR (1 << 1)
+#define KWGBE_MAX_FRAME_LENGTH_ERROR (1 << 2)
+#define KWGBE_RESOURCE_ERROR ((1 << 2) | (1 << 1))
+#define KWGBE_VLAN_TAGGED (1 << 19)
+#define KWGBE_BPDU_FRAME (1 << 20)
+#define KWGBE_TCP_FRAME_OVER_IP_V_4 0
+#define KWGBE_UDP_FRAME_OVER_IP_V_4 (1 << 21)
+#define KWGBE_OTHER_FRAME_TYPE (1 << 22)
+#define KWGBE_LAYER_2_IS_KWGBE_V_2 (1 << 23)
+#define KWGBE_FRAME_TYPE_IP_V_4 (1 << 24)
+#define KWGBE_FRAME_HEADER_OK (1 << 25)
+#define KWGBE_RX_LAST_DESC (1 << 26)
+#define KWGBE_RX_FIRST_DESC (1 << 27)
+#define KWGBE_UNKNOWN_DESTINATION_ADDR (1 << 28)
+#define KWGBE_RX_EN_INTERRUPT (1 << 29)
+#define KWGBE_LAYER_4_CHECKSUM_OK (1 << 30)
+
+/* Rx descriptors byte count */
+#define KWGBE_FRAME_FRAGMENTED (1 << 2)
+
+/* Tx descriptors command */
+#define KWGBE_LAYER_4_CHECKSUM_FIRST_DESC (1 << 10)
+#define KWGBE_FRAME_SET_TO_VLAN (1 << 15)
+#define KWGBE_TCP_FRAME 0
+#define KWGBE_UDP_FRAME (1 << 16)
+#define KWGBE_GEN_TCP_UDP_CHECKSUM (1 << 17)
+#define KWGBE_GEN_IP_V_4_CHECKSUM (1 << 18)
+#define KWGBE_ZERO_PADDING (1 << 19)
+#define KWGBE_TX_LAST_DESC (1 << 20)
+#define KWGBE_TX_FIRST_DESC (1 << 21)
+#define KWGBE_GEN_CRC (1 << 22)
+#define KWGBE_TX_EN_INTERRUPT (1 << 23)
+#define KWGBE_AUTO_MODE (1 << 30)
+
+/* Address decode parameters */
+/* Ethernet Base Address Register bits */
+#define EBAR_TARGET_DRAM 0x00000000
+#define EBAR_TARGET_DEVICE 0x00000001
+#define EBAR_TARGET_CBS 0x00000002
+#define EBAR_TARGET_PCI0 0x00000003
+#define EBAR_TARGET_PCI1 0x00000004
+#define EBAR_TARGET_CUNIT 0x00000005
+#define EBAR_TARGET_AUNIT 0x00000006
+#define EBAR_TARGET_GUNIT 0x00000007
+
+/* Window attrib */
+#define EBAR_ATTR_DRAM_CS0 0x00000E00
+#define EBAR_ATTR_DRAM_CS1 0x00000D00
+#define EBAR_ATTR_DRAM_CS2 0x00000B00
+#define EBAR_ATTR_DRAM_CS3 0x00000700
+
+/* DRAM Target interface */
+#define EBAR_ATTR_DRAM_NO_CACHE_COHERENCY 0x00000000
+#define EBAR_ATTR_DRAM_CACHE_COHERENCY_WT 0x00001000
+#define EBAR_ATTR_DRAM_CACHE_COHERENCY_WB 0x00002000
+
+/* Device Bus Target interface */
+#define EBAR_ATTR_DEVICE_DEVCS0 0x00001E00
+#define EBAR_ATTR_DEVICE_DEVCS1 0x00001D00
+#define EBAR_ATTR_DEVICE_DEVCS2 0x00001B00
+#define EBAR_ATTR_DEVICE_DEVCS3 0x00001700
+#define EBAR_ATTR_DEVICE_BOOTCS3 0x00000F00
+
+/* PCI Target interface */
+#define EBAR_ATTR_PCI_BYTE_SWAP 0x00000000
+#define EBAR_ATTR_PCI_NO_SWAP 0x00000100
+#define EBAR_ATTR_PCI_BYTE_WORD_SWAP 0x00000200
+#define EBAR_ATTR_PCI_WORD_SWAP 0x00000300
+#define EBAR_ATTR_PCI_NO_SNOOP_NOT_ASSERT 0x00000000
+#define EBAR_ATTR_PCI_NO_SNOOP_ASSERT 0x00000400
+#define EBAR_ATTR_PCI_IO_SPACE 0x00000000
+#define EBAR_ATTR_PCI_MEMORY_SPACE 0x00000800
+#define EBAR_ATTR_PCI_REQ64_FORCE 0x00000000
+#define EBAR_ATTR_PCI_REQ64_SIZE 0x00001000
+
+/* Window access control */
+#define EWIN_ACCESS_NOT_ALLOWED 0
+#define EWIN_ACCESS_READ_ONLY 1
+#define EWIN_ACCESS_FULL ((1 << 1) | 1)
+
+/* SDMA command macros */
+#define CURR_RFD_GET(p_curr_desc, queue) \
+ ((p_curr_desc) = p_gbe_pctrl->p_rx_curr_desc_q[queue])
+
+#define CURR_RFD_SET(p_curr_desc, queue) \
+ (p_gbe_pctrl->p_rx_curr_desc_q[queue] = (p_curr_desc))
+
+#define USED_RFD_GET(p_used_desc, queue) \
+ ((p_used_desc) = p_gbe_pctrl->p_rx_used_desc_q[queue])
+
+#define USED_RFD_SET(p_used_desc, queue)\
+(p_gbe_pctrl->p_rx_used_desc_q[queue] = (p_used_desc))
+
+#define CURR_TFD_GET(p_curr_desc, queue) \
+ ((p_curr_desc) = p_gbe_pctrl->p_tx_curr_desc_q[queue])
+
+#define CURR_TFD_SET(p_curr_desc, queue) \
+ (p_gbe_pctrl->p_tx_curr_desc_q[queue] = (p_curr_desc))
+
+#define USED_TFD_GET(p_used_desc, queue) \
+ ((p_used_desc) = p_gbe_pctrl->p_tx_used_desc_q[queue])
+
+#define USED_TFD_SET(p_used_desc, queue) \
+ (p_gbe_pctrl->p_tx_used_desc_q[queue] = (p_used_desc))
+
+#define FIRST_TFD_GET(p_first_desc, queue) \
+ ((p_first_desc) = p_gbe_pctrl->p_tx_first_desc_q[queue])
+
+#define FIRST_TFD_SET(p_first_desc, queue) \
+ (p_gbe_pctrl->p_tx_first_desc_q[queue] = (p_first_desc))
+
+/* Compute the next desc pointer without reading it from the descriptor */
+#define RX_NEXT_DESC_PTR(p_rx_desc, queue) ( \
+ struct kwgbe_rx_dscr*)(((((u32)p_rx_desc - \
+ (u32)p_gbe_pctrl->p_rx_desc_area_base[queue]) + \
+ RX_DESC_ALIGNED_SIZE) % p_gbe_pctrl->rx_desc_area_size[queue]) + \
+ (u32)p_gbe_pctrl->p_rx_desc_area_base[queue])
+
+#define TX_NEXT_DESC_PTR(p_tx_desc, queue) ( \
+ struct kwgbe_tx_dscr*)(((((u32)p_tx_desc - \
+ (u32)p_gbe_pctrl->p_tx_desc_area_base[queue]) + \
+ TX_DESC_ALIGNED_SIZE) % p_gbe_pctrl->tx_desc_area_size[queue]) + \
+ (u32)p_gbe_pctrl->p_tx_desc_area_base[queue])
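+
+/*
+ * Example: assuming the default KW_RXQ_SZ of 4, rx_desc_area_size is
+ * 4 * RX_DESC_ALIGNED_SIZE = 0x80 bytes, so the modulo arithmetic above
+ * wraps the next-descriptor pointer back to p_rx_desc_area_base after
+ * the last descriptor in the ring.
+ */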
+
+enum kwgbe_fret_sts {
+ KWGBE_OK, /* Returned as expected. */
+ KWGBE_ERROR, /* Fundamental error. */
+ KWGBE_RETRY, /* Could not process request. Try later. */
+ KWGBE_END_OF_JOB, /* Ring has nothing to process. */
+ KWGBE_QFULL, /* Ring resource error. */
+ KWGBE_QLAST_RESO /* Ring resources about to be exhausted. */
+};
+
+enum kwgbe_q {
+ KWGBE_Q0 = 0,
+ KWGBE_Q1 = 1,
+ KWGBE_Q2 = 2,
+ KWGBE_Q3 = 3,
+ KWGBE_Q4 = 4,
+ KWGBE_Q5 = 5,
+ KWGBE_Q6 = 6,
+ KWGBE_Q7 = 7
+};
+
+enum kwgbe_adrwin {
+ KWGBE_WIN0,
+ KWGBE_WIN1,
+ KWGBE_WIN2,
+ KWGBE_WIN3,
+ KWGBE_WIN4,
+ KWGBE_WIN5
+};
+
+enum kwgbe_target {
+ KWGBE_TARGET_DRAM,
+ KWGBE_TARGET_DEV,
+ KWGBE_TARGET_CBS,
+ KWGBE_TARGET_PCI0,
+ KWGBE_TARGET_PCI1
+};
+
+struct kwgbe_rx_dscr {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 buf_size; /* Buffer size */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 nxtdesc_p; /* Next descriptor pointer */
+ u32 retinfo; /* User resource return information */
+};
+
+struct kwgbe_tx_dscr {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 l4i_chk; /* CPU provided TCP Checksum */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 nxtdesc_p; /* Next descriptor pointer */
+ u32 retinfo; /* User resource return information */
+};
+
+/*
+ * Unified struct for Rx and Tx operations. The user is not required to
+ * be familiar with either Tx or Rx descriptors.
+ */
+struct kwgbe_pktinf {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 l4i_chk; /* Tx CPU provided TCP Checksum */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 retinfo; /* User resource return information */
+};
+
+struct kwgbe_winparam {
+ enum kwgbe_adrwin win; /* Window number */
+ enum kwgbe_target target; /* System targets */
+ u16 attrib; /* BAR attrib. See above macros */
+ u32 base_addr; /* Window base address in u32 form */
+ u32 high_addr; /* Window high address in u32 form */
+ u32 size; /* Size in MBytes. Must be a multiple of 64 KByte. */
+ int enable; /* Enable/disable access to the window. */
+ u16 access_ctrl; /* Access ctrl register. See above macros */
+};
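+
+/*
+ * Illustration only: a full-access DRAM CS0 window could be described as
+ *   .win = KWGBE_WIN0, .target = KWGBE_TARGET_DRAM,
+ *   .attrib = EBAR_ATTR_DRAM_CS0 | EBAR_ATTR_DRAM_NO_CACHE_COHERENCY,
+ *   .access_ctrl = EWIN_ACCESS_FULL
+ */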
+
+/* Ethernet port specific information */
+struct kwgbe_portinfo {
+ int port_phy_addr; /* User PHY address of the Ethernet port */
+ u8 port_mac_addr[6]; /* User defined port MAC address. */
+ u32 port_config; /* User port configuration value */
+ u32 port_config_extend; /* User port config extend value */
+ u32 port_sdma_config; /* User port SDMA config value */
+ u32 port_serial_control; /* User port serial control value */
+ u32 port_tx_queue_command; /* Port active Tx queues summary */
+ u32 port_rx_queue_command; /* Port active Rx queues summary */
+
+ /* User function to cast virtual address to CPU bus address */
+ u32(*port_virt_to_phys) (u32 addr);
+ /* User scratch pad for user specific data structures */
+ void *port_private;
+
+ int rx_resource_err[MAX_RXQ_NO]; /* Rx ring resource err flag */
+ int tx_resource_err[MAX_TXQ_NO]; /* Tx ring resource err flag */
+
+ /* Tx/Rx ring management index fields. For driver use */
+
+ /* Next available Rx resource */
+ volatile struct kwgbe_rx_dscr *p_rx_curr_desc_q[MAX_RXQ_NO];
+ /* Returning Rx resource */
+ volatile struct kwgbe_rx_dscr *p_rx_used_desc_q[MAX_RXQ_NO];
+
+ /* Next available Tx resource */
+ volatile struct kwgbe_tx_dscr *p_tx_curr_desc_q[MAX_TXQ_NO];
+ /* Returning Tx resource */
+ volatile struct kwgbe_tx_dscr *p_tx_used_desc_q[MAX_TXQ_NO];
+ /* An extra Tx index to support transmission of multiple buffers per packet */
+ volatile struct kwgbe_tx_dscr *p_tx_first_desc_q[MAX_TXQ_NO];
+
+ /* Tx/Rx rings size and base variables fields. For driver use */
+
+ volatile struct kwgbe_rx_dscr *p_rx_desc_area_base[MAX_RXQ_NO];
+ u32 rx_desc_area_size[MAX_RXQ_NO];
+ char *p_rx_buffer_base[MAX_RXQ_NO];
+
+ volatile struct kwgbe_tx_dscr *p_tx_desc_area_base[MAX_TXQ_NO];
+ u32 tx_desc_area_size[MAX_TXQ_NO];
+ char *p_tx_buffer_base[MAX_TXQ_NO];
+};
+
+/* port device data struct */
+struct kwgbe_device {
+ struct eth_device dev;
+ struct kwgbe_portinfo portinfo;
+ struct egiga_priv portpriv;
+ struct kwgbe_registers *regs;
+};
+
+/* functions */
+static void port_uc_addr_set(struct kwgbe_registers *regs, u8 * p_addr,
+ enum kwgbe_q queue);
+static void port_reset(struct kwgbe_registers *regs);
+
+#endif /* __EGIGA_H__ */
diff --git a/include/netdev.h b/include/netdev.h
index 68d664c..932d986 100644
--- a/include/netdev.h
+++ b/include/netdev.h
@@ -80,6 +80,7 @@ int fec_initialize (bd_t *bis);
int greth_initialize(bd_t *bis);
void gt6426x_eth_initialize(bd_t *bis);
int inca_switch_initialize(bd_t *bis);
+int kirkwood_egiga_initialize(bd_t *bis);
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr);
int mcdmafec_initialize(bd_t *bis);
int mcffec_initialize(bd_t *bis);
--
1.5.3.4