summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Kconfig10
-rw-r--r--Makefile27
-rw-r--r--Readme50
-rw-r--r--dpaa_eth_im.c1548
-rw-r--r--dpaa_eth_im.h219
-rw-r--r--fsl_fman.h465
-rw-r--r--fsl_memac.h270
7 files changed, 2589 insertions, 0 deletions
diff --git a/Kconfig b/Kconfig
new file mode 100644
index 0000000..f39423c
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,10 @@
+config FSL_DPAA_ETH_IM
+ tristate "DPAA Ethernet with independent mode"
+ depends on ARCH_LAYERSCAPE
+ depends on !FSL_SDK_DPA
+ ---help---
+ Data Path Acceleration Architecture Ethernet independent
+ mode driver supporting the Freescale QorIQ chips with
+ DPAA1.
+ This is a simplified DPAA Ethernet driver, intended for debug
+ use only, not for production.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..0520731
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the Freescale network device drivers.
+#
+KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build
+PWD ?= $(shell pwd)
+
+obj-m += dpaa_eth_im.o
+
+KERNEL_MAKE_OPTS := -C $(KERNEL_DIR) M=$(PWD)
+ifneq ($(ARCH),)
+KERNEL_MAKE_OPTS += ARCH=$(ARCH)
+endif
+ifneq ($(CROSS_COMPILE),)
+KERNEL_MAKE_OPTS += CROSS_COMPILE=$(CROSS_COMPILE)
+endif
+
+build:
+ $(MAKE) $(KERNEL_MAKE_OPTS) modules
+
+install: modules_install
+
+modules_install:
+ $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules_install
+
+clean:
+ $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean
+ rm -f $(hostprogs) *~
diff --git a/Readme b/Readme
new file mode 100644
index 0000000..b8f3cbb
--- /dev/null
+++ b/Readme
@@ -0,0 +1,50 @@
+Dpaa-im:
+ Dpaa-im is an Ethernet driver that uses the DPAA FMan in independent mode.
+
+Dependence:
+ 1. All the DPAA drivers should be disabled in kernel configuration file, the list as below:
+
+ CONFIG_FSL_SDK_DPA
+ CONFIG_FSL_SDK_FMAN
+ CONFIG_FSL_SDK_DPAA_ETH
+ CONFIG_FSL_DPAA
+ CONFIG_FSL_FMAN
+ CONFIG_FSL_DPAA_ETH
+
+ 2. Linux should be built before building dpaa-im
+ 3. Dpaa-im is based on linux-4.9 and linux-4.14 of LSDK release
+
+Building:
+ To build dpaa-im as a module
+
+ cd dpaa-im
+ make build KERNEL_DIR=<path-to-linux> ARCH=arm64 CROSS_COMPILE=<arm64-toolchain>
+ e.g. make build KERNEL_DIR=~/linux ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu-
+
+ after building, you will see module file "dpaa_eth_im.ko"
+ In addition, use "make clean KERNEL_DIR=<path-to-linux> ARCH=arm64 CROSS_COMPILE=<arm64-toolchain>" to clean
+
+Using:
+ 1. FMan firmware should be loaded in U-Boot.
+ 2. Boot up linux.
+ 3. In linux, run command "insmod dpaa_eth_im.ko", kernel will print:
+
+ [ 0.535089] fman_im: QorIQ FMAN Independent Mode Ethernet Driver loaded
+ [ 0.541782] DEV: FM1@DTSEC3, DTS Node: fsl,dpaa:ethernet@6
+
+ 4. Run command "ifconfig -a", the dpaa-im Ethernet interface (FM1@DTSEC3) can be seen; then use it as a normal Ethernet interface.
+
+ FM1@DTSEC3 Link encap:Ethernet HWaddr 00:e0:0c:00:77:00
+ BROADCAST MULTICAST MTU:1500 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:1000
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
+ lo Link encap:Local Loopback
+ inet addr:127.0.0.1 Mask:255.0.0.0
+ inet6 addr: ::1/128 Scope:Host
+ UP LOOPBACK RUNNING MTU:65536 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
diff --git a/dpaa_eth_im.c b/dpaa_eth_im.c
new file mode 100644
index 0000000..4a156e6
--- /dev/null
+++ b/dpaa_eth_im.c
@@ -0,0 +1,1548 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2017-2018 NXP
+//
+// dpaa_eth_im.c -- DPAA1 FMAN independent mode Ethernet driver implementation
+//
+// Author: Alan Wang <alan.wang@nxp.com>
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+
+#include "dpaa_eth_im.h"
+#include "fsl_memac.h"
+
+#define RCTRL_INIT (RCTRL_GRS | RCTRL_UPROM)
+#define TCTRL_INIT TCTRL_GTS
+#define MACCFG1_INIT MACCFG1_SOFT_RST
+
+#define MACCFG2_INIT (MACCFG2_PRE_LEN(0x7) | MACCFG2_LEN_CHECK | \
+ MACCFG2_PAD_CRC | MACCFG2_FULL_DUPLEX | \
+ MACCFG2_IF_MODE_NIBBLE)
+
+/* MAXFRM - maximum frame length register */
+#define MAXFRM_MASK 0x0000ffff
+
+#define CONFIG_SYS_TBIPA_VALUE 8
+#define JUMBO_FRAME_SIZE 9600
+
+/* #define DEBUG */
+#ifdef DEBUG
+#define TRACE printk
+
+static void fm_im_xmit_dump(struct sk_buff *skb)
+{
+ int i;
+
+ TRACE("+++XMIT DATA DUMP(%x)+++\n", skb->len);
+ TRACE("<0>""head: (len: 0x%x)\n", skb_headlen(skb));
+ for(i = 0; i < skb_headlen(skb); i++) {
+ if(i % 16 == 0)
+ TRACE("\n%p:", skb->head + i);
+ TRACE("%02x ", *(skb->head + i));
+
+ }
+ TRACE("data:(len:0x%x)\n",skb->data_len);
+ for(i = 0; i < skb->data_len; i++) {
+ if(i % 16 == 0)
+ TRACE("\n%p:", skb->data + i);
+ TRACE("%02x ", *(skb->data + i));
+ }
+}
+
+static void fm_im_dump(struct net_device *dev)
+{
+ struct fm_im_private *priv;
+ char *regs;
+ u32 i;
+
+ priv = netdev_priv(dev);
+ regs = (char *)priv->reg;
+
+ for(i = 0xE2000; i < 0xE3000; i++) {
+ if(i % 32 == 0)
+ TRACE("\n%06x: ", i);
+ TRACE("%02X ", regs[i]);
+ }
+
+ TRACE("size:%lx\n", sizeof(*(priv->reg)));
+}
+#else
+#define TRACE(x...) do { ; } while(0)
+#endif
+
+static u32 fm_assign_risc(int port_id)
+{
+ u32 risc_sel, val;
+ risc_sel = (port_id & 0x1) ? FMFPPRC_RISC2 : FMFPPRC_RISC1;
+ val = (port_id << FMFPPRC_PORTID_SHIFT) & FMFPPRC_PORTID_MASK;
+ val |= ((risc_sel << FMFPPRC_ORA_SHIFT) | risc_sel);
+
+ return val;
+}
+
+static void bmi_rx_port_init(struct fm_im_private *priv, struct fm_bmi_rx_port *rx_port)
+{
+ int port_id, val;
+
+ /* Set BMI to independent mode, Rx port disable */
+ fm_im_write(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
+ /* Clear FOF in IM case */
+ fm_im_write(&rx_port->fmbm_rim, 0);
+ /* Rx frame next engine -RISC */
+ fm_im_write(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
+ /* Rx command attribute - no order, MR[3] = 1 */
+ fm_im_clrbits(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
+ fm_im_setbits(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
+ /* Enable Rx statistic counters */
+ fm_im_write(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
+ /* Disable Rx performance counters */
+ fm_im_write(&rx_port->fmbm_rpc, 0);
+
+ /* Common BMI parameter for this port */
+ /*
+ * Set port parameters - FMBM_PP_x
+ * max tasks 10G Rx/Tx=12, 1G Rx/Tx 4, others is 1
+ * max dma 10G Rx/Tx=3, others is 1
+ * set port FIFO size - FMBM_PFS_x
+ * 4KB for all Rx and Tx ports
+ */
+ /* Rx 1G port */
+ port_id = RX_PORT_1G_BASE + priv->num - 1;
+ /* Max tasks=4, max dma=1, no extra */
+ fm_im_write(&priv->reg->fm_bmi_common.fmbm_pp[port_id], FMBM_PP_MXT(4));
+ /* FIFO size - 4KB, no extra */
+ fm_im_write(&priv->reg->fm_bmi_common.fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+
+ val = fm_im_read(&priv->reg->fm_bmi_common.fmbm_pp[port_id]);
+ TRACE("%s(): fmbm_pp[%d] = 0x%0x\n", __func__, port_id, val);
+ val = fm_im_read(&priv->reg->fm_bmi_common.fmbm_pfs[port_id]);
+ TRACE("%s(): fmbm_pfs[%d] = 0x%0x\n", __func__, port_id, val);
+ /* IM mode, each even port ID to RISC#1, each odd port ID to RISC#2 */
+
+ /* Rx 1G port */
+ val = fm_assign_risc(port_id + 1);
+ fm_im_write(&priv->reg->fm_fpm.fpmprc, val);
+}
+
+static void bmi_tx_port_init(struct fm_im_private *priv, struct fm_bmi_tx_port *tx_port)
+{
+ int port_id, val;
+
+ /* Set BMI to independent mode, Tx port disable */
+ fm_im_write(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
+
+ /* Tx frame next engine -RISC */
+ fm_im_write(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
+ fm_im_write(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
+
+ /* Tx command attribute - no order, MR[3] = 1 */
+ fm_im_clrbits(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
+ fm_im_setbits(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
+
+ /* Enable Tx statistic counters */
+ fm_im_write(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
+
+ /* Disable Tx performance counters */
+ fm_im_write(&tx_port->fmbm_tpc, 0);
+
+ /* Common BMI parameter for this port */
+ /*
+ * set port parameters - FMBM_PP_x
+ * max tasks 10G Rx/Tx=12, 1G Rx/Tx 4, others is 1
+ * max dma 10G Rx/Tx=3, others is 1
+ * set port FIFO size - FMBM_PFS_x
+ * 4KB for all Rx and Tx ports
+ */
+ /* Tx 1G port FIFO size - 4KB, no extra */
+ port_id = TX_PORT_1G_BASE + priv->num - 1;
+
+ /* Max tasks=4, max dma=1, no extra */
+ fm_im_write(&priv->reg->fm_bmi_common.fmbm_pp[port_id], FMBM_PP_MXT(4));
+
+ /* FIFO size - 4KB, no extra */
+ fm_im_write(&priv->reg->fm_bmi_common.fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+
+ val = fm_im_read(&priv->reg->fm_bmi_common.fmbm_pp[port_id]);
+ TRACE("%s(): fmbm_pp[%d] = 0x%0x\n", __func__, port_id, val);
+ val = fm_im_read(&priv->reg->fm_bmi_common.fmbm_pfs[port_id]);
+ TRACE("%s(): fmbm_pfs[%d] = 0x%0x\n", __func__, port_id, val);
+
+ /* IM mode, each even port ID to RISC#1, each odd port ID to RISC#2 */
+ /* Tx 1G port */
+ val = fm_assign_risc(port_id + 1);
+ fm_im_write(&priv->reg->fm_fpm.fpmprc, val);
+}
+
+struct fm_muram muram[CONFIG_SYS_NUM_FMAN];
+static void fm_init_muram(int fm_idx, void *muram_base)
+{
+ muram[fm_idx].base = muram_base;
+ muram[fm_idx].size = CONFIG_SYS_FM_MURAM_SIZE;
+ muram[fm_idx].alloc = muram_base + FM_MURAM_RES_SIZE;
+ muram[fm_idx].top = muram_base + CONFIG_SYS_FM_MURAM_SIZE;
+ TRACE("%s():%d: MURAM base 0x%p\n", __func__, __LINE__, muram_base);
+}
+
+void *fm_muram_base(int fm_idx)
+{
+ return muram[fm_idx].base;
+}
+
+void *fm_muram_alloc(int fm_idx, size_t size, u64 align)
+{
+ void *ret;
+ u64 align_mask;
+ size_t off;
+ void *save;
+ u32 *p;
+
+ align_mask = align - 1;
+ save = muram[fm_idx].alloc;
+
+ off = (u64)save & align_mask;
+ if (off != 0)
+ muram[fm_idx].alloc += (align - off);
+ off = size & align_mask;
+ if (off != 0)
+ size += (align - off);
+ if ((muram[fm_idx].alloc + size) >= muram[fm_idx].top) {
+ muram[fm_idx].alloc = save;
+ printk("%s: Run out of ram.\n", __func__);
+ return NULL;
+ }
+
+ ret = muram[fm_idx].alloc;
+ muram[fm_idx].alloc += size;
+ /* memset((void *)ret, 0, size); */
+ for (p = (u32 *)ret; p < (u32 *)ret + size; p++) {
+ *(u32 *)p = 0;
+ }
+
+ return ret;
+}
+
+static u16 muram_readw(u16 *addr)
+{
+ u64 base = (u64)addr & ~0x3UL;
+ u32 val32 = fm_im_read((void *)base);
+ int byte_pos;
+ u16 ret;
+
+ byte_pos = (u64)addr & 0x3UL;
+ if (byte_pos)
+ ret = (u16)(val32 & 0x0000ffff);
+ else
+ ret = (u16)((val32 & 0xffff0000) >> 16);
+
+ return ret;
+}
+
+static void muram_writew(u16 *addr, u16 val)
+{
+ u64 base = (u64)addr & ~0x3;
+ u32 org32 = fm_im_read((void *)base);
+ u32 val32;
+ int byte_pos;
+
+ byte_pos = (u64)addr & 0x3UL;
+ if (byte_pos)
+ val32 = (org32 & 0xffff0000) | val;
+ else
+ val32 = (org32 & 0x0000ffff) | ((u32)val << 16);
+
+ fm_im_write((void *)base, val32);
+}
+
+ /* Deactivate all the ports */
+static void fman_de_active(struct ccsr_fman *reg)
+{
+ int i, port_id;
+ struct fm_bmi_rx_port *port_reg;
+
+ /* Rx 1G port */
+ for (i = 0; i < MAX_NUM_RX_PORT_1G; i++) {
+ port_id = RX_PORT_1G_BASE + i - 1;
+ port_reg = (struct fm_bmi_rx_port *)&(reg->port[port_id].fm_bmi);
+ fm_im_clrbits(&port_reg->fmbm_rcfg,FMBM_RCFG_EN);
+ }
+
+ /* Tx 1G port */
+ for (i = 0; i < MAX_NUM_TX_PORT_1G; i++) {
+ port_id = TX_PORT_1G_BASE + i - 1;
+ port_reg = (struct fm_bmi_rx_port *)&(reg->port[port_id].fm_bmi);
+ fm_im_clrbits(&port_reg->fmbm_rcfg,FMBM_RCFG_EN);
+ }
+}
+
+/* Active return 1 */
+static int fman_is_active(struct ccsr_fman *reg, int mac_idx)
+{
+ int port_id, val;
+ struct fm_bmi_rx_port *port_reg;
+
+ /* Rx 1G port */
+ port_id = RX_PORT_1G_BASE + mac_idx - 1;
+ port_reg = (struct fm_bmi_rx_port *)&(reg->port[port_id].fm_bmi);
+ val = fm_im_read(&port_reg->fmbm_rcfg);
+ if (val & FMBM_RCFG_EN) {
+ printk("%s: port_id = %d, val = 0x%0x\n", __func__, port_id+1, val);
+ return 1;
+ }
+
+ /* Tx 1G port */
+ port_id = TX_PORT_1G_BASE + mac_idx - 1;
+ port_reg = (struct fm_bmi_rx_port *)&(reg->port[port_id].fm_bmi);
+ val = fm_im_read(&port_reg->fmbm_rcfg);
+ if (val & FMBM_RCFG_EN) {
+ printk("%s: port_id = %d, val = 0x%0x\n", __func__, port_id+1, val);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int fm_eth_rx_port_parameter_init(struct fm_im_private *priv)
+{
+ struct fm_port_global_pram *pram;
+ u32 pram_page_offset;
+ void *rx_bd_ring_base;
+ struct fm_port_bd *rxbd;
+ struct fm_port_qd *rxqd;
+ struct fm_bmi_rx_port *bmi_rx_port = priv->rx_port;
+ dma_addr_t buf;
+ int i, j;
+ int mac_idx = priv->num;
+ u16 val;
+
+ /* Alloc global parameter ram at MURAM */
+ if (priv->tx_pram) {
+ priv->rx_pram = priv->tx_pram;
+ pram = priv->tx_pram;
+ } else {
+ if (fman_is_active(priv->reg, mac_idx)) {
+ printk("%s: Could not allocate muram when other BMI ports are active.\n",
+ __func__);
+ return 0;
+ }
+ pram = (struct fm_port_global_pram *)fm_muram_alloc(priv->fm_index, FM_PRAM_SIZE, FM_PRAM_ALIGN);
+ priv->rx_pram = pram;
+ }
+
+ /* Parameter page offset to MURAM */
+ pram_page_offset = (u64)pram - (u64)fm_muram_base(priv->fm_index);
+
+ TRACE("Rx param address (virt): 0x%llx, (phy): 0x%x\n",(u64)pram, pram_page_offset + 0x1a00000);
+
+ /* Enable global mode- snooping data buffers and BDs */
+ TRACE("rx_port_pram mode: 0x%llx\n", (u64)&pram->mode - (u64)pram);
+ fm_im_write(&pram->mode, PRAM_MODE_GLOBAL);
+
+ /* Init the Rx queue descriptor pointer */
+ TRACE("rx_port_pram rxqd_ptr: 0x%llx\n", (u64)&pram->rxqd_ptr - (u64)pram);
+ fm_im_write(&pram->rxqd_ptr, pram_page_offset + 0x20);
+
+ /* Set the max receive buffer length, power of 2 */
+ TRACE("rx_port_pram mrblr: 0x%llx\n", (u64)&pram->mrblr - (u64)pram);
+ muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);
+
+ /* Alloc Rx buffer descriptors from main memory */
+ rx_bd_ring_base = kzalloc(sizeof(struct fm_port_bd) * RX_BD_RING_SIZE, GFP_KERNEL);
+ if (!rx_bd_ring_base)
+ return 0;
+ memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd) * RX_BD_RING_SIZE);
+
+ /* Alloc Rx buffer from main memory */
+ priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) * RX_BD_RING_SIZE, GFP_KERNEL);
+ if (!priv->rx_skbuff) {
+ printk("Could not allocate rx_skbuff\n");
+ return 0;
+ }
+
+ for (j = 0; j < RX_BD_RING_SIZE; j++)
+ priv->rx_skbuff[j] = NULL;
+
+ /* Save them to priv */
+ priv->rx_bd_ring = rx_bd_ring_base;
+ priv->cur_rxbd = rx_bd_ring_base;
+ priv->skb_currx = 0;
+
+ /* Init Rx BDs ring */
+ rxbd = (struct fm_port_bd *)rx_bd_ring_base;
+ for (i = 0; i < RX_BD_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ skb = netdev_alloc_skb(priv->ndev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ if (!skb) {
+ printk("Can't allocate RX buffers\n");
+ return 0;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+ priv->rx_skbuff[i] = skb;
+
+ buf = dma_map_single(priv->dev, skb->data, priv->rx_buffer_size, DMA_FROM_DEVICE);
+ TRACE("------rxbd buf addr: 0x%0llx------\n", buf);
+
+ muram_writew(&rxbd->status, RxBD_EMPTY);
+ muram_writew(&rxbd->len, 0);
+ muram_writew(&rxbd->buf_ptr_hi, (buf >> 32) & 0xffff);
+ fm_im_write(&rxbd->buf_ptr_lo, (u32)(buf & 0xffffffff));
+ rxbd++;
+ }
+
+ /* Set the Rx queue descriptor */
+ TRACE("rx_port_pram rxqd: 0x%llx\n", (u64)&pram->rxqd - (u64)pram);
+ rxqd = &pram->rxqd;
+ muram_writew(&rxqd->gen, RX_QD_RXF_INTMASK | RX_QD_BSY_INTMASK | priv->fpm_event_num);
+ val = muram_readw(&rxqd->gen);
+ buf = virt_to_phys(rx_bd_ring_base);
+ TRACE("------rxqd bdring phys addr: 0x%0llx, virtual addr %p ------\n", buf, rx_bd_ring_base);
+ muram_writew(&rxqd->bd_ring_base_hi, (buf >> 32) & 0xffff);
+ fm_im_write(&rxqd->bd_ring_base_lo, (u32)(buf & 0xffffffff));
+ muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd) * RX_BD_RING_SIZE);
+ muram_writew(&rxqd->offset_in, 0);
+ muram_writew(&rxqd->offset_out, 0);
+
+ /* Set IM parameter ram pointer to Rx Frame Queue ID */
+ fm_im_write(&bmi_rx_port->fmbm_rfqid, pram_page_offset);
+
+ return 1;
+}
+
+static int fm_eth_tx_port_parameter_init(struct fm_im_private *priv)
+{
+ struct fm_port_global_pram *pram;
+ u32 pram_page_offset;
+ void *tx_bd_ring_base;
+ struct fm_port_bd *txbd;
+ struct fm_port_qd *txqd;
+ struct fm_bmi_tx_port *bmi_tx_port = priv->tx_port;
+ dma_addr_t buf;
+ int i;
+ int mac_idx = priv->num;
+
+ /* Alloc global parameter ram at MURAM */
+ if (priv->rx_pram) {
+ priv->tx_pram = priv->rx_pram;
+ pram = priv->rx_pram;
+ } else {
+ if (fman_is_active(priv->reg, mac_idx)) {
+ printk("%s: Could not allocate muram when other BMI ports are active.\n",
+ __func__);
+ return 0;
+ }
+ pram = (struct fm_port_global_pram *)fm_muram_alloc(priv->fm_index,
+ FM_PRAM_SIZE, FM_PRAM_ALIGN);
+ priv->tx_pram = pram;
+ }
+
+ /* Parameter page offset to MURAM */
+ pram_page_offset = (u64)pram - (u64)fm_muram_base(priv->fm_index);
+
+ TRACE("Rx param address (virt): 0x%llx, (phy): 0x%x\n", (u64)pram, pram_page_offset + 0x1a00000);
+
+ /* Enable global mode- snooping data buffers and BDs */
+ TRACE("tx_port_pram mode: 0x%llx\n", (u64)&pram->mode - (u64)pram);
+ fm_im_write(&pram->mode, PRAM_MODE_GLOBAL);
+
+ /* Init the Tx queue descriptor pointer */
+ TRACE("tx_port_pram txqd_ptr: 0x%llx\n", (u64)&pram->txqd_ptr - (u64)pram);
+ fm_im_write(&pram->txqd_ptr, pram_page_offset + 0x40);
+
+ /* Alloc Tx buffer descriptors from main memory */
+ tx_bd_ring_base = kzalloc(sizeof(struct fm_port_bd) * TX_BD_RING_SIZE, GFP_KERNEL);
+ if (!tx_bd_ring_base)
+ return 0;
+ memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd) * TX_BD_RING_SIZE);
+ /* Save it to priv */
+ priv->tx_bd_ring = tx_bd_ring_base;
+ priv->cur_txbd = tx_bd_ring_base;
+ priv->skb_curtx = 0;
+
+ /* Init Tx BDs ring */
+ txbd = (struct fm_port_bd *)tx_bd_ring_base;
+ for (i = 0; i < TX_BD_RING_SIZE; i++) {
+ muram_writew(&txbd->status, TxBD_LAST);
+ muram_writew(&txbd->len, 0);
+ muram_writew(&txbd->buf_ptr_hi, 0);
+ fm_im_write(&txbd->buf_ptr_lo, 0);
+ txbd++;
+ }
+
+ /* Alloc SKB free queue from main memory */
+ priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) * TX_BD_RING_SIZE, GFP_KERNEL);
+ if (!priv->tx_skbuff) {
+ printk("Could not allocate tx_skbuff\n");
+ return 0;
+ }
+
+ for (i = 0; i < TX_BD_RING_SIZE; i++)
+ priv->tx_skbuff[i] = NULL;
+
+ /* Set the Tx queue descriptor */
+ TRACE("tx_port_pram txqd: 0x%llx\n", (u64)&pram->txqd - (u64)pram);
+ txqd = &pram->txqd;
+ buf = virt_to_phys(tx_bd_ring_base);
+ TRACE("------txqd bdring phys addr: 0x%0llx, virtual addr %p ------\n", buf, tx_bd_ring_base);
+ muram_writew(&txqd->bd_ring_base_hi, (buf >> 32) & 0xffff);
+ fm_im_write(&txqd->bd_ring_base_lo, (u32)(buf & 0xffffffff));
+ muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd) * TX_BD_RING_SIZE);
+ muram_writew(&txqd->offset_in, 0);
+ muram_writew(&txqd->offset_out, 0);
+
+ /* Set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
+ fm_im_write(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);
+
+ return 1;
+}
+
+
+static int port_parameter_init(struct fm_im_private *priv)
+{
+
+ if (!fm_eth_rx_port_parameter_init(priv))
+ return 0;
+
+ if (!fm_eth_tx_port_parameter_init(priv))
+ return 0;
+
+ return 1;
+}
+
+static void memac_init_mac(struct fsl_enet_mac *mac)
+{
+ struct memac *regs = mac->base;
+
+ /* Mask all interrupt */
+ fm_im_write(&regs->imask, IMASK_MASK_ALL);
+
+ /* Clear all events */
+ fm_im_write(&regs->ievent, IEVENT_CLEAR_ALL);
+
+ /* Set the max receive length */
+ fm_im_write(&regs->maxfrm, mac->max_rx_len & MAXFRM_MASK);
+
+ /* Multicast frame reception for the hash entry disable */
+ fm_im_write(&regs->hashtable_ctrl, 0);
+}
+
+static void memac_enable_mac(struct fsl_enet_mac *mac)
+{
+ struct memac *regs = mac->base;
+
+ fm_im_setbits(&regs->command_config, MEMAC_CMD_CFG_RXTX_EN | MEMAC_CMD_CFG_NO_LEN_CHK);
+}
+
+static void memac_disable_mac(struct fsl_enet_mac *mac)
+{
+ struct memac *regs = mac->base;
+
+ fm_im_clrbits(&regs->command_config, MEMAC_CMD_CFG_RXTX_EN);
+}
+
+static void memac_set_mac_addr(struct fsl_enet_mac *mac, u8 *mac_addr)
+{
+ struct memac *regs = mac->base;
+ u32 mac_addr0, mac_addr1;
+ u32 val0, val1;
+
+ /*
+ * If a station address of 0x12345678ABCD, perform a write to
+ * MAC_ADDR0 of 0x78563412, MAC_ADDR1 of 0x0000CDAB
+ */
+ mac_addr0 = (mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]);
+ fm_im_write(&regs->mac_addr_0, mac_addr0);
+
+ mac_addr1 = ((mac_addr[5] << 8) | mac_addr[4]) & 0x0000ffff;
+ fm_im_write(&regs->mac_addr_1, mac_addr1);
+ val0 = fm_im_read(&regs->mac_addr_0);
+ val1 = fm_im_read(&regs->mac_addr_1);
+ TRACE("%s: mac_addr0 = 0x%0x, mac_addr0 = 0x%0x\n", __func__, val0, val1);
+}
+
+static void memac_set_interface_mode(struct fsl_enet_mac *mac, phy_interface_t type, int speed)
+{
+ struct memac *regs = mac->base;
+ u32 if_mode, if_status;
+
+ /* Clear all bits relative with interface mode */
+ if_mode = fm_im_read(&regs->if_mode);
+ if_status = fm_im_read(&regs->if_status);
+
+ /* Set interface mode */
+ switch (type) {
+ case PHY_INTERFACE_MODE_GMII:
+ if_mode &= ~IF_MODE_MASK;
+ if_mode |= IF_MODE_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ if_mode |= (IF_MODE_GMII | IF_MODE_RG);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if_mode |= (IF_MODE_GMII | IF_MODE_RG);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if_mode |= (IF_MODE_GMII | IF_MODE_RM);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if_mode &= ~IF_MODE_MASK;
+ if_mode |= (IF_MODE_GMII);
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ if_mode &= ~IF_MODE_MASK;
+ if_mode |= IF_MODE_XGMII;
+ break;
+ default:
+ break;
+ }
+ /* Enable automatic speed selection for Non-XGMII */
+ if (type != PHY_INTERFACE_MODE_XGMII)
+ if_mode |= IF_MODE_EN_AUTO;
+
+ if ((type == PHY_INTERFACE_MODE_RGMII) ||
+ (type == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if_mode &= ~IF_MODE_EN_AUTO;
+ if_mode &= ~IF_MODE_SETSP_MASK;
+ switch (speed) {
+ case SPEED_1000:
+ if_mode |= IF_MODE_SETSP_1000M;
+ break;
+ case SPEED_100:
+ if_mode |= IF_MODE_SETSP_100M;
+ break;
+ case SPEED_10:
+ if_mode |= IF_MODE_SETSP_10M;
+ default:
+ break;
+ }
+ }
+
+ TRACE(" %s: if_mode = 0x%0x\n", __func__, if_mode);
+ TRACE(" %s: if_status = 0x%0x\n", __func__, if_status);
+ fm_im_write(&regs->if_mode, if_mode);
+
+ return;
+}
+
+void init_memac(struct fsl_enet_mac *mac, void *base, void *phyregs, int max_rx_len)
+{
+ mac->base = base;
+ mac->phyregs = phyregs;
+ mac->max_rx_len = max_rx_len;
+ mac->init_mac = memac_init_mac;
+ mac->enable_mac = memac_enable_mac;
+ mac->disable_mac = memac_disable_mac;
+ mac->set_mac_addr = memac_set_mac_addr;
+ mac->set_if_mode = memac_set_interface_mode;
+}
+
+static int fm_eth_init_mac(struct fm_im_private *priv, struct ccsr_fman *reg)
+{
+ struct fsl_enet_mac *mac;
+ void *base, *phyregs = NULL;
+ int num;
+
+ num = priv->num;
+
+ if (priv->type == FM_ETH_10G_E)
+ num += 8;
+ base = &reg->memac[num].fm_memac;
+ phyregs = &reg->memac[num].fm_memac_mdio;
+ TRACE("%s(): memac mdio base (virt):0x%llx\n", __func__, (u64)phyregs);
+
+ /* Alloc mac controller */
+ mac = kzalloc(sizeof(struct fsl_enet_mac), GFP_KERNEL);
+ if (!mac)
+ return 0;
+ memset(mac, 0, sizeof(struct fsl_enet_mac));
+
+ /* Save the mac to fm_eth struct */
+ priv->mac = mac;
+
+ init_memac(mac, base, phyregs, MAX_RXBUF_LEN);
+
+ return 1;
+}
+
+static void adjust_link(struct net_device *dev)
+{
+ struct fm_im_private *priv = netdev_priv(dev);
+ struct memac __iomem *regs = priv->mac->base;
+ struct phy_device *phydev = priv->phydev;
+ uint32_t tmp;
+ int new_state = 0;
+ u32 if_mode, if_status;
+
+ if (phydev->link) {
+ tmp = fm_im_read(&regs->if_mode);
+
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (phydev->duplex)
+ tmp &= ~IF_MODE_HD;
+ else
+ tmp |= IF_MODE_HD;
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if ((phydev->speed != priv->oldspeed) &&
+ ((priv->interface == PHY_INTERFACE_MODE_RGMII) ||
+ (priv->interface == PHY_INTERFACE_MODE_RGMII_TXID))) {
+ new_state = 1;
+
+ /* Configure RGMII in manual mode */
+ tmp &= ~IF_MODE_EN_AUTO;
+ tmp &= ~IF_MODE_SETSP_MASK;
+
+ if (phydev->duplex)
+ tmp |= IF_MODE_RGMII_FD;
+ else
+ tmp &= ~IF_MODE_RGMII_FD;
+
+ switch (phydev->speed) {
+ case 1000:
+ tmp |= IF_MODE_SETSP_1000M;
+ break;
+ case 100:
+ tmp |= IF_MODE_SETSP_100M;
+ break;
+ case 10:
+ tmp |= IF_MODE_SETSP_10M;
+ break;
+ default:
+ break;
+ }
+ priv->oldspeed = phydev->speed;
+ }
+ fm_im_write(&regs->if_mode, tmp);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ if_mode = fm_im_read(&regs->if_mode);
+ if_status = fm_im_read(&regs->if_status);
+ TRACE("%s(): if_mode:0x%0x, if_status:0x%0x\n", __func__, if_mode, if_status);
+}
+
+static int init_phy(struct net_device *dev)
+{
+ struct fm_im_private* priv = netdev_priv(dev);
+ u32 supported;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, priv->interface);
+
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (priv->type == FM_ETH_1G_E) {
+ supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full);
+ } else {
+ supported = SUPPORTED_10000baseT_Full;
+ }
+
+ /* Remove any features not supported by the controller */
+ priv->phydev->supported &= supported;
+ priv->phydev->advertising = priv->phydev->supported;
+
+ return 0;
+}
+
+static void fm_init_qmi(struct ccsr_fman *reg, int mac_idx)
+{
+ struct fm_qmi_common *qmi = &(reg->fm_qmi_common);
+
+ /* Disable enqueue and dequeue of QMI */
+ fm_im_clrbits(&qmi->fmqm_gc, FMQM_GC_ENQ_EN | FMQM_GC_DEQ_EN);
+
+ if (!fman_is_active(reg, mac_idx)) {
+ /* Disable all error interrupts */
+ fm_im_write(&qmi->fmqm_eien, FMQM_EIEN_DISABLE_ALL);
+ /* Clear all error events */
+ fm_im_write(&qmi->fmqm_eie, FMQM_EIE_CLEAR_ALL);
+
+ /* Disable all interrupts */
+ fm_im_write(&qmi->fmqm_ien, FMQM_IEN_DISABLE_ALL);
+ /* Clear all interrupts */
+ fm_im_write(&qmi->fmqm_ie, FMQM_IE_CLEAR_ALL);
+ }
+}
+
+static void fm_init_fpm(struct ccsr_fman *reg, int mac_idx)
+{
+ int i;
+ struct fm_fpm *fpm = &(reg->fm_fpm);
+
+ if (!fman_is_active(reg, mac_idx)) {
+ /* Disable the dispatch limit in IM case */
+ fm_im_write(&fpm->fpmflc, FMFP_FLC_DISP_LIM_NONE);
+ /* Clear events */
+ fm_im_write(&fpm->fmfpee, FMFPEE_CLEAR_EVENT);
+
+ /* Clear risc events */
+ for (i = 0; i < 4; i++)
+ fm_im_write(&fpm->fpmcev[i], 0xffffffff);
+
+ /* Clear error */
+ fm_im_write(&fpm->fpmrcr, FMFP_RCR_MDEC | FMFP_RCR_IDEC);
+ }
+}
+
+static int fm_init_bmi(int fm_idx, int mac_idx, struct ccsr_fman *reg)
+{
+ int blk;
+ u32 val, offset;
+ void *base;
+ struct fm_bmi_common *bmi = &(reg->fm_bmi_common);
+
+ /* Assume U-Boot or other FMAN software has changed it.*/
+ if ((!fman_is_active(reg, mac_idx))) {
+ /* Disable all BMI interrupt */
+ fm_im_write(&bmi->fmbm_ier, FMBM_IER_DISABLE_ALL);
+
+ /* Clear all events */
+ fm_im_write(&bmi->fmbm_ievr, FMBM_IEVR_CLEAR_ALL);
+
+ /* Alloc free buffer pool in MURAM */
+ base = fm_muram_alloc(fm_idx, FM_FREE_POOL_SIZE, FM_FREE_POOL_ALIGN);
+ if (!base) {
+ printk("%s: no muram for free buffer pool\n", __func__);
+ return -ENOMEM;
+ }
+ offset = base - fm_muram_base(fm_idx);
+
+ /* Need 128KB total free buffer pool size */
+ val = offset / 256;
+ blk = FM_FREE_POOL_SIZE / 256;
+
+ /* In IM, we must not begin from offset 0 in MURAM */
+ val |= ((blk - 1) << FMBM_CFG1_FBPS_SHIFT);
+ fm_im_write(&bmi->fmbm_cfg1, val);
+ fm_im_write(&bmi->fmbm_cfg2, FMBM_CFG2_TNTSKS_MASK);
+
+ /* Initialize internal buffers data base (linked list) */
+ fm_im_write(&bmi->fmbm_init, FMBM_INIT_START);
+ }
+ return 0;
+}
+
+static int fm_init_common(int fm_idx, int mac_idx, struct ccsr_fman *reg)
+{
+ /* Workaround: to de-active all the ports first */
+ fman_de_active(reg);
+ fm_init_muram(fm_idx, &reg->muram);
+ fm_init_qmi(reg, mac_idx);
+ fm_init_fpm(reg, mac_idx);
+
+ if (!fman_is_active(reg, mac_idx)) {
+ /* Clear DMA status */
+ fm_im_setbits(&reg->fm_dma.fmdmsr, FMDMSR_CLEAR_ALL);
+
+ /* Set DMA mode */
+ fm_im_setbits(&reg->fm_dma.fmdmmr, FMDMMR_SBER);
+ }
+
+ return fm_init_bmi(fm_idx, mac_idx, reg);
+}
+
+int check_shared_interrupt(struct fm_im_private *priv, u32 pending)
+{
+ if((pending & FMNPI_EN_REV0) && priv->fpm_event_num == 0)
+ return 1;
+ if((pending & FMNPI_EN_REV1) && priv->fpm_event_num == 1)
+ return 1;
+ if((pending & FMNPI_EN_REV2) && priv->fpm_event_num == 2)
+ return 1;
+ if((pending & FMNPI_EN_REV3) && priv->fpm_event_num == 3)
+ return 1;
+
+ return 0;
+}
+
+static struct of_device_id fman_match[] =
+{
+ {
+ .compatible = "fsl,im-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fman_match);
+
+irqreturn_t fm_im_receive(int irq, void *private)
+{
+ struct fm_im_private *priv;
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct fm_port_global_pram *pram;
+ struct fm_port_bd *rxbd, *rxbd_base;
+ u16 status, offset_out;
+ u32 ievent, pending;
+ int pkt_len;
+ struct fm_fpm *fpm;
+ dma_addr_t buf;
+ u32 buf_lo, buf_hi;
+
+ priv = (struct fm_im_private*)private;
+ dev = priv->ndev;
+ pram = priv->rx_pram;
+ rxbd = priv->cur_rxbd;
+ status = muram_readw(&rxbd->status);
+ fpm = &priv->reg->fm_fpm;
+
+ pending = fm_im_read(&fpm->fmnpi);
+
+ if(!check_shared_interrupt(priv, pending))
+ return IRQ_NONE;
+
+ /* Clear event register */
+ ievent = fm_im_read(&fpm->fpmfcevent[priv->fpm_event_num]);
+ fm_im_write(&fpm->fpmcev[priv->fpm_event_num], ievent);
+
+ while(!(status & RxBD_EMPTY)) {
+ buf_hi = muram_readw(&rxbd->buf_ptr_hi);
+ buf_lo = fm_im_read(&rxbd->buf_ptr_lo);
+ buf = ((u64)buf_hi << 32) | buf_lo;
+
+ dma_unmap_single(priv->dev, buf, priv->rx_buffer_size + RXBUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ skb = priv->rx_skbuff[priv->skb_currx];
+
+ if (!skb || (!(status & (RxBD_FIRST | RxBD_LAST))) || (status & RxBD_ERROR)) {
+ if (status & RxBD_ERROR)
+ dev->stats.rx_errors++;
+ else
+ dev->stats.rx_dropped++;
+
+ dev_kfree_skb(skb);
+ priv->rx_skbuff[priv->skb_currx] = NULL;
+ } else {
+ pkt_len = muram_readw(&rxbd->len) - ETH_FCS_LEN;
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+ netif_rx(skb);
+
+ dev->stats.rx_packets ++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ /* Clear the RxBDs */
+ muram_writew(&rxbd->status, RxBD_EMPTY);
+ muram_writew(&rxbd->len, 0);
+ mb();
+
+ skb = netdev_alloc_skb(priv->ndev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ if (!skb) {
+ if (printk_ratelimit())
+ printk("Can't allocate Rx buffer\n");
+ dev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT -
+ (((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1)));
+ buf = dma_map_single(priv->dev, skb->data,
+ priv->rx_buffer_size + RXBUF_ALIGNMENT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, buf)) {
+ printk("%s: %d: dma_map_single error\n", __func__, __LINE__);
+ break;
+ }
+ priv->rx_skbuff[priv->skb_currx] = skb;
+ muram_writew(&rxbd->buf_ptr_hi, (buf >> 32) & 0xffff);
+ fm_im_write(&rxbd->buf_ptr_lo, (u32)(buf & 0xffffffff));
+ mb();
+
+ /* Advance RxBD */
+ rxbd++;
+ rxbd_base = (struct fm_port_bd *)priv->rx_bd_ring;
+ if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
+ rxbd = rxbd_base;
+ /* Read next status */
+ status = muram_readw(&rxbd->status);
+
+ /* Update to point at the next skb */
+ priv->skb_currx = (priv->skb_currx + 1) & (RX_BD_RING_SIZE - 1);
+
+ /* Update RxQD */
+ offset_out = muram_readw(&pram->rxqd.offset_out);
+ offset_out += sizeof(struct fm_port_bd);
+ if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
+ offset_out = 0;
+ muram_writew(&pram->rxqd.offset_out, offset_out);
+ mb();
+
+ }
+ priv->cur_rxbd = (void *)rxbd;
+
+ return IRQ_HANDLED;
+}
+
+/* Bring up the per-port FMan resources and the MAC controller.
+ * Returns 1 on success, 0 on failure (this driver's 0-is-error convention).
+ */
+static int fm_im_startup(struct net_device *dev)
+{
+	struct fm_im_private *priv = netdev_priv(dev);
+	struct memac *regs;
+
+	/* Rx/TxBDs, Rx/TxQDs, Rx buffers and parameter RAM init */
+	if (!port_parameter_init(priv))
+		return 0;
+
+	regs = priv->mac->base;
+	TRACE("%s(): memac%d controller base:0x%llx\n", __func__, priv->num, (u64)regs);
+
+	/* Program the mEMAC with its initial (disabled) configuration. */
+	priv->mac->init_mac(priv->mac);
+
+	return 1;
+}
+
+/* ndo_open: request the RXF interrupt, start the PHY and enable the
+ * BMI Rx/Tx ports in independent mode.
+ *
+ * Fixes over the original:
+ *  - a failed request_irq() used to be logged and then ignored; without the
+ *    interrupt the port cannot receive, so propagate the error.
+ *  - init_phy() failure used to return 0 (success) with the IRQ leaked.
+ */
+static int fm_im_enet_open(struct net_device *dev)
+{
+	struct fm_im_private *priv;
+	struct fsl_enet_mac *mac;
+	int i, err;
+	u32 val;
+
+	priv = netdev_priv(dev);
+
+	err = request_irq(priv->irq, fm_im_receive,
+			  IRQF_SHARED | IRQF_NO_SUSPEND, "fman_im", priv);
+	if (err < 0) {
+		printk("Request irq ERROR!\n");
+		return err;
+	}
+
+	mac = priv->mac;
+	mac->set_mac_addr(mac, dev->dev_addr);
+
+	/* init_phy() returns non-zero on failure -- same 0-is-error convention
+	 * as fm_im_startup(); TODO confirm against init_phy()'s definition.
+	 */
+	if (init_phy(dev)) {
+		free_irq(priv->irq, priv);
+		return -ENODEV;
+	}
+
+	/* Init bmi rx port, IM mode and disable */
+	bmi_rx_port_init(priv, priv->rx_port);
+	/* Point the Rx port at its parameter RAM (MURAM offset), then enable */
+	fm_im_write(&priv->rx_port->fmbm_rfqid, ((u64)priv->rx_pram - (u64)fm_muram_base(priv->fm_index)));
+	fm_im_setbits(&priv->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
+
+	/* Enable MAC rx/tx port */
+	mac->enable_mac(mac);
+
+	/* Init bmi tx port, IM mode and disable */
+	bmi_tx_port_init(priv, priv->tx_port);
+	/* Enable bmi Tx port */
+	fm_im_write(&priv->tx_port->fmbm_tcfqid, ((u64)priv->tx_pram - (u64)fm_muram_base(priv->fm_index)));
+	fm_im_setbits(&priv->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
+	/* Re-enable transmission of frame */
+	fm_im_clrbits(&priv->tx_pram->mode, PRAM_MODE_GRACEFUL_STOP);
+	/* Unmask the receive-frame event in all four FPM controller masks */
+	for (i = 0; i < 4; i++) {
+		fm_im_setbits(&(&priv->reg->fm_fpm)->fpmfcmask[i], FMFPCEE_IM_MASK_RXF);
+		val = fm_im_read(&(&priv->reg->fm_fpm)->fpmfcmask[i]);
+		TRACE("%s():%d: fpmfcmask[%d] = 0x%x\n", __func__, __LINE__, i, val);
+	}
+
+	mb();
+
+	phy_start(priv->phydev);
+
+	/* Set the MAC-PHY mode */
+	mac->set_if_mode(mac, priv->interface, priv->phydev->speed);
+
+	netif_start_queue(dev);
+
+	for (i = 0; i < 4; i++) {
+		val = fm_im_read(&(&priv->reg->fm_fpm)->fpmfcevent[i]);
+		TRACE("%s(): fpmfcevent[%d] = 0x%x\n", __func__, i, val);
+	}
+	return 0;
+}
+
+/* ndo_stop: gracefully stop Tx, mask interrupts, disable the BMI ports and
+ * the MAC, and release the IRQ. Ring/skb teardown is done in fm_im_remove().
+ *
+ * Fix: the interrupt-mask loop used fm_im_setbits(reg, 0x0), which is a
+ * no-op; masking all FMan controller events requires writing 0.
+ */
+static int fm_im_close(struct net_device *dev)
+{
+	struct fm_im_private *priv = netdev_priv(dev);
+	struct fm_port_global_pram *tx_pram = priv->tx_pram;
+	int i;
+
+	/* Allow the Fman (Tx) port to process in-flight frames before we
+	 * try switching it off.
+	 */
+	fm_im_setbits(&tx_pram->mode, PRAM_MODE_GRACEFUL_STOP);
+	usleep_range(5000, 10000);
+
+	phy_stop(priv->phydev);
+
+	/* Mask all FMan controller events (write 0, don't OR in 0) */
+	for (i = 0; i < 4; i++)
+		fm_im_write(&(&priv->reg->fm_fpm)->fpmfcmask[i], 0x0);
+
+	/* Clear DMA status (write-1-to-clear bits) */
+	fm_im_setbits(&priv->reg->fm_dma.fmdmsr, FMDMSR_CLEAR_ALL);
+
+	/* Disable bmi Tx port */
+	fm_im_clrbits(&priv->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
+
+	/* Disable MAC rx/tx port */
+	priv->mac->disable_mac(priv->mac);
+
+	/* Disable bmi Rx port */
+	fm_im_clrbits(&priv->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
+
+	/* Release irq line */
+	free_irq(priv->irq, priv);
+
+	/* skb resources are released by fm_im_remove() */
+
+	/* Disconnect from the PHY */
+	phy_disconnect(priv->phydev);
+	priv->phydev = NULL;
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+/* ndo_start_xmit: place one frame on the Tx BD ring and poll until the
+ * FMan has consumed it (debug-grade synchronous transmit).
+ *
+ * Fixes over the original:
+ *  - tx_bytes was *assigned* a raw big-endian BD field instead of being
+ *    accumulated; use += skb->len.
+ *  - the DMA-map failure path saved the skb and returned NETDEV_TX_BUSY,
+ *    which makes the stack requeue an skb the ring already owns (double
+ *    free); map first, and on failure drop the frame and return TX_OK.
+ *  - the poll-timeout path leaked the DMA mapping and also returned
+ *    NETDEV_TX_BUSY for an already-queued frame.
+ */
+static int fm_im_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fm_im_private *priv = netdev_priv(dev);
+	struct fm_port_global_pram *pram = priv->tx_pram;
+	struct fm_port_bd *txbd, *txbd_base;
+	u16 offset_in;
+	dma_addr_t buf;
+	int i;
+
+	txbd = priv->cur_txbd;
+
+#ifdef DEBUG
+	fm_im_xmit_dump(skb);
+#endif
+
+	/* Map first so a failure leaves the ring bookkeeping untouched */
+	buf = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, buf)) {
+		printk("%s: %d: dma_map_single error\n", __func__, __LINE__);
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	i = priv->skb_curtx;
+	if (priv->tx_skbuff[i])
+		dev_kfree_skb(priv->tx_skbuff[i]);
+
+	/* Save the skb pointer so we can free it later */
+	priv->tx_skbuff[i] = skb;
+	/* Move forward and wrap if come to end */
+	priv->skb_curtx = (priv->skb_curtx + 1) & (TX_BD_RING_SIZE - 1);
+
+	/* Setup TxBD */
+	TRACE("------Tx buffer addr: 0x%0llx------\n", buf);
+	muram_writew(&txbd->buf_ptr_hi, (buf >> 32) & 0xffff);
+	fm_im_write(&txbd->buf_ptr_lo, (u32)(buf & 0xffffffff));
+	muram_writew(&txbd->len, skb->len);
+	mb();
+	/* Hand the BD to hardware only after address/len are visible */
+	muram_writew(&txbd->status, TxBD_READY | TxBD_LAST);
+	mb();
+
+	/* Update TxQD, let RISC to send the packet */
+	offset_in = muram_readw(&pram->txqd.offset_in);
+	offset_in += sizeof(struct fm_port_bd);
+	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
+		offset_in = 0;
+	muram_writew(&pram->txqd.offset_in, offset_in);
+	mb();
+
+	/* Wait for buffer to be transmitted (busy-wait, debug driver) */
+	for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
+		udelay(100);
+		if (i > 0x10000) {
+			printk("%s: Tx error\n", dev->name);
+			dev->stats.tx_dropped++;
+			dma_unmap_single(priv->dev, buf, skb->len, DMA_TO_DEVICE);
+			/* Frame is counted as dropped; do not requeue it */
+			return NETDEV_TX_OK;
+		}
+	}
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	/* Advance the TxBD */
+	txbd++;
+	txbd_base = (struct fm_port_bd *)priv->tx_bd_ring;
+	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
+		txbd = txbd_base;
+	/* Update current txbd */
+	priv->cur_txbd = (void *)txbd;
+
+	dma_unmap_single(priv->dev, buf, skb->len, DMA_TO_DEVICE);
+
+	return NETDEV_TX_OK;
+}
+
+/* ndo_change_mtu: accept the new MTU if the resulting Ethernet frame
+ * (MTU + header) fits between the 64-byte minimum and the jumbo limit.
+ */
+static int fm_im_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int frame_size = new_mtu + ETH_HLEN;
+
+	if (frame_size < 64 || frame_size > JUMBO_FRAME_SIZE) {
+		printk("%s(): invalid MTU setting\n", __func__);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* Change the promiscuity of the device based on the flags
+ * (this function is called whenever dev->flags is changed)
+ */
+static void fm_im_set_rx_mode(struct net_device *dev)
+{
+	struct fm_im_private *priv = netdev_priv(dev);
+	struct memac *regs = priv->mac->base;
+
+	/* Mirror IFF_PROMISC into the mEMAC promiscuous-mode bit */
+	if (dev->flags & IFF_PROMISC)
+		fm_im_setbits(&regs->command_config, MEMAC_CMD_CFG_PROMIS);
+	else
+		fm_im_clrbits(&regs->command_config, MEMAC_CMD_CFG_PROMIS);
+}
+
+/* ndo_tx_timeout: only bumps the error counter; no reset is performed yet. */
+static void fm_im_timeout(struct net_device *dev)
+{
+	dev->stats.tx_errors++;
+	/* ToDo: re-schedule to work
+	 * schedule_work(&priv->reset_task);
+	 */
+}
+
+/* ndo_do_ioctl: no private ioctls are implemented. Returning 0 here used to
+ * claim success for every request; report "not supported" instead so callers
+ * (e.g. mii-tool) get an honest answer.
+ */
+static int fm_im_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	return -EOPNOTSUPP;
+}
+
+/* ndo_set_mac_address.
+ *
+ * Fixes over the original:
+ *  - eth_mac_addr()'s return value (e.g. -EADDRNOTAVAIL for an invalid
+ *    address) was ignored.
+ *  - 'p' is a struct sockaddr pointer, not a raw MAC; the old code handed
+ *    it to the hardware directly, so the first bytes programmed were the
+ *    sa_family field. Program the validated dev->dev_addr instead.
+ */
+static int fm_im_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct fm_im_private *priv = netdev_priv(dev);
+	struct fsl_enet_mac *mac;
+	int err;
+
+	err = eth_mac_addr(dev, p);
+	if (err)
+		return err;
+
+	mac = priv->mac;
+	mac->set_mac_addr(mac, dev->dev_addr);
+
+	return 0;
+}
+
+/* net_device callbacks for the independent-mode Ethernet interface */
+static const struct net_device_ops fm_im_netdev_ops = {
+	.ndo_open = fm_im_enet_open,
+	.ndo_start_xmit = fm_im_start_xmit,
+	.ndo_stop = fm_im_close,
+	.ndo_change_mtu = fm_im_change_mtu,
+	.ndo_set_rx_mode = fm_im_set_rx_mode,
+	.ndo_tx_timeout = fm_im_timeout,
+	.ndo_do_ioctl = fm_im_ioctl,
+	.ndo_set_mac_address = fm_im_set_mac_addr,
+};
+
+static phys_addr_t sys_ccsrbar, sys_fm1_offset;
+static phys_addr_t sys_fm1_addr;
+
+/* Platform-driver remove: detach the netdev, then release BD rings and skbs.
+ *
+ * Fixes over the original:
+ *  - unregister_netdev() is done first, so the rings cannot be freed while
+ *    the interface is still live.
+ *  - Rx slots may be NULL (the IRQ handler drops bad frames and clears the
+ *    slot); the old loop dereferenced skb unconditionally.
+ *  - Rx buffers were mapped with rx_buffer_size + RXBUF_ALIGNMENT but
+ *    unmapped with skb->len; unmap with the mapped size.
+ *  - BD fields are big-endian; read them through muram_readw()/fm_im_read()
+ *    like the rest of the driver, not with raw loads.
+ */
+static int fm_im_remove(struct platform_device *of_dev)
+{
+	struct fm_im_private *priv = dev_get_drvdata(&of_dev->dev);
+	struct fm_port_bd *txbd, *rxbd;
+	struct sk_buff *skb;
+	dma_addr_t buf;
+	int i;
+
+	unregister_netdev(priv->ndev);
+
+	if (priv->phy_node)
+		of_node_put(priv->phy_node);
+	if (priv->tbi_node)
+		of_node_put(priv->tbi_node);
+
+	dev_set_drvdata(&of_dev->dev, NULL);
+
+	/* free Rx resources */
+	for (i = 0, rxbd = priv->rx_bd_ring; i < RX_BD_RING_SIZE; i++, rxbd++) {
+		skb = priv->rx_skbuff[i];
+		if (!skb)
+			continue;
+		buf = ((u64)muram_readw(&rxbd->buf_ptr_hi) << 32) +
+		      fm_im_read(&rxbd->buf_ptr_lo);
+		dma_unmap_single(priv->dev, buf,
+				 priv->rx_buffer_size + RXBUF_ALIGNMENT,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb(skb);
+	}
+	kfree(priv->rx_bd_ring);
+	kfree(priv->rx_skbuff);
+
+	/* free Tx resources */
+	for (i = 0, txbd = priv->tx_bd_ring; i < TX_BD_RING_SIZE; i++, txbd++) {
+		skb = priv->tx_skbuff[i];
+		if (!skb)
+			continue;
+		buf = ((u64)muram_readw(&txbd->buf_ptr_hi) << 32) +
+		      fm_im_read(&txbd->buf_ptr_lo);
+		dma_unmap_single(priv->dev, buf, skb->len, DMA_TO_DEVICE);
+		dev_kfree_skb(skb);
+	}
+	kfree(priv->tx_bd_ring);
+	kfree(priv->tx_skbuff);
+
+	free_netdev(priv->ndev);
+
+	return 0;
+}
+
+/* Platform-driver probe: map the FMan CCSR block (once, cached in the static
+ * fm1_reg and shared by all ports), read the MAC/FMan indices from the
+ * device tree, allocate and register the netdev.
+ *
+ * Fixes over the original:
+ *  - the alloc_etherdev() failure path jumped to a label that dereferenced
+ *    priv (still NULL) via free_netdev(priv->ndev).
+ *  - ioremap() was unchecked.
+ *  - error paths called iounmap(priv->reg), unmapping the shared mapping
+ *    still cached in fm1_reg; the mapping is intentionally kept.
+ *  - the reference taken by of_parse_phandle() on mac_node is now dropped
+ *    on all paths.
+ */
+static int fm_im_probe(struct platform_device *of_dev)
+{
+	struct ccsr_fman __iomem *reg;
+	static struct ccsr_fman *fm1_reg = NULL;
+	static int fm1_flag = 0;
+	struct net_device *net_dev = NULL;
+	struct fm_im_private *priv = NULL;
+	struct device *dev = &of_dev->dev;
+	const char *dev_name, *ctype;
+	const int *fm_id, *mac_id, *fpm_event_id;
+	int fm_idx, mac_idx;
+	u16 rx_port_id, tx_port_id;
+	const struct of_device_id *match;
+	const void *mac_addr;
+	struct device_node *mac_node;
+	char *cp = NULL;
+	int i = 0, err = 0;
+
+	match = of_match_device(fman_match, dev);
+	if (!match) {
+		printk("%s(): No matching device found.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev->init_name)
+		dev_name = dev->init_name;
+	else
+		dev_name = (&dev->kobj)->name;
+
+	TRACE("--------------------------------------------\n");
+	mac_node = of_parse_phandle(dev->of_node, "fsl,fman-mac", 0);
+	if (!mac_node) {
+		printk("%s(): of_parse_phandle get fsl,fman-mac failed!\n", __func__);
+		return -EINVAL;
+	}
+
+	fm_id = of_get_property(mac_node->parent, "cell-index", NULL);
+	if (!fm_id) {
+		printk("of_get_property get cell-index failed!\n");
+		err = -EINVAL;
+		goto err_put_node;
+	}
+	mac_id = of_get_property(mac_node, "cell-index", NULL);
+	if (!mac_id) {
+		printk("of_get_property get cell-index failed!\n");
+		err = -EINVAL;
+		goto err_put_node;
+	}
+
+	/* DT properties are big-endian, hence the be32 read helper */
+	fm_idx = fm_im_read((unsigned __iomem *)fm_id);
+	mac_idx = fm_im_read((unsigned __iomem *)mac_id);
+	/* In driver, index starts from 0, while in reference manual,
+	 * it starts from 1, align to RM.
+	 */
+	printk("DEV: FM%d@DTSEC%d, DTS Node: %s\n", fm_idx+1, mac_idx+1, dev_name);
+
+	fpm_event_id = of_get_property(dev->of_node, "fpmevt-sel", NULL);
+	if (!fpm_event_id) {
+		printk("of_get_property get fpmevt-sel failed!\n");
+		err = -EINVAL;
+		goto err_put_node;
+	}
+
+	rx_port_id = RX_PORT_1G_BASE + mac_idx;
+	tx_port_id = TX_PORT_1G_BASE + mac_idx;
+
+	sys_ccsrbar = CONFIG_SYS_CCSRBAR_BASE;
+	sys_fm1_offset = CONFIG_SYS_FM1_OFFSET;
+	sys_fm1_addr = (sys_ccsrbar + sys_fm1_offset);
+
+	if (fm_idx == 0) {
+		if (!fm1_reg) {
+			TRACE("FM1 FIRST DETECTED! IOREMAP......\n");
+			reg = ioremap(sys_fm1_addr, sizeof(ccsr_fman_t));
+			if (!reg) {
+				printk("%s(): ioremap failed!\n", __func__);
+				err = -ENOMEM;
+				goto err_put_node;
+			}
+			TRACE("FM1 phy base: 0x%x, virt base: 0x%llx, size: 0x%x\n",
+			      (u32)sys_fm1_addr, (u64)reg, (u32)sizeof(ccsr_fman_t));
+			fm_init_common(fm_idx, mac_idx, reg);
+			fm1_reg = reg;
+		} else {
+			reg = fm1_reg;
+			fm1_flag = 1;
+		}
+	} else {
+		printk("FM NUM ERROR!\n");
+		err = -EINVAL;
+		goto err_put_node;
+	}
+
+	net_dev = alloc_etherdev(sizeof(*priv));
+	if (!net_dev) {
+		dev_err(dev, "alloc_etherdev() failed\n");
+		err = -ENOMEM;
+		goto err_put_node;
+	}
+
+	priv = netdev_priv(net_dev);
+	priv->ndev = net_dev;
+	priv->ofdev = of_dev;
+	priv->dev = dev;
+	SET_NETDEV_DEV(net_dev, dev);
+	dev_set_drvdata(dev, priv);
+
+	priv->reg = reg;
+	priv->fm_index = fm_idx;
+	priv->num = mac_idx;
+	priv->type = FM_ETH_1G_E;
+	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+	/* Enable most messages by default */
+	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+
+	priv->rx_port = (void *)&reg->port[rx_port_id - 1].fm_bmi;
+	priv->tx_port = (void *)&reg->port[tx_port_id - 1].fm_bmi;
+
+	ctype = of_get_property(mac_node, "phy-connection-type", NULL);
+	if (ctype && !strcmp(ctype, "rgmii-id"))
+		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+	else if (ctype && !strcmp(ctype, "rgmii"))
+		priv->interface = PHY_INTERFACE_MODE_RGMII;
+	else if (ctype && !strcmp(ctype, "rgmii-txid"))
+		priv->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+	else if (ctype && !strcmp(ctype, "sgmii"))
+		priv->interface = PHY_INTERFACE_MODE_SGMII;
+	else
+		priv->interface = PHY_INTERFACE_MODE_MII;
+
+	priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+
+	/* Find the TBI PHY. If it's not there, we don't support SGMII */
+	priv->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+	TRACE("ctype:%s, phy_node:%p, tbi_node:%p\n", ctype, priv->phy_node, priv->tbi_node);
+
+	priv->irq = irq_of_parse_and_map(mac_node->parent, 0);
+	TRACE("IRQ:%d\n", priv->irq);
+
+	priv->fpm_event_num = fm_im_read((unsigned __iomem *)fpm_event_id);
+
+	if (priv->fpm_event_num < 0 || priv->fpm_event_num > 3) {
+		printk("of_get_property get wrong fpm event register num!\n");
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	mac_addr = of_get_mac_address(mac_node);
+	if (mac_addr) {
+		memcpy(net_dev->dev_addr, mac_addr, ETH_ALEN);
+		TRACE("MAC address: ");
+		cp = (char *)mac_addr;
+		for (i = 0; i < ETH_ALEN; i++) {
+			TRACE("%02X", cp[i]);
+			if (i != ETH_ALEN - 1)
+				TRACE(":");
+		}
+		TRACE("\n");
+	}
+
+	/* Set the ethernet max receive length */
+	priv->max_rx_len = MAX_RXBUF_LEN;
+
+	/* Init global mac structure */
+	if (!fm_eth_init_mac(priv, reg)) {
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	/* To align the same name in U-Boot */
+	sprintf(net_dev->name, "FM%d@DTSEC%d", fm_idx+1, mac_idx+1);
+
+	if (!fm_im_startup(net_dev)) {
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+	net_dev->base_addr = (unsigned long)reg;
+	net_dev->watchdog_timeo = HZ;
+	net_dev->mtu = 1500;
+
+	net_dev->netdev_ops = &fm_im_netdev_ops;
+
+	spin_lock_init(&priv->lock);
+
+	err = register_netdev(net_dev);
+	if (err) {
+		printk("%s: register net device failed.\n", net_dev->name);
+		goto err_free_netdev;
+	}
+
+	of_node_put(mac_node);
+	return 0;
+
+err_free_netdev:
+	/* priv->reg is the shared fm1_reg mapping; deliberately not unmapped */
+	free_netdev(net_dev);
+err_put_node:
+	of_node_put(mac_node);
+	return err;
+}
+
+/* Platform driver glue; matched against the DT via fman_match */
+static struct platform_driver fm_im_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fman_match,
+		.owner = THIS_MODULE,
+	},
+	.probe = fm_im_probe,
+	.remove = fm_im_remove,
+};
+
+/* Module entry point: register the platform driver. */
+static int __init __cold fm_im_load(void)
+{
+	int rc;
+
+	printk(KBUILD_MODNAME ": " "QorIQ FMAN Independent Mode Ethernet Driver\n");
+
+	rc = platform_driver_register(&fm_im_driver);
+	if (unlikely(rc < 0))
+		pr_err(KBUILD_MODNAME
+		       ": %s:%hu:%s(): platform_driver_register() = %d\n",
+		       KBUILD_BASENAME".c", __LINE__, __func__, rc);
+
+	return rc;
+}
+
+/* Module exit point: unregister the platform driver (probe resources are
+ * released through fm_im_remove()). */
+static void __exit __cold fm_im_unload(void)
+{
+	printk(KBUILD_MODNAME ": -> %s:%s()\n", KBUILD_BASENAME".c", __func__);
+	platform_driver_unregister(&fm_im_driver);
+}
+
+module_init(fm_im_load);
+module_exit(fm_im_unload);
+MODULE_DESCRIPTION("QorIQ FMAN Independent Mode Ethernet driver for NXP DPAA1.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NXP Corporation");
diff --git a/dpaa_eth_im.h b/dpaa_eth_im.h
new file mode 100644
index 0000000..e055cef
--- /dev/null
+++ b/dpaa_eth_im.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2017-2018 NXP */
+/* Author: Alan Wang <alan.wang@nxp.com> */
+
+#ifndef __FMAN_IM_H__
+#define __FMAN_IM_H__
+
+#define CONFIG_LS1043A 1
+
+#include <asm/io.h>
+#include "fsl_fman.h"
+#include <linux/etherdevice.h>
+
+#define CONFIG_SYS_NUM_FMAN 1
+
+#if defined(CONFIG_LS1043A)
+/* NOTE: the base/offset values are used in expressions; the original
+ * definitions carried a trailing ';' which only compiled by accident in
+ * statement context and would break any expression use.
+ */
+#define CONFIG_SYS_CCSRBAR_BASE 0x01000000
+#define CONFIG_SYS_FM1_OFFSET 0xA00000
+#define CONFIG_SYS_NUM_FM1_MEMAC 6
+#define CONFIG_SYS_NUM_FM1_10GEC 1
+#define CONFIG_SYS_FM_MURAM_SIZE 0x60000
+#else
+#error SoC not defined
+#endif
+
+/* Port ID */
+#if defined(CONFIG_LS1043A)
+#define OH_PORT_ID_BASE 0x02
+#define MAX_NUM_OH_PORT 4
+#define RX_PORT_1G_BASE 0x08
+#define MAX_NUM_RX_PORT_1G CONFIG_SYS_NUM_FM1_MEMAC
+#define MAX_NUM_RX_PORT_10G CONFIG_SYS_NUM_FM1_10GEC
+#define RX_PORT_10G_BASE 0x10
+#define TX_PORT_1G_BASE 0x28
+#define MAX_NUM_TX_PORT_1G CONFIG_SYS_NUM_FM1_MEMAC
+#define MAX_NUM_TX_PORT_10G CONFIG_SYS_NUM_FM1_10GEC
+#define TX_PORT_10G_BASE 0x30
+#else
+#error SoC not defined
+#endif
+
+/* MAC speed class of a port (selects 1G vs 10G port bases) */
+enum fm_eth_type {
+	FM_ETH_1G_E,
+	FM_ETH_10G_E,
+};
+
+/* Simple bump allocator over the FMan multi-user RAM (MURAM) window */
+struct fm_muram {
+	void *base;	/* start of the MURAM mapping */
+	void *top;	/* end of the MURAM mapping */
+	size_t size;
+	void *alloc;	/* next free location (bump pointer) */
+};
+
+#define FM_MURAM_RES_SIZE 0x01000
+
+/* Rx/Tx buffer descriptor as laid out in MURAM (big-endian fields;
+ * access via muram_readw()/fm_im_read() and the write counterparts).
+ */
+struct fm_port_bd {
+	u16 status;	/* RxBD_*/TxBD_* flags */
+	u16 len;	/* frame length in bytes */
+	u32 res0;
+	u16 res1;
+	u16 buf_ptr_hi;	/* bits 47:32 of the buffer DMA address */
+	u32 buf_ptr_lo;	/* bits 31:0 of the buffer DMA address */
+};
+
+/* Common BD flags */
+#define BD_LAST 0x0800
+
+/* Rx BD status flags */
+#define RxBD_EMPTY 0x8000
+#define RxBD_LAST BD_LAST
+#define RxBD_FIRST 0x0400
+#define RxBD_PHYS_ERR 0x0008
+#define RxBD_SIZE_ERR 0x0004
+#define RxBD_ERROR (RxBD_PHYS_ERR | RxBD_SIZE_ERR)
+
+/* Tx BD status flags */
+#define TxBD_READY 0x8000
+#define TxBD_LAST BD_LAST
+
+/* Rx/Tx queue descriptor: describes one BD ring to the FMan RISC.
+ * offset_in is advanced by the producer, offset_out by the consumer;
+ * both are byte offsets into the ring, wrapping at bd_ring_size.
+ */
+struct fm_port_qd {
+	u16 gen;
+	u16 bd_ring_base_hi;
+	u32 bd_ring_base_lo;
+	u16 bd_ring_size;
+	u16 offset_in;
+	u16 offset_out;
+	u16 res0;
+	u32 res1[0x4];
+};
+
+/* IM global parameter RAM: per-port control block in MURAM holding the
+ * independent-mode configuration and both queue descriptors.
+ */
+struct fm_port_global_pram {
+	u32 mode;	/* independent mode register (PRAM_MODE_* bits) */
+	u32 rxqd_ptr;	/* Rx queue descriptor pointer */
+	u32 txqd_ptr;	/* Tx queue descriptor pointer */
+	u16 mrblr;	/* max Rx buffer length */
+	u16 rxqd_bsy_cnt;	/* RxQD busy counter, should be cleared */
+	u32 res0[0x4];
+	struct fm_port_qd rxqd;	/* Rx queue descriptor */
+	struct fm_port_qd txqd;	/* Tx queue descriptor */
+	u32 res1[0x28];
+};
+
+#define FM_PRAM_SIZE sizeof(struct fm_port_global_pram)
+#define FM_PRAM_ALIGN 256
+#define PRAM_MODE_GLOBAL 0x20000000
+#define PRAM_MODE_GRACEFUL_STOP 0x00800000
+
+#define FM_FREE_POOL_SIZE 0x20000 /* 128K bytes */
+#define FM_FREE_POOL_ALIGN 256
+
+/* Abstract MAC controller interface; the ops are filled in for the
+ * specific MAC flavour (mEMAC on LS1043A) by fm_eth_init_mac().
+ */
+struct fsl_enet_mac {
+	void *base;	/* MAC controller registers base address */
+	void *phyregs;
+	int max_rx_len;	/* max receive frame length the MAC accepts */
+	void (*init_mac)(struct fsl_enet_mac *mac);
+	void (*enable_mac)(struct fsl_enet_mac *mac);
+	void (*disable_mac)(struct fsl_enet_mac *mac);
+	void (*set_mac_addr)(struct fsl_enet_mac *mac, u8 *mac_addr);
+	void (*set_if_mode)(struct fsl_enet_mac *mac, phy_interface_t type,
+			    int speed);
+};
+
+/* Fman ethernet private struct (netdev_priv of each IM interface) */
+struct fm_im_private {
+	struct device *dev;		/* platform device's struct device */
+	struct net_device *ndev;	/* owning net_device */
+	struct platform_device *ofdev;
+	int fm_index;			/* Fman index */
+	int irq;			/* FMan event IRQ (shared) */
+	u32 num;			/* 0..n-1 for give type */
+	int fpm_event_num;		/* FPM Fman Controller Event Register 0..3 for im device */
+	struct fm_bmi_tx_port *tx_port;
+	struct fm_bmi_rx_port *rx_port;
+	enum fm_eth_type type;		/* 1G or 10G ethernet */
+	struct ccsr_fman __iomem *reg;	/* whole FMan CCSR block (shared mapping) */
+	struct sk_buff **rx_skbuff;	/* skb per Rx BD slot; NULL = slot empty */
+	struct sk_buff **tx_skbuff;	/* skb per Tx BD slot, freed on reuse */
+	struct napi_struct napi;
+	int rx_buffer_size;		/* payload size mapped per Rx buffer */
+	u16 skb_currx;			/* next Rx slot index (ring, power-of-2) */
+	u16 skb_curtx;			/* next Tx slot index (ring, power-of-2) */
+
+	/* PHY stuff */
+	phy_interface_t interface;
+	struct device_node *phy_node;
+	struct device_node *tbi_node;
+	struct phy_device *phydev;
+	struct mii_dev *bus;
+	int oldspeed;
+	int oldduplex;
+	int oldlink;
+
+	struct fsl_enet_mac *mac;	/* MAC controller */
+	int phyaddr;
+	int max_rx_len;
+	struct fm_port_global_pram *rx_pram;	/* Rx parameter table */
+	struct fm_port_global_pram *tx_pram;	/* Tx parameter table */
+	void *rx_bd_ring;	/* Rx BD ring base */
+	void *cur_rxbd;		/* current Rx BD */
+	void *rx_buf;		/* Rx buffer base */
+	void *tx_bd_ring;	/* Tx BD ring base */
+	void *cur_txbd;		/* current Tx BD */
+
+	uint32_t msg_enable;
+	spinlock_t lock;
+
+};
+
+#define RX_QD_RXF_INTMASK 0x0010
+#define RX_QD_BSY_INTMASK 0x0008
+#define RX_BD_RING_SIZE 8
+#define TX_BD_RING_SIZE 8
+#define MAX_RXBUF_LOG2 11
+#define MAX_RXBUF_LEN (1 << MAX_RXBUF_LOG2)
+#define RXBUF_ALIGNMENT 64
+#define DEFAULT_RX_BUFFER_SIZE 1536
+
+
+#define PORT_IS_ENABLED(port) fm_info[fm_port_to_index(port)].enabled
+
+#define FMAN_IM_TX_QUEUES 1
+#define FMAN_IM_RX_QUEUES 128
+
+#define IMASK_TXCEN 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_RXCEN 0x40000000
+/* Parenthesized: the bare OR expansion would mis-bind inside any
+ * surrounding expression (e.g. ~IMASK_TX_DEFAULT, x & IMASK_TX_DEFAULT). */
+#define IMASK_TX_DEFAULT (IMASK_TXCEN | IMASK_TXEEN)
+#define IMASK_RX_DEFAULT (IMASK_RXCEN)
+
+/* 32-bit read of an FMan register (FMan registers are big-endian). */
+static inline u32 fm_im_read(unsigned __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
+/* 32-bit write of an FMan register (FMan registers are big-endian). */
+static inline void fm_im_write(unsigned __iomem *addr, u32 val)
+{
+	iowrite32be(val, addr);
+}
+
+/* Read-modify-write: clear the given bits in a big-endian FMan register. */
+static inline void fm_im_clrbits(unsigned __iomem *addr, u32 clear)
+{
+	iowrite32be(ioread32be(addr) & ~clear, addr);
+}
+
+/* Read-modify-write: set the given bits in a big-endian FMan register. */
+static inline void fm_im_setbits(unsigned __iomem *addr, u32 set)
+{
+	iowrite32be(ioread32be(addr) | set, addr);
+}
+
+#endif /* __FMAN_IM_H__ */
diff --git a/fsl_fman.h b/fsl_fman.h
new file mode 100644
index 0000000..c48651c
--- /dev/null
+++ b/fsl_fman.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2017-2018 NXP */
+/* Author: Alan Wang <alan.wang@nxp.com> */
+
+/*
+ * FMAN Internal Memory Map
+ *
+ */
+
+#ifndef __FSL_FMAN_H__
+#define __FSL_FMAN_H__
+
+#include <linux/types.h>
+
+typedef struct fm_bmi_common {
+ u32 fmbm_init; /* BMI initialization */
+ u32 fmbm_cfg1; /* BMI configuration1 */
+ u32 fmbm_cfg2; /* BMI configuration2 */
+ u32 res0[0x5];
+ u32 fmbm_ievr; /* interrupt event register */
+ u32 fmbm_ier; /* interrupt enable register */
+ u32 fmbm_ifr; /* interrupt force register */
+ u32 res1[0x5];
+ u32 fmbm_arb[0x8]; /* BMI arbitration */
+ u32 res2[0x28];
+ u32 fmbm_gde; /* global debug enable */
+ u32 fmbm_pp[0x3f]; /* BMI port parameters */
+ u32 res3;
+ u32 fmbm_pfs[0x3f]; /* BMI port FIFO size */
+ u32 res4;
+ u32 fmbm_ppid[0x3f];/* port partition ID */
+} fm_bmi_common_t;
+
+typedef struct fm_qmi_common {
+ u32 fmqm_gc; /* general configuration register */
+ u32 res0;
+ u32 fmqm_eie; /* error interrupt event register */
+ u32 fmqm_eien; /* error interrupt enable register */
+ u32 fmqm_eif; /* error interrupt force register */
+ u32 fmqm_ie; /* interrupt event register */
+ u32 fmqm_ien; /* interrupt enable register */
+ u32 fmqm_if; /* interrupt force register */
+ u32 fmqm_gs; /* global status register */
+ u32 fmqm_ts; /* task status register */
+ u32 fmqm_etfc; /* enqueue total frame counter */
+ u32 fmqm_dtfc; /* dequeue total frame counter */
+ u32 fmqm_dc0; /* dequeue counter 0 */
+ u32 fmqm_dc1; /* dequeue counter 1 */
+ u32 fmqm_dc2; /* dequeue counter 2 */
+ u32 fmqm_dc3; /* dequeue counter 3 */
+ u32 fmqm_dfnoc; /* dequeue FQID not override counter */
+ u32 fmqm_dfcc; /* dequeue FQID from context counter */
+ u32 fmqm_dffc; /* dequeue FQID from FD counter */
+ u32 fmqm_dcc; /* dequeue confirm counter */
+ u32 res1[0xc];
+ u32 fmqm_dtrc; /* debug trap configuration register */
+ u32 fmqm_efddd; /* enqueue frame descriptor dynamic debug */
+ u32 res3[0x2];
+ u32 res4[0xdc]; /* missing debug regs */
+} fm_qmi_common_t;
+
+typedef struct fm_bmi {
+ u8 res[1024];
+} fm_bmi_t;
+
+typedef struct fm_qmi {
+ u8 res[1024];
+} fm_qmi_t;
+
+struct fm_bmi_rx_port {
+ u32 fmbm_rcfg; /* Rx configuration */
+ u32 fmbm_rst; /* Rx status */
+ u32 fmbm_rda; /* Rx DMA attributes */
+ u32 fmbm_rfp; /* Rx FIFO parameters */
+ u32 fmbm_rfed; /* Rx frame end data */
+ u32 fmbm_ricp; /* Rx internal context parameters */
+ u32 fmbm_rim; /* Rx internal margins */
+ u32 fmbm_rebm; /* Rx external buffer margins */
+ u32 fmbm_rfne; /* Rx frame next engine */
+ u32 fmbm_rfca; /* Rx frame command attributes */
+ u32 fmbm_rfpne; /* Rx frame parser next engine */
+ u32 fmbm_rpso; /* Rx parse start offset */
+ u32 fmbm_rpp; /* Rx policer profile */
+ u32 fmbm_rccb; /* Rx coarse classification base */
+ u32 res1[0x2];
+ u32 fmbm_rprai[0x8]; /* Rx parse results array Initialization */
+ u32 fmbm_rfqid; /* Rx frame queue ID */
+ u32 fmbm_refqid; /* Rx error frame queue ID */
+ u32 fmbm_rfsdm; /* Rx frame status discard mask */
+ u32 fmbm_rfsem; /* Rx frame status error mask */
+ u32 fmbm_rfene; /* Rx frame enqueue next engine */
+ u32 res2[0x23];
+ u32 fmbm_ebmpi[0x8]; /* buffer manager pool information */
+ u32 fmbm_acnt[0x8]; /* allocate counter */
+ u32 res3[0x8];
+ u32 fmbm_cgm[0x8]; /* congestion group map */
+ u32 fmbm_mpd; /* BMan pool depletion */
+ u32 res4[0x1F];
+ u32 fmbm_rstc; /* Rx statistics counters */
+ u32 fmbm_rfrc; /* Rx frame counters */
+ u32 fmbm_rfbc; /* Rx bad frames counter */
+ u32 fmbm_rlfc; /* Rx large frames counter */
+ u32 fmbm_rffc; /* Rx filter frames counter */
+ u32 fmbm_rfdc; /* Rx frame discard counter */
+ u32 fmbm_rfldec; /* Rx frames list DMA error counter */
+ u32 fmbm_rodc; /* Rx out of buffers discard counter */
+ u32 fmbm_rbdc; /* Rx buffers deallocate counter */
+ u32 res5[0x17];
+ u32 fmbm_rpc; /* Rx performance counters */
+ u32 fmbm_rpcp; /* Rx performance count parameters */
+ u32 fmbm_rccn; /* Rx cycle counter */
+ u32 fmbm_rtuc; /* Rx tasks utilization counter */
+ u32 fmbm_rrquc; /* Rx receive queue utilization counter */
+ u32 fmbm_rduc; /* Rx DMA utilization counter */
+ u32 fmbm_rfuc; /* Rx FIFO utilization counter */
+ u32 fmbm_rpac; /* Rx pause activation counter */
+ u32 res6[0x18];
+ u32 fmbm_rdbg; /* Rx debug configuration */
+};
+
+/* FMBM_RCFG - Rx configuration */
+#define FMBM_RCFG_EN 0x80000000 /* port is enabled to receive data */
+#define FMBM_RCFG_FDOVR 0x02000000 /* frame discard override */
+#define FMBM_RCFG_IM 0x01000000 /* independent mode */
+
+/* FMBM_RST - Rx status */
+#define FMBM_RST_BSY 0x80000000 /* Rx port is busy */
+
+/* FMBM_RFCA - Rx frame command attributes */
+#define FMBM_RFCA_ORDER 0x80000000
+#define FMBM_RFCA_MR_MASK 0x003f0000
+/* argument parenthesized so e.g. FMBM_RFCA_MR(a | b) expands correctly */
+#define FMBM_RFCA_MR(x) (((x) << 16) & FMBM_RFCA_MR_MASK)
+
+/* FMBM_RSTC - Rx statistics */
+#define FMBM_RSTC_EN 0x80000000 /* statistics counters enable */
+
+struct fm_bmi_tx_port {
+ u32 fmbm_tcfg; /* Tx configuration */
+ u32 fmbm_tst; /* Tx status */
+ u32 fmbm_tda; /* Tx DMA attributes */
+ u32 fmbm_tfp; /* Tx FIFO parameters */
+ u32 fmbm_tfed; /* Tx frame end data */
+ u32 fmbm_ticp; /* Tx internal context parameters */
+ u32 fmbm_tfne; /* Tx frame next engine */
+ u32 fmbm_tfca; /* Tx frame command attributes */
+ u32 fmbm_tcfqid;/* Tx confirmation frame queue ID */
+ u32 fmbm_tfeqid;/* Tx error frame queue ID */
+ u32 fmbm_tfene; /* Tx frame enqueue next engine */
+ u32 fmbm_trlmts;/* Tx rate limiter scale */
+ u32 fmbm_trlmt; /* Tx rate limiter */
+ u32 res0[0x73];
+ u32 fmbm_tstc; /* Tx statistics counters */
+ u32 fmbm_tfrc; /* Tx frame counter */
+ u32 fmbm_tfdc; /* Tx frames discard counter */
+ u32 fmbm_tfledc;/* Tx frame length error discard counter */
+ u32 fmbm_tfufdc;/* Tx frame unsupported format discard counter */
+ u32 fmbm_tbdc; /* Tx buffers deallocate counter */
+ u32 res1[0x1a];
+ u32 fmbm_tpc; /* Tx performance counters */
+ u32 fmbm_tpcp; /* Tx performance count parameters */
+ u32 fmbm_tccn; /* Tx cycle counter */
+ u32 fmbm_ttuc; /* Tx tasks utilization counter */
+ u32 fmbm_ttcquc;/* Tx transmit confirm queue utilization counter */
+ u32 fmbm_tduc; /* Tx DMA utilization counter */
+ u32 fmbm_tfuc; /* Tx FIFO utilization counter */
+ u32 res2[0x19];
+ u32 fmbm_tdcfg; /* Tx debug configuration */
+};
+
+/* FMBM_TCFG - Tx configuration */
+#define FMBM_TCFG_EN 0x80000000 /* port is enabled to transmit data */
+#define FMBM_TCFG_IM 0x01000000 /* independent mode enable */
+
+/* FMBM_TST - Tx status */
+#define FMBM_TST_BSY 0x80000000 /* Tx port is busy */
+
+/* FMBM_TFCA - Tx frame command attributes */
+#define FMBM_TFCA_ORDER 0x80000000
+#define FMBM_TFCA_MR_MASK 0x003f0000
+/* argument parenthesized so e.g. FMBM_TFCA_MR(a | b) expands correctly */
+#define FMBM_TFCA_MR(x) (((x) << 16) & FMBM_TFCA_MR_MASK)
+
+/* FMBM_TSTC - Tx statistics counters */
+#define FMBM_TSTC_EN 0x80000000
+
+/* FMBM_INIT - BMI initialization register */
+#define FMBM_INIT_START 0x80000000 /* init internal buffers */
+
+/* FMBM_CFG1 - BMI configuration 1 */
+#define FMBM_CFG1_FBPS_MASK 0x03ff0000 /* Free buffer pool size */
+#define FMBM_CFG1_FBPS_SHIFT 16
+#define FMBM_CFG1_FBPO_MASK 0x000003ff /* Free buffer pool offset */
+
+/* FMBM_CFG2 - BMI configuration 2 */
+#define FMBM_CFG2_TNTSKS_MASK 0x7b0000 /* Total number of tasks */
+
+/* FMBM_IEVR - interrupt event */
+#define FMBM_IEVR_PEC 0x80000000 /* pipeline table ECC err detected */
+#define FMBM_IEVR_LEC 0x40000000 /* linked list RAM ECC error */
+#define FMBM_IEVR_SEC 0x20000000 /* statistics count RAM ECC error */
+#define FMBM_IEVR_CLEAR_ALL (FMBM_IEVR_PEC | FMBM_IEVR_LEC | FMBM_IEVR_SEC)
+
+/* FMBM_IER - interrupt enable */
+#define FMBM_IER_PECE 0x80000000 /* PEC interrupt enable */
+#define FMBM_IER_LECE 0x40000000 /* LEC interrupt enable */
+#define FMBM_IER_SECE 0x20000000 /* SEC interrupt enable */
+
+#define FMBM_IER_DISABLE_ALL 0x00000000
+
+/* FMBM_PP - BMI Port Parameters */
+#define FMBM_PP_MXT_MASK 0x3f000000 /* Max # tasks */
+#define FMBM_PP_MXT(x) ((((x) - 1) << 24) & FMBM_PP_MXT_MASK)
+#define FMBM_PP_MXD_MASK 0x00000f00 /* Max DMA */
+#define FMBM_PP_MXD(x) ((((x) - 1) << 8) & FMBM_PP_MXD_MASK)
+
+/* FMBM_PFS - BMI Port FIFO Size */
+#define FMBM_PFS_IFSZ_MASK 0x000003ff /* Internal Fifo Size */
+#define FMBM_PFS_IFSZ(x) ((x) & FMBM_PFS_IFSZ_MASK)
+
+/* FMQM_GC - global configuration */
+#define FMQM_GC_ENQ_EN 0x80000000 /* enqueue enable */
+#define FMQM_GC_DEQ_EN 0x40000000 /* dequeue enable */
+#define FMQM_GC_STEN 0x10000000 /* enable global stat counters */
+#define FMQM_GC_ENQ_THR_MASK 0x00003f00 /* max number of enqueue Tnum */
+/* fixed: mask macro name was misspelled (FMQM_GC_ENQ_THR_MAS) and would
+ * not compile if FMQM_GC_ENQ() were ever used */
+#define FMQM_GC_ENQ(x) (((x) << 8) & FMQM_GC_ENQ_THR_MASK)
+#define FMQM_GC_DEQ_THR_MASK 0x0000003f /* max number of dequeue Tnum */
+#define FMQM_GC_DEQ(x) ((x) & FMQM_GC_DEQ_THR_MASK)
+
+/* FMQM_EIE - error interrupt event register */
+#define FMQM_EIE_DEE 0x80000000 /* double-bit ECC error */
+#define FMQM_EIE_DFUPE 0x40000000 /* dequeue from unknown PortID */
+#define FMQM_EIE_CLEAR_ALL (FMQM_EIE_DEE | FMQM_EIE_DFUPE)
+
+/* FMQM_EIEN - error interrupt enable register */
+#define FMQM_EIEN_DEEN 0x80000000 /* double-bit ECC error */
+#define FMQM_EIEN_DFUPEN 0x40000000 /* dequeue from unknown PortID */
+#define FMQM_EIEN_DISABLE_ALL 0x00000000
+
+/* FMQM_IE - interrupt event register */
+#define FMQM_IE_SEE 0x80000000 /* single-bit ECC error detected */
+#define FMQM_IE_CLEAR_ALL FMQM_IE_SEE
+
+/* FMQM_IEN - interrupt enable register */
+#define FMQM_IEN_SEE 0x80000000 /* single-bit ECC err IRQ enable */
+#define FMQM_IEN_DISABLE_ALL 0x00000000
+
+/* NIA - next invoked action */
+#define NIA_ENG_RISC 0x00000000
+#define NIA_ENG_MASK 0x007c0000
+
+/* action code */
+#define NIA_RISC_AC_CC 0x00000006
+#define NIA_RISC_AC_IM_TX 0x00000008 /* independent mode Tx */
+#define NIA_RISC_AC_IM_RX 0x0000000a /* independent mode Rx */
+#define NIA_RISC_AC_HC 0x0000000c
+
+typedef struct fm_parser {
+ u8 res[1024];
+} fm_parser_t;
+
+typedef struct fm_policer {
+ u8 res[4*1024];
+} fm_policer_t;
+
+typedef struct fm_keygen {
+ u8 res[4*1024];
+} fm_keygen_t;
+
+typedef struct fm_dma {
+ u32 fmdmsr; /* status register */
+ u32 fmdmmr; /* mode register */
+ u32 fmdmtr; /* bus threshold register */
+ u32 fmdmhy; /* bus hysteresis register */
+ u32 fmdmsetr; /* SOS emergency threshold register */
+ u32 fmdmtah; /* transfer bus address high register */
+ u32 fmdmtal; /* transfer bus address low register */
+ u32 fmdmtcid; /* transfer bus communication ID register */
+ u32 fmdmra; /* DMA bus internal ram address register */
+ u32 fmdmrd; /* DMA bus internal ram data register */
+ u32 res0[0xb];
+ u32 fmdmdcr; /* debug counter */
+ u32 fmdmemsr; /* emrgency smoother register */
+ u32 res1;
+ u32 fmdmplr[32]; /* FM DMA PID-LIODN # register */
+ u32 res[0x3c8];
+} fm_dma_t;
+
+/* FMDMSR - Fman DMA status register */
+#define FMDMSR_CMDQNE 0x10000000 /* command queue not empty */
+#define FMDMSR_BER 0x08000000 /* bus err event occurred on bus */
+#define FMDMSR_RDB_ECC 0x04000000 /* read buffer ECC error */
+#define FMDMSR_WRB_SECC 0x02000000 /* write buf ECC err sys side */
+#define FMDMSR_WRB_FECC 0x01000000 /* write buf ECC err Fman side */
+#define FMDMSR_DPEXT_SECC 0x00800000 /* DP external ECC err sys side */
+#define FMDMSR_DPEXT_FECC 0x00400000 /* DP external ECC err Fman side */
+#define FMDMSR_DPDAT_SECC 0x00200000 /* DP data ECC err on sys side */
+#define FMDMSR_DPDAT_FECC 0x00100000 /* DP data ECC err on Fman side */
+#define FMDMSR_SPDAT_FECC 0x00080000 /* SP data ECC error Fman side */
+
+#define FMDMSR_CLEAR_ALL (FMDMSR_BER | FMDMSR_RDB_ECC \
+ | FMDMSR_WRB_SECC | FMDMSR_WRB_FECC \
+ | FMDMSR_DPEXT_SECC | FMDMSR_DPEXT_FECC \
+ | FMDMSR_DPDAT_SECC | FMDMSR_DPDAT_FECC \
+ | FMDMSR_SPDAT_FECC)
+
+/* FMDMMR - FMan DMA mode register */
+#define FMDMMR_SBER 0x10000000 /* stop the DMA if a bus error */
+
+/* FMan FPM (Frame Processing Manager) memory-mapped register layout */
+typedef struct fm_fpm {
+ u32 fpmtnc; /* TNUM control */
+ u32 fpmprc; /* Port_ID control */
+ u32 res0;
+ u32 fpmflc; /* flush control */
+ u32 fpmdis1; /* dispatch thresholds1 */
+ u32 fpmdis2; /* dispatch thresholds2 */
+ u32 fmepi; /* error pending interrupts */
+ u32 fmrie; /* rams interrupt enable */
+ u32 fpmfcevent[0x4];/* FMan controller event 0-3 */
+ u32 res1[0x4];
+ u32 fpmfcmask[0x4]; /* FMan controller mask 0-3 */
+ u32 res2[0x4];
+ u32 fpmtsc1; /* timestamp control1 */
+ u32 fpmtsc2; /* timestamp control2 */
+ u32 fpmtsp; /* time stamp */
+ u32 fpmtsf; /* time stamp fraction */
+ u32 fpmrcr; /* rams control and event */
+ u32 res3[0x3];
+ u32 fpmdrd[0x4]; /* data_ram data 0-3 */
+ u32 res4[0xc];
+ u32 fpmdra; /* data ram access */
+ u32 fm_ip_rev_1; /* IP block revision 1 */
+ u32 fm_ip_rev_2; /* IP block revision 2 */
+ u32 fmrstc; /* reset command */
+ u32 fmcld; /* classifier debug control */
+ u32 fmnpi; /* normal pending interrupts */
+ u32 res5;
+ u32 fmfpee; /* event and enable */
+ u32 fpmcev[0x4]; /* CPU event 0-3 */
+ u32 res6[0x4];
+ u32 fmfp_ps[0x40]; /* port status */
+ u32 res7[0x260];
+ u32 fpmts[0x80]; /* task status */
+ u32 res8[0xa0]; /* pad block to its full window */
+} fm_fpm_t;
+
+/* FMFP_PRC - FPM Port_ID Control Register */
+#define FMFPPRC_PORTID_MASK 0x3f000000
+#define FMFPPRC_PORTID_SHIFT 24
+#define FMFPPRC_ORA_SHIFT 16
+#define FMFPPRC_RISC1 0x00000001
+#define FMFPPRC_RISC2 0x00000002
+/* Bugfix: was "FMFPPRC_RSIC2" (undefined identifier) -- any use of
+ * FMFPPRC_RISC_ALL would have failed to compile. */
+#define FMFPPRC_RISC_ALL (FMFPPRC_RISC1 | FMFPPRC_RISC2)
+
+/* FPM Flush Control Register */
+#define FMFP_FLC_DISP_LIM_NONE 0x00000000 /* no dispatch limitation */
+
+/* FMFP_CEE - Interrupt Enable */
+#define FMFPCEE_IM_MASK_RXF 0x80000000 /* Receive frame interrupt event Mask*/
+#define FMFPCEE_IM_MASK_BSY 0x40000000 /* BD ring busy interrupt event Mask*/
+#define FMNPI_EN_REV0 0x00008000 /* normal-pending-interrupt enables, rev 0-3 */
+#define FMNPI_EN_REV1 0x00004000
+#define FMNPI_EN_REV2 0x00002000
+#define FMNPI_EN_REV3 0x00001000
+
+/* FMFP_EE - FPM event and enable register */
+#define FMFPEE_DECC 0x80000000 /* double ECC err on FPM ram */
+#define FMFPEE_STL 0x40000000 /* stall of task ... */
+#define FMFPEE_SECC 0x20000000 /* single ECC error */
+#define FMFPEE_RFM 0x00010000 /* release FMan */
+#define FMFPEE_DECC_EN 0x00008000 /* double ECC interrupt enable */
+#define FMFPEE_STL_EN 0x00004000 /* stall of task interrupt enable */
+#define FMFPEE_SECC_EN 0x00002000 /* single ECC err interrupt enable */
+#define FMFPEE_EHM 0x00000008 /* external halt enable */
+#define FMFPEE_UEC 0x00000004 /* FMan is not halted */
+#define FMFPEE_CER 0x00000002 /* only erroneous task stalled */
+#define FMFPEE_DER 0x00000001 /* DMA error is just reported */
+
+/* OR of every clearable event bit above (enable bits excluded) */
+#define FMFPEE_CLEAR_EVENT (FMFPEE_DECC | FMFPEE_STL | FMFPEE_SECC | \
+ FMFPEE_EHM | FMFPEE_UEC | FMFPEE_CER | \
+ FMFPEE_DER | FMFPEE_RFM)
+
+/* FMFP_RCR - FMan Rams Control and Event */
+#define FMFP_RCR_MDEC 0x00008000 /* double ECC error in muram */
+#define FMFP_RCR_IDEC 0x00004000 /* double ECC error in iram */
+
+/* FMan instruction RAM (IRAM) access registers */
+typedef struct fm_imem {
+ u32 iadd; /* instruction address register */
+ u32 idata; /* instruction data register */
+ u32 itcfg; /* timing config register */
+ u32 iready; /* ready register */
+ u8 res[0xff0]; /* pad block to 4 KiB */
+} fm_imem_t;
+#define IRAM_IADD_AIE 0x80000000 /* address auto increase enable */
+#define IRAM_READY 0x80000000 /* ready to use */
+
+/* Soft parser block: register space reserved, not used by this driver */
+typedef struct fm_soft_parser {
+ u8 res[4*1024];
+} fm_soft_parser_t;
+
+/* NOTE(review): struct tag "fm_dtesc" looks like a typo for "fm_dtsec"
+ * (cf. the typedef name fm_dtsec_t); harmless as long as no other file
+ * refers to the tag itself -- confirm before renaming. */
+typedef struct fm_dtesc {
+ u8 res[4*1024];
+} fm_dtsec_t;
+
+/* dTSEC MDIO controller; MII management registers start at offset 0x120 */
+typedef struct fm_mdio {
+ u8 res0[0x120];
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+ u8 res1[0x1000 - 0x138]; /* pad block to 4 KiB */
+} fm_mdio_t;
+
+/* Remaining MAC-related blocks: 4 KiB placeholders, contents not
+ * accessed through these types by this driver */
+typedef struct fm_10gec {
+ u8 res[4*1024];
+} fm_10gec_t;
+
+typedef struct fm_10gec_mdio {
+ u8 res[4*1024];
+} fm_10gec_mdio_t;
+
+typedef struct fm_memac {
+ u8 res[4*1024];
+} fm_memac_t;
+
+typedef struct fm_memac_mdio {
+ u8 res[4*1024];
+} fm_memac_mdio_t;
+
+typedef struct fm_1588 {
+ u8 res[4*1024];
+} fm_1588_t;
+
+/*
+ * Overall FMan CCSR map: MURAM, common BMI/QMI, 63 per-port register
+ * sets, then the global blocks (policer, keygen, DMA, FPM, IRAM, ...),
+ * the ten mEMAC+MDIO pairs, the dedicated MDIO controllers and 1588.
+ * Field order and padding define the hardware offsets -- do not reorder.
+ */
+typedef struct ccsr_fman {
+ u8 muram[0x80000];
+ fm_bmi_common_t fm_bmi_common;
+ fm_qmi_common_t fm_qmi_common;
+ u8 res0[2048];
+ struct {
+ fm_bmi_t fm_bmi;
+ fm_qmi_t fm_qmi;
+ fm_parser_t fm_parser;
+ u8 res[1024];
+ } port[63];
+ fm_policer_t fm_policer;
+ fm_keygen_t fm_keygen;
+ fm_dma_t fm_dma;
+ fm_fpm_t fm_fpm;
+ fm_imem_t fm_imem;
+ u8 res1[8*1024];
+ fm_soft_parser_t fm_soft_parser;
+ u8 res2[96*1024];
+
+ struct {
+ fm_memac_t fm_memac;
+ fm_memac_mdio_t fm_memac_mdio;
+ } memac[10];
+ u8 res4[32*1024];
+ fm_memac_mdio_t fm_dedicated_mdio[2];
+
+ fm_1588_t fm_1588;
+ u8 res5[4*1024];
+} ccsr_fman_t;
+
+#endif /*__FSL_FMAN_H__*/
diff --git a/fsl_memac.h b/fsl_memac.h
new file mode 100644
index 0000000..c68e2bb
--- /dev/null
+++ b/fsl_memac.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2017-2018 NXP */
+/* Author: Alan Wang <alan.wang@nxp.com> */
+
+/*
+ * MEMAC internal mapping.
+ *
+ */
+
+#ifndef __MEMAC_H__
+#define __MEMAC_H__
+
+/*
+ * mEMAC memory-mapped register layout: control/status registers,
+ * statistics counters (each a lower/upper 32-bit pair), line-interface
+ * control and HiGig registers. Field order defines hardware offsets --
+ * do not reorder; res_* names encode the byte offset of the gap.
+ */
+struct memac {
+ /* memac general control and status registers */
+ u32 res_0[2];
+ u32 command_config; /* Control and configuration register */
+ u32 mac_addr_0; /* Lower 32 bits of 48-bit MAC address */
+ u32 mac_addr_1; /* Upper 16 bits of 48-bit MAC address */
+ u32 maxfrm; /* Maximum frame length register */
+ u32 res_18;
+ u32 rx_fifo_sections; /* Receive FIFO configuration register */
+ u32 tx_fifo_sections; /* Transmit FIFO configuration register */
+ u32 res_24[2];
+ u32 hashtable_ctrl; /* Hash table control register */
+ u32 res_30[4];
+ u32 ievent; /* Interrupt event register */
+ u32 tx_ipg_length; /* Transmitter inter-packet-gap register */
+ u32 res_48;
+ u32 imask; /* interrupt mask register */
+ u32 res_50;
+ u32 cl_pause_quanta[4]; /* CL01-CL67 pause quanta register */
+ u32 cl_pause_thresh[4]; /* CL01-CL67 pause thresh register */
+ u32 rx_pause_status; /* Receive pause status register */
+ u32 res_78[2];
+ u32 mac_addr[14]; /* MAC address */
+ u32 lpwake_timer; /* EEE low power wakeup timer register */
+ u32 sleep_timer; /* Transmit EEE Low Power Timer register */
+ u32 res_c0[8];
+ u32 statn_config; /* Statistics configuration register */
+ u32 res_e4[7];
+
+ /* memac statistics counter registers */
+ u32 rx_eoct_l; /* Rx ethernet octets lower */
+ u32 rx_eoct_u; /* Rx ethernet octets upper */
+ u32 rx_oct_l; /* Rx octets lower */
+ u32 rx_oct_u; /* Rx octets upper */
+ u32 rx_align_err_l; /* Rx alignment error lower */
+ u32 rx_align_err_u; /* Rx alignment error upper */
+ u32 rx_pause_frame_l; /* Rx valid pause frame lower */
+ u32 rx_pause_frame_u; /* Rx valid pause frame upper */
+ u32 rx_frame_l; /* Rx frame counter lower */
+ u32 rx_frame_u; /* Rx frame counter upper */
+ u32 rx_frame_crc_err_l; /* Rx frame check sequence error lower */
+ u32 rx_frame_crc_err_u; /* Rx frame check sequence error upper */
+ u32 rx_vlan_l; /* Rx VLAN frame lower */
+ u32 rx_vlan_u; /* Rx VLAN frame upper */
+ u32 rx_err_l; /* Rx frame error lower */
+ u32 rx_err_u; /* Rx frame error upper */
+ u32 rx_uni_l; /* Rx unicast frame lower */
+ u32 rx_uni_u; /* Rx unicast frame upper */
+ u32 rx_multi_l; /* Rx multicast frame lower */
+ u32 rx_multi_u; /* Rx multicast frame upper */
+ u32 rx_brd_l; /* Rx broadcast frame lower */
+ u32 rx_brd_u; /* Rx broadcast frame upper */
+ u32 rx_drop_l; /* Rx dropped packets lower */
+ u32 rx_drop_u; /* Rx dropped packets upper */
+ u32 rx_pkt_l; /* Rx packets lower */
+ u32 rx_pkt_u; /* Rx packets upper */
+ u32 rx_undsz_l; /* Rx undersized packet lower */
+ u32 rx_undsz_u; /* Rx undersized packet upper */
+ u32 rx_64_l; /* Rx 64 oct packet lower */
+ u32 rx_64_u; /* Rx 64 oct packet upper */
+ u32 rx_127_l; /* Rx 65 to 127 oct packet lower */
+ u32 rx_127_u; /* Rx 65 to 127 oct packet upper */
+ u32 rx_255_l; /* Rx 128 to 255 oct packet lower */
+ u32 rx_255_u; /* Rx 128 to 255 oct packet upper */
+ u32 rx_511_l; /* Rx 256 to 511 oct packet lower */
+ u32 rx_511_u; /* Rx 256 to 511 oct packet upper */
+ u32 rx_1023_l; /* Rx 512 to 1023 oct packet lower */
+ u32 rx_1023_u; /* Rx 512 to 1023 oct packet upper */
+ u32 rx_1518_l; /* Rx 1024 to 1518 oct packet lower */
+ u32 rx_1518_u; /* Rx 1024 to 1518 oct packet upper */
+ u32 rx_1519_l; /* Rx 1519 to max oct packet lower */
+ u32 rx_1519_u; /* Rx 1519 to max oct packet upper */
+ u32 rx_oversz_l; /* Rx oversized packet lower */
+ u32 rx_oversz_u; /* Rx oversized packet upper */
+ u32 rx_jabber_l; /* Rx Jabber packet lower */
+ u32 rx_jabber_u; /* Rx Jabber packet upper */
+ u32 rx_frag_l; /* Rx Fragment packet lower */
+ u32 rx_frag_u; /* Rx Fragment packet upper */
+ u32 rx_cnp_l; /* Rx control packet lower */
+ u32 rx_cnp_u; /* Rx control packet upper */
+ u32 rx_drntp_l; /* Rx dropped not truncated packet lower */
+ u32 rx_drntp_u; /* Rx dropped not truncated packet upper */
+ u32 res_1d0[0xc];
+
+ u32 tx_eoct_l; /* Tx ethernet octets lower */
+ u32 tx_eoct_u; /* Tx ethernet octets upper */
+ u32 tx_oct_l; /* Tx octets lower */
+ u32 tx_oct_u; /* Tx octets upper */
+ u32 res_210[0x2];
+ u32 tx_pause_frame_l; /* Tx valid pause frame lower */
+ u32 tx_pause_frame_u; /* Tx valid pause frame upper */
+ u32 tx_frame_l; /* Tx frame counter lower */
+ u32 tx_frame_u; /* Tx frame counter upper */
+ u32 tx_frame_crc_err_l; /* Tx frame check sequence error lower */
+ u32 tx_frame_crc_err_u; /* Tx frame check sequence error upper */
+ u32 tx_vlan_l; /* Tx VLAN frame lower */
+ u32 tx_vlan_u; /* Tx VLAN frame upper */
+ u32 tx_frame_err_l; /* Tx frame error lower */
+ u32 tx_frame_err_u; /* Tx frame error upper */
+ u32 tx_uni_l; /* Tx unicast frame lower */
+ u32 tx_uni_u; /* Tx unicast frame upper */
+ u32 tx_multi_l; /* Tx multicast frame lower */
+ u32 tx_multi_u; /* Tx multicast frame upper */
+ u32 tx_brd_l; /* Tx broadcast frame lower */
+ u32 tx_brd_u; /* Tx broadcast frame upper */
+ u32 res_258[0x2];
+ u32 tx_pkt_l; /* Tx packets lower */
+ u32 tx_pkt_u; /* Tx packets upper */
+ u32 tx_undsz_l; /* Tx undersized packet lower */
+ u32 tx_undsz_u; /* Tx undersized packet upper */
+ u32 tx_64_l; /* Tx 64 oct packet lower */
+ u32 tx_64_u; /* Tx 64 oct packet upper */
+ u32 tx_127_l; /* Tx 65 to 127 oct packet lower */
+ u32 tx_127_u; /* Tx 65 to 127 oct packet upper */
+ u32 tx_255_l; /* Tx 128 to 255 oct packet lower */
+ u32 tx_255_u; /* Tx 128 to 255 oct packet upper */
+ u32 tx_511_l; /* Tx 256 to 511 oct packet lower */
+ u32 tx_511_u; /* Tx 256 to 511 oct packet upper */
+ u32 tx_1023_l; /* Tx 512 to 1023 oct packet lower */
+ u32 tx_1023_u; /* Tx 512 to 1023 oct packet upper */
+ u32 tx_1518_l; /* Tx 1024 to 1518 oct packet lower */
+ u32 tx_1518_u; /* Tx 1024 to 1518 oct packet upper */
+ u32 tx_1519_l; /* Tx 1519 to max oct packet lower */
+ u32 tx_1519_u; /* Tx 1519 to max oct packet upper */
+ u32 res_2a8[0x6];
+ u32 tx_cnp_l; /* Tx control packet lower */
+ u32 tx_cnp_u; /* Tx control packet upper */
+ u32 res_2c8[0xe];
+
+ /* Line interface control register */
+ u32 if_mode; /* interface mode control */
+ u32 if_status; /* interface status */
+ u32 res_308[0xe];
+
+ /* HiGig/2 Register */
+ u32 hg_config; /* HiGig2 control and configuration */
+ u32 res_344[0x3];
+ u32 hg_pause_quanta; /* HiGig2 pause quanta */
+ u32 res_354[0x3];
+ u32 hg_pause_thresh; /* HiGig2 pause quanta threshold */
+ u32 res_364[0x3];
+ u32 hgrx_pause_status; /* HiGig2 rx pause quanta status */
+ u32 hg_fifos_status; /* HiGig2 fifos status */
+ u32 rhm; /* Rx HiGig2 message counter register */
+ u32 thm;/* Tx HiGig2 message counter register */
+ u32 res_380[0x320]; /* pad block to its full window */
+};
+
+/* COMMAND_CONFIG - command and configuration register */
+#define MEMAC_CMD_CFG_RX_EN 0x00000002 /* MAC Rx path enable */
+#define MEMAC_CMD_CFG_TX_EN 0x00000001 /* MAC Tx path enable */
+#define MEMAC_CMD_CFG_RXTX_EN (MEMAC_CMD_CFG_RX_EN | MEMAC_CMD_CFG_TX_EN)
+#define MEMAC_CMD_CFG_NO_LEN_CHK 0x20000 /* Payload length check disable */
+#define MEMAC_CMD_CFG_PROMIS 0x10 /* Promiscuous enable */
+
+/* HASHTABLE_CTRL - Hashtable control register */
+#define HASHTABLE_CTRL_MCAST_EN 0x00000200 /* enable multicast Rx hash */
+#define HASHTABLE_CTRL_ADDR_MASK 0x000001ff
+
+/* TX_IPG_LENGTH - Transmit inter-packet gap length register */
+#define TX_IPG_LENGTH_IPG_LEN_MASK 0x000003ff
+
+/* IMASK - interrupt mask register (bit positions mirror IEVENT below) */
+#define IMASK_MDIO_SCAN_EVENT 0x00010000 /* MDIO scan event mask */
+#define IMASK_MDIO_CMD_CMPL 0x00008000 /* MDIO cmd completion mask */
+#define IMASK_REM_FAULT 0x00004000 /* remote fault mask */
+#define IMASK_LOC_FAULT 0x00002000 /* local fault mask */
+#define IMASK_TX_ECC_ER 0x00001000 /* Tx frame ECC error mask */
+#define IMASK_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow mask */
+#define IMASK_TX_ER 0x00000200 /* Tx frame error mask */
+#define IMASK_RX_FIFO_OVFL 0x00000100 /* Rx FIFO overflow mask */
+#define IMASK_RX_ECC_ER 0x00000080 /* Rx frame ECC error mask */
+#define IMASK_RX_JAB_FRM 0x00000040 /* Rx jabber frame mask */
+#define IMASK_RX_OVRSZ_FRM 0x00000020 /* Rx oversized frame mask */
+#define IMASK_RX_RUNT_FRM 0x00000010 /* Rx runt frame mask */
+#define IMASK_RX_FRAG_FRM 0x00000008 /* Rx fragment frame mask */
+#define IMASK_RX_LEN_ER 0x00000004 /* Rx payload length error mask */
+#define IMASK_RX_CRC_ER 0x00000002 /* Rx CRC error mask */
+#define IMASK_RX_ALIGN_ER 0x00000001 /* Rx alignment error mask */
+
+#define IMASK_MASK_ALL 0x00000000 /* all interrupts masked off */
+
+/* IEVENT - interrupt event register (write-1-to-clear) */
+#define IEVENT_MDIO_SCAN_EVENT 0x00010000 /* MDIO scan event */
+#define IEVENT_MDIO_CMD_CMPL 0x00008000 /* MDIO cmd completion */
+#define IEVENT_REM_FAULT 0x00004000 /* remote fault */
+#define IEVENT_LOC_FAULT 0x00002000 /* local fault */
+#define IEVENT_TX_ECC_ER 0x00001000 /* Tx frame ECC error */
+#define IEVENT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
+#define IEVENT_TX_ER 0x00000200 /* Tx frame error */
+#define IEVENT_RX_FIFO_OVFL 0x00000100 /* Rx FIFO overflow */
+#define IEVENT_RX_ECC_ER 0x00000080 /* Rx frame ECC error */
+#define IEVENT_RX_JAB_FRM 0x00000040 /* Rx jabber frame */
+#define IEVENT_RX_OVRSZ_FRM 0x00000020 /* Rx oversized frame */
+#define IEVENT_RX_RUNT_FRM 0x00000010 /* Rx runt frame */
+#define IEVENT_RX_FRAG_FRM 0x00000008 /* Rx fragment frame */
+#define IEVENT_RX_LEN_ER 0x00000004 /* Rx payload length error */
+#define IEVENT_RX_CRC_ER 0x00000002 /* Rx CRC error */
+#define IEVENT_RX_ALIGN_ER 0x00000001 /* Rx alignment error */
+
+#define IEVENT_CLEAR_ALL 0xffffffff /* write to clear every event bit */
+
+/* IF_MODE - Interface Mode Register */
+#define IF_MODE_EN_AUTO 0x00008000 /* 1 - Enable automatic speed selection */
+#define IF_MODE_SETSP_1000M 0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_SETSP_100M 0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_SETSP_10M 0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_SETSP_MASK 0x00006000 /* setsp mask bits */
+#define IF_MODE_XGMII 0x00000000 /* 00- XGMII(10) interface mode */
+#define IF_MODE_GMII 0x00000002 /* 10- GMII interface mode */
+#define IF_MODE_MII 0x00000001 /* 01- MII interface mode */
+#define IF_MODE_MASK 0x00000003 /* mask for mode interface mode */
+#define IF_MODE_RG 0x00000004 /* 1- RGMII */
+#define IF_MODE_RM 0x00000008 /* 1- RMII (was mislabelled "RGMII", a
+ * copy-paste of the RG line above) */
+#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
+#define IF_MODE_HD 0x00000040 /* Half duplex operation */
+
+/* Bugfix: IF_DEFAULT expanded to IF_GMII, which is not defined anywhere --
+ * any use would have failed to compile. The intended macro is IF_MODE_GMII. */
+#define IF_DEFAULT (IF_MODE_GMII)
+
+/* Internal PHY Registers - SGMII (accessed over MDIO) */
+#define PHY_SGMII_CR_PHY_RESET 0x8000 /* control reg: software reset */
+#define PHY_SGMII_CR_RESET_AN 0x0200 /* control reg: restart auto-negotiation */
+#define PHY_SGMII_CR_DEF_VAL 0x1140 /* control reg default value */
+#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001 /* device ability: SGMII */
+#define PHY_SGMII_IF_MODE_AN 0x0002 /* interface mode: auto-negotiation */
+#define PHY_SGMII_IF_MODE_SGMII 0x0001 /* interface mode: SGMII */
+
+/* mEMAC external MDIO controller register layout (registers start
+ * at offset 0x30; res0 covers the preceding gap) */
+struct memac_mdio_controller {
+ u32 res0[0xc];
+ u32 mdio_stat; /* MDIO configuration and status */
+ u32 mdio_ctl; /* MDIO control */
+ u32 mdio_data; /* MDIO data */
+ u32 mdio_addr; /* MDIO address */
+};
+
+/* MDIO_STAT fields. Macro parameters are fully parenthesized (were bare
+ * "x", which mis-expands for expression arguments such as a + b). */
+#define MDIO_STAT_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
+#define MDIO_STAT_BSY (1 << 0)
+#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_STAT_PRE (1 << 5)
+#define MDIO_STAT_ENC (1 << 6)
+#define MDIO_STAT_HOLD_15_CLK (7 << 2)
+
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS (1 << 10)
+#define MDIO_CTL_SCAN_EN (1 << 11)
+#define MDIO_CTL_POST_INC (1 << 14)
+#define MDIO_CTL_READ (1 << 15)
+
+#define MDIO_DATA(x) ((x) & 0xffff)
+/* 1U: left-shifting signed 1 into bit 31 is undefined behavior in C */
+#define MDIO_DATA_BSY (1U << 31)
+
+struct fsl_enet_mac; /* opaque; defined by the driver core */
+
+/* Bind the mEMAC operations to @mac. @base is the mapped mEMAC register
+ * block, @phyregs the mapped MDIO/PHY register block, @max_rx_len the
+ * maximum receive frame length to program. */
+void init_memac(struct fsl_enet_mac *mac, void *base, void *phyregs,
+ int max_rx_len);
+
+#endif