[PATCH 16/31] spi: add support for MediaTek spi-mem controller

Weijie Gao weijie.gao at mediatek.com
Thu Aug 4 05:36:00 CEST 2022


This patch adds support for the spi-mem controller found on newer MediaTek
SoCs. The controller supports single, dual and quad SPI modes.

Signed-off-by: SkyLake.Huang <skylake.huang at mediatek.com>
---
 drivers/spi/Kconfig    |   8 +
 drivers/spi/Makefile   |   1 +
 drivers/spi/mtk_spim.c | 705 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 714 insertions(+)
 create mode 100644 drivers/spi/mtk_spim.c
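
For reference, a controller node consumed by this driver could look roughly
like the sketch below. The compatible string, clock names and attribute
properties are the ones the driver reads from the device tree; the unit
address, register size, clock phandles and the delay values are placeholders:

	spi@1100a000 {
		compatible = "mediatek,ipm-spi";
		reg = <0x1100a000 0x100>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&topckgen CLK_TOP_SPI_SEL>,
			 <&infracfg CLK_INFRA_SPI>;
		clock-names = "sel-clk", "spi-clk";
		ipm_design;
		support_quad;
		enhance_timing;
		tick_dly = <2>;
		sample_sel = <0>;
	};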

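The controller is driven through the spi-mem ops added below
(adjust_op_size/supports_op/exec_op). As a rough usage sketch, a quad-output
fast read issued by a caller such as the SPI-NOR layer would reach
mtk_spim_exec_op() like this; the helper name, opcode and address length are
illustrative only, and a real caller keeps looping until the requested length
has been transferred:

#include <spi.h>
#include <spi-mem.h>

/* Hypothetical helper: 1-1-4 quad-output fast read (opcode 0x6b) */
static int example_quad_read(struct spi_slave *slave, u32 addr,
			     void *buf, size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));
	int ret;

	/* mtk_spim_adjust_op_size() clamps the data length if needed */
	ret = spi_mem_adjust_op_size(slave, &op);
	if (ret)
		return ret;

	/* Checked by mtk_spim_supports_op(), executed by mtk_spim_exec_op() */
	return spi_mem_exec_op(slave, &op);
}
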
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 75b794548b..7e72ab9c24 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -276,6 +276,14 @@ config MTK_SNFI_SPI
 	  used to access SPI memory devices like SPI-NOR or SPI-NAND on
 	  platforms embedding this IP core, like MT7622/M7629.
 
+config MTK_SPIM
+	bool "MediaTek SPI-MEM master controller driver"
+	depends on SPI_MEM
+	help
+	  Enable the MediaTek SPI-MEM master controller driver. It is mainly
+	  used to access SPI flash devices and supports single, dual and quad
+	  mode transfers.
+
 config MVEBU_A3700_SPI
 	bool "Marvell Armada 3700 SPI driver"
 	select CLK_ARMADA_3720
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 4de77c260a..309f6b5328 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_MPC8XX_SPI) += mpc8xx_spi.o
 obj-$(CONFIG_MPC8XXX_SPI) += mpc8xxx_spi.o
 obj-$(CONFIG_MTK_SNFI_SPI) += mtk_snfi_spi.o
 obj-$(CONFIG_MTK_SNOR) += mtk_snor.o
+obj-$(CONFIG_MTK_SPIM) += mtk_spim.o
 obj-$(CONFIG_MT7620_SPI) += mt7620_spi.o
 obj-$(CONFIG_MT7621_SPI) += mt7621_spi.o
 obj-$(CONFIG_MSCC_BB_SPI) += mscc_bb_spi.o
diff --git a/drivers/spi/mtk_spim.c b/drivers/spi/mtk_spim.c
new file mode 100644
index 0000000000..b0f63c3c3f
--- /dev/null
+++ b/drivers/spi/mtk_spim.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: SkyLake.Huang <skylake.huang at mediatek.com>
+ */
+
+#include <clk.h>
+#include <cpu_func.h>
+#include <div64.h>
+#include <dm.h>
+#include <dm/device.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <dm/pinctrl.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <stdbool.h>
+#include <watchdog.h>
+
+#define SPI_CFG0_REG				0x0000
+#define SPI_CFG1_REG				0x0004
+#define SPI_TX_SRC_REG				0x0008
+#define SPI_RX_DST_REG				0x000c
+#define SPI_TX_DATA_REG				0x0010
+#define SPI_RX_DATA_REG				0x0014
+#define SPI_CMD_REG				0x0018
+#define SPI_IRQ_REG				0x001c
+#define SPI_STATUS_REG				0x0020
+#define SPI_PAD_SEL_REG				0x0024
+#define SPI_CFG2_REG				0x0028
+#define SPI_TX_SRC_REG_64			0x002c
+#define SPI_RX_DST_REG_64			0x0030
+#define SPI_CFG3_IPM_REG			0x0040
+
+#define SPI_CFG0_SCK_HIGH_OFFSET		0
+#define SPI_CFG0_SCK_LOW_OFFSET			8
+#define SPI_CFG0_CS_HOLD_OFFSET			16
+#define SPI_CFG0_CS_SETUP_OFFSET		24
+#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET		0
+#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET		16
+
+#define SPI_CFG1_CS_IDLE_OFFSET			0
+#define SPI_CFG1_PACKET_LOOP_OFFSET		8
+#define SPI_CFG1_PACKET_LENGTH_OFFSET		16
+#define SPI_CFG1_GET_TICKDLY_OFFSET		29
+
+#define SPI_CFG1_GET_TICKDLY_MASK		GENMASK(31, 29)
+#define SPI_CFG1_CS_IDLE_MASK			0xff
+#define SPI_CFG1_PACKET_LOOP_MASK		0xff00
+#define SPI_CFG1_PACKET_LENGTH_MASK		0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK		GENMASK(31, 16)
+#define SPI_CFG2_SCK_HIGH_OFFSET		0
+#define SPI_CFG2_SCK_LOW_OFFSET			16
+#define SPI_CFG2_SCK_HIGH_MASK			GENMASK(15, 0)
+#define SPI_CFG2_SCK_LOW_MASK			GENMASK(31, 16)
+
+#define SPI_CMD_ACT				BIT(0)
+#define SPI_CMD_RESUME				BIT(1)
+#define SPI_CMD_RST				BIT(2)
+#define SPI_CMD_PAUSE_EN			BIT(4)
+#define SPI_CMD_DEASSERT			BIT(5)
+#define SPI_CMD_SAMPLE_SEL			BIT(6)
+#define SPI_CMD_CS_POL				BIT(7)
+#define SPI_CMD_CPHA				BIT(8)
+#define SPI_CMD_CPOL				BIT(9)
+#define SPI_CMD_RX_DMA				BIT(10)
+#define SPI_CMD_TX_DMA				BIT(11)
+#define SPI_CMD_TXMSBF				BIT(12)
+#define SPI_CMD_RXMSBF				BIT(13)
+#define SPI_CMD_RX_ENDIAN			BIT(14)
+#define SPI_CMD_TX_ENDIAN			BIT(15)
+#define SPI_CMD_FINISH_IE			BIT(16)
+#define SPI_CMD_PAUSE_IE			BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE		BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP			BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET		22
+
+#define SPI_CMD_IPM_GET_TICKDLY_MASK		GENMASK(24, 22)
+
+#define PIN_MODE_CFG(x)				((x) / 2)
+
+#define SPI_CFG3_IPM_PIN_MODE_OFFSET		0
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR		BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN		BIT(3)
+#define SPI_CFG3_IPM_XMODE_EN			BIT(4)
+#define SPI_CFG3_IPM_NODATA_FLAG		BIT(5)
+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET		8
+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET	12
+#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET	16
+
+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK		GENMASK(1, 0)
+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK		GENMASK(11, 8)
+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK		GENMASK(15, 12)
+#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK		GENMASK(19, 16)
+
+#define MT8173_SPI_MAX_PAD_SEL			3
+
+#define MTK_SPI_PAUSE_INT_STATUS		0x2
+
+#define MTK_SPI_IDLE				0
+#define MTK_SPI_PAUSED				1
+
+#define MTK_SPI_MAX_FIFO_SIZE			32U
+#define MTK_SPI_PACKET_SIZE			1024
+#define MTK_SPI_IPM_PACKET_SIZE			SZ_64K
+#define MTK_SPI_IPM_PACKET_LOOP			SZ_256
+
+#define MTK_SPI_32BITS_MASK			0xffffffff
+
+#define DMA_ADDR_EXT_BITS			36
+#define DMA_ADDR_DEF_BITS			32
+
+#define CLK_TO_US(freq, clkcnt) DIV_ROUND_UP((clkcnt), (freq) / 1000000)
+
+struct mtk_spim_capability {
+	bool need_pad_sel;
+	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
+	bool must_tx;
+	/* some IC designs adjust the cfg register to enhance timing accuracy */
+	bool enhance_timing;
+	/* some ICs support DMA address extension */
+	bool dma_ext;
+	/* the IPM IP design improves some features and supports dual/quad mode */
+	bool ipm_design;
+	bool support_quad;
+};
+
+struct mtk_spim_priv {
+	void __iomem *base;
+	u32 state;
+	int pad_num;
+	u32 *pad_sel;
+	struct clk sel_clk, spi_clk;
+	struct spi_transfer *cur_transfer;
+	u32 xfer_len;
+	u32 num_xfered;
+	struct scatterlist *tx_sgl, *rx_sgl;
+	u32 tx_sgl_len, rx_sgl_len;
+	struct mtk_spim_capability hw_cap;
+	u32 tick_dly;
+	u32 sample_sel;
+
+	struct completion spimem_done;
+	bool use_spimem;
+	struct device *dev;
+	dma_addr_t tx_dma;
+	dma_addr_t rx_dma;
+};
+
+static void mtk_spim_reset(struct mtk_spim_priv *priv)
+{
+	u32 reg_val;
+
+	/* set the software reset bit in SPI_CMD_REG. */
+	reg_val = readl(priv->base + SPI_CMD_REG);
+	reg_val |= SPI_CMD_RST;
+	writel(reg_val, priv->base + SPI_CMD_REG);
+
+	reg_val = readl(priv->base + SPI_CMD_REG);
+	reg_val &= ~SPI_CMD_RST;
+	writel(reg_val, priv->base + SPI_CMD_REG);
+}
+
+static int mtk_spim_hw_init(struct spi_slave *slave)
+{
+	struct udevice *bus = dev_get_parent(slave->dev);
+	struct mtk_spim_priv *priv = dev_get_priv(bus);
+	u16 cpha, cpol;
+	u32 reg_val;
+
+	cpha = slave->mode & SPI_CPHA ? 1 : 0;
+	cpol = slave->mode & SPI_CPOL ? 1 : 0;
+
+	if (priv->hw_cap.enhance_timing) {
+		if (priv->hw_cap.ipm_design) {
+			/* the CFG3 reg is only used by spi-mem;
+			 * write it to its default value here
+			 */
+			writel(0x0, priv->base + SPI_CFG3_IPM_REG);
+
+			reg_val = readl(priv->base + SPI_CMD_REG);
+			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+			reg_val |= priv->tick_dly
+				   << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
+			writel(reg_val, priv->base + SPI_CMD_REG);
+		} else {
+			reg_val = readl(priv->base + SPI_CFG1_REG);
+			reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
+			reg_val |= priv->tick_dly
+				   << SPI_CFG1_GET_TICKDLY_OFFSET;
+			writel(reg_val, priv->base + SPI_CFG1_REG);
+		}
+	}
+
+	reg_val = readl(priv->base + SPI_CMD_REG);
+	if (priv->hw_cap.ipm_design) {
+		/* SPI transfer without idle time until packet length done */
+		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+		if (slave->mode & SPI_LOOP)
+			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+		else
+			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+	}
+
+	if (cpha)
+		reg_val |= SPI_CMD_CPHA;
+	else
+		reg_val &= ~SPI_CMD_CPHA;
+	if (cpol)
+		reg_val |= SPI_CMD_CPOL;
+	else
+		reg_val &= ~SPI_CMD_CPOL;
+
+	/* set the tx/rx bit order (MSB or LSB first) */
+	if (slave->mode & SPI_LSB_FIRST) {
+		reg_val &= ~SPI_CMD_TXMSBF;
+		reg_val &= ~SPI_CMD_RXMSBF;
+	} else {
+		reg_val |= SPI_CMD_TXMSBF;
+		reg_val |= SPI_CMD_RXMSBF;
+	}
+
+	/* set the tx/rx endian */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	reg_val &= ~SPI_CMD_TX_ENDIAN;
+	reg_val &= ~SPI_CMD_RX_ENDIAN;
+#else
+	reg_val |= SPI_CMD_TX_ENDIAN;
+	reg_val |= SPI_CMD_RX_ENDIAN;
+#endif
+
+	if (priv->hw_cap.enhance_timing) {
+		/* set CS polarity */
+		if (slave->mode & SPI_CS_HIGH)
+			reg_val |= SPI_CMD_CS_POL;
+		else
+			reg_val &= ~SPI_CMD_CS_POL;
+
+		if (priv->sample_sel)
+			reg_val |= SPI_CMD_SAMPLE_SEL;
+		else
+			reg_val &= ~SPI_CMD_SAMPLE_SEL;
+	}
+
+	/* disable dma mode */
+	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
+
+	/* disable deassert mode */
+	reg_val &= ~SPI_CMD_DEASSERT;
+
+	writel(reg_val, priv->base + SPI_CMD_REG);
+
+#if !CONFIG_IS_ENABLED(DM_SPI)
+	/* pad select, we don't need this in IPM design. */
+	if (priv->hw_cap.need_pad_sel)
+		writel(priv->pad_sel[slave->cs],
+		       priv->base + SPI_PAD_SEL_REG);
+#endif
+
+	return 0;
+}
+
+static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
+				      u32 speed_hz)
+{
+	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
+
+	spi_clk_hz = clk_get_rate(&priv->spi_clk);
+	if (speed_hz <= spi_clk_hz / 4)
+		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
+	else
+		div = 4;
+
+	sck_time = (div + 1) / 2;
+	cs_time = sck_time * 2;
+
+	if (priv->hw_cap.enhance_timing) {
+		reg_val = (((sck_time - 1) & 0xffff)
+			   << SPI_CFG2_SCK_HIGH_OFFSET);
+		reg_val |= (((sck_time - 1) & 0xffff)
+			   << SPI_CFG2_SCK_LOW_OFFSET);
+		writel(reg_val, priv->base + SPI_CFG2_REG);
+		reg_val = (((cs_time - 1) & 0xffff)
+			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xffff)
+			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+		writel(reg_val, priv->base + SPI_CFG0_REG);
+	} else {
+		reg_val = (((sck_time - 1) & 0xff)
+			   << SPI_CFG0_SCK_HIGH_OFFSET);
+		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
+		writel(reg_val, priv->base + SPI_CFG0_REG);
+	}
+
+	reg_val = readl(priv->base + SPI_CFG1_REG);
+	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+	writel(reg_val, priv->base + SPI_CFG1_REG);
+}
+
+static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
+{
+	u32 packet_size, packet_loop, reg_val;
+
+	if (priv->hw_cap.ipm_design)
+		packet_size = min_t(u32,
+				    priv->xfer_len,
+				    MTK_SPI_IPM_PACKET_SIZE);
+	else
+		packet_size = min_t(u32,
+				    priv->xfer_len,
+				    MTK_SPI_PACKET_SIZE);
+
+	packet_loop = priv->xfer_len / packet_size;
+
+	reg_val = readl(priv->base + SPI_CFG1_REG);
+	if (priv->hw_cap.ipm_design)
+		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+	else
+		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
+
+	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+
+	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
+
+	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+
+	writel(reg_val, priv->base + SPI_CFG1_REG);
+}
+
+static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
+{
+	u32 cmd;
+
+	cmd = readl(priv->base + SPI_CMD_REG);
+	if (priv->state == MTK_SPI_IDLE)
+		cmd |= SPI_CMD_ACT;
+	else
+		cmd |= SPI_CMD_RESUME;
+	writel(cmd, priv->base + SPI_CMD_REG);
+}
+
+static bool mtk_spim_supports_op(struct spi_slave *slave,
+				 const struct spi_mem_op *op)
+{
+	if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
+	    op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
+	    op->data.buswidth > 4)
+		return false;
+
+	if (op->addr.nbytes && op->dummy.nbytes &&
+	    op->addr.buswidth != op->dummy.buswidth)
+		return false;
+
+	if (op->addr.nbytes + op->dummy.nbytes > 16)
+		return false;
+
+	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
+		    MTK_SPI_IPM_PACKET_LOOP ||
+		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
+			return false;
+	}
+
+	return true;
+}
+
+static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
+				    const struct spi_mem_op *op)
+{
+	writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
+	       priv->base + SPI_TX_SRC_REG);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (priv->hw_cap.dma_ext)
+		writel((u32)(priv->tx_dma >> 32),
+		       priv->base + SPI_TX_SRC_REG_64);
+#endif
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
+		       priv->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (priv->hw_cap.dma_ext)
+			writel((u32)(priv->rx_dma >> 32),
+			       priv->base + SPI_RX_DST_REG_64);
+#endif
+	}
+}
+
+static int mtk_spim_transfer_wait(struct spi_slave *slave,
+				  const struct spi_mem_op *op)
+{
+	struct udevice *bus = dev_get_parent(slave->dev);
+	struct mtk_spim_priv *priv = dev_get_priv(bus);
+	u32 sck_l, sck_h, spi_bus_clk, clk_count, reg;
+	ulong us = 1;
+	int ret = 0;
+
+	if (op->data.dir == SPI_MEM_NO_DATA)
+		clk_count = 32;
+	else
+		clk_count = op->data.nbytes;
+
+	spi_bus_clk = clk_get_rate(&priv->spi_clk);
+	sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
+	sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
+	do_div(spi_bus_clk, sck_l + sck_h + 2);
+
+	us = CLK_TO_US(spi_bus_clk, clk_count * 8);
+	us += 1000 * 1000; /* 1s tolerance */
+
+	if (us > UINT_MAX)
+		us = UINT_MAX;
+
+	ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
+				 reg & 0x1, us);
+	if (ret < 0) {
+		dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int mtk_spim_exec_op(struct spi_slave *slave,
+			    const struct spi_mem_op *op)
+{
+	struct udevice *bus = dev_get_parent(slave->dev);
+	struct mtk_spim_priv *priv = dev_get_priv(bus);
+	u32 reg_val, nio = 1, tx_size;
+	char *tx_tmp_buf;
+	char *rx_tmp_buf;
+	int i, ret = 0;
+
+	priv->use_spimem = true;
+
+	mtk_spim_reset(priv);
+	mtk_spim_hw_init(slave);
+	mtk_spim_prepare_transfer(priv, slave->max_hz);
+
+	reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
+	/* opcode byte len */
+	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
+	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
+
+	/* addr & dummy byte len */
+	if (op->addr.nbytes || op->dummy.nbytes)
+		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
+			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
+
+	/* data byte len */
+	if (!op->data.nbytes) {
+		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
+		writel(0, priv->base + SPI_CFG1_REG);
+	} else {
+		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
+		priv->xfer_len = op->data.nbytes;
+		mtk_spim_setup_packet(priv);
+	}
+
+	if (op->addr.nbytes || op->dummy.nbytes) {
+		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
+			reg_val |= SPI_CFG3_IPM_XMODE_EN;
+		else
+			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
+	}
+
+	if (op->addr.buswidth == 2 ||
+	    op->dummy.buswidth == 2 ||
+	    op->data.buswidth == 2)
+		nio = 2;
+	else if (op->addr.buswidth == 4 ||
+		 op->dummy.buswidth == 4 ||
+		 op->data.buswidth == 4)
+		nio = 4;
+
+	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
+
+	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+	else
+		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+	writel(reg_val, priv->base + SPI_CFG3_IPM_REG);
+
+	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		tx_size += op->data.nbytes;
+
+	tx_size = max(tx_size, (u32)32);
+
+	/* Fill up tx data */
+	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
+	if (!tx_tmp_buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	tx_tmp_buf[0] = op->cmd.opcode;
+
+	if (op->addr.nbytes) {
+		for (i = 0; i < op->addr.nbytes; i++)
+			tx_tmp_buf[i + 1] = op->addr.val >>
+					(8 * (op->addr.nbytes - i - 1));
+	}
+
+	if (op->dummy.nbytes)
+		memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
+		       op->dummy.nbytes);
+
+	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
+		       op->data.buf.out, op->data.nbytes);
+	/* Finish filling up tx data */
+
+	priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, priv->tx_dma)) {
+		ret = -ENOMEM;
+		goto tx_free;
+	}
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
+			if (!rx_tmp_buf) {
+				ret = -ENOMEM;
+				goto tx_unmap;
+			}
+		} else {
+			rx_tmp_buf = op->data.buf.in;
+		}
+
+		priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
+					      DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->dev, priv->rx_dma)) {
+			ret = -ENOMEM;
+			goto rx_free;
+		}
+	}
+
+	reg_val = readl(priv->base + SPI_CMD_REG);
+	reg_val |= SPI_CMD_TX_DMA;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val |= SPI_CMD_RX_DMA;
+
+	writel(reg_val, priv->base + SPI_CMD_REG);
+
+	mtk_spim_setup_dma_xfer(priv, op);
+
+	mtk_spim_enable_transfer(priv);
+
+	/* Wait for the transfer to complete (status is polled) */
+	ret = mtk_spim_transfer_wait(slave, op);
+	if (ret)
+		goto rx_unmap;
+
+	if (op->data.dir == SPI_MEM_DATA_IN &&
+	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
+		memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
+
+rx_unmap:
+	/* spi disable dma */
+	reg_val = readl(priv->base + SPI_CMD_REG);
+	reg_val &= ~SPI_CMD_TX_DMA;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val &= ~SPI_CMD_RX_DMA;
+	writel(reg_val, priv->base + SPI_CMD_REG);
+
+	writel(0, priv->base + SPI_TX_SRC_REG);
+	writel(0, priv->base + SPI_RX_DST_REG);
+
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		dma_unmap_single(priv->rx_dma,
+				 op->data.nbytes, DMA_FROM_DEVICE);
+rx_free:
+	if (op->data.dir == SPI_MEM_DATA_IN &&
+	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
+		kfree(rx_tmp_buf);
+tx_unmap:
+	dma_unmap_single(priv->tx_dma,
+			 tx_size, DMA_TO_DEVICE);
+tx_free:
+	kfree(tx_tmp_buf);
+exit:
+	priv->use_spimem = false;
+
+	return ret;
+}
+
+static int mtk_spim_adjust_op_size(struct spi_slave *slave,
+				   struct spi_mem_op *op)
+{
+	int opcode_len;
+
+	if (!op->data.nbytes)
+		return 0;
+
+	if (op->data.dir != SPI_MEM_NO_DATA) {
+		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
+			/* force data buffer dma-aligned. */
+			op->data.nbytes -= op->data.nbytes % 4;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
+{
+	int ret;
+
+	priv->hw_cap.need_pad_sel = dev_read_bool(dev, "need_pad_sel");
+	priv->hw_cap.must_tx = dev_read_bool(dev, "must_tx");
+	priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
+	priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
+	priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
+	priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");
+
+	ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
+	if (ret < 0)
+		dev_err(priv->dev, "tick dly not set.\n");
+
+	ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
+	if (ret < 0)
+		dev_err(priv->dev, "sample sel not set.\n");
+
+	return ret;
+}
+
+static int mtk_spim_probe(struct udevice *dev)
+{
+	struct mtk_spim_priv *priv = dev_get_priv(dev);
+	int ret;
+
+	priv->base = devfdt_get_addr_ptr(dev);
+	if (!priv->base)
+		return -EINVAL;
+
+	mtk_spim_get_attr(priv, dev);
+
+	ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to get sel-clk\n");
+		return ret;
+	}
+
+	ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to get spi-clk\n");
+		return ret;
+	}
+
+	clk_enable(&priv->sel_clk);
+	clk_enable(&priv->spi_clk);
+
+	return 0;
+}
+
+static int mtk_spim_set_speed(struct udevice *dev, uint speed)
+{
+	return 0;
+}
+
+static int mtk_spim_set_mode(struct udevice *dev, uint mode)
+{
+	return 0;
+}
+
+static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
+	.adjust_op_size = mtk_spim_adjust_op_size,
+	.supports_op = mtk_spim_supports_op,
+	.exec_op = mtk_spim_exec_op
+};
+
+static const struct dm_spi_ops mtk_spim_ops = {
+	.mem_ops = &mtk_spim_mem_ops,
+	.set_speed = mtk_spim_set_speed,
+	.set_mode = mtk_spim_set_mode,
+};
+
+static const struct udevice_id mtk_spim_ids[] = {
+	{ .compatible = "mediatek,ipm-spi" },
+	{}
+};
+
+U_BOOT_DRIVER(mtk_spim) = {
+	.name = "mtk_spim",
+	.id = UCLASS_SPI,
+	.of_match = mtk_spim_ids,
+	.ops = &mtk_spim_ops,
+	.priv_auto = sizeof(struct mtk_spim_priv),
+	.probe = mtk_spim_probe,
+};
-- 
2.17.1


