[RFC PATCH 08/10] remoteproc: add zynqmp driver

Tanmay Shah tanmay.shah at amd.com
Tue Jul 25 16:06:48 CEST 2023


ZynqMP platform has ARM Cortex-R5 dual core remote processor also
known as RPU (Real-time Processing Unit).
This remoteproc platform driver is responsible for configuring the RPU
and managing the life-cycle of the RPU, along with other platform-specific
operations.

Signed-off-by: Tanmay Shah <tanmay.shah at amd.com>
---
 MAINTAINERS                        |   1 +
 drivers/firmware/firmware-zynqmp.c |  20 ++
 drivers/remoteproc/Kconfig         |   9 +
 drivers/remoteproc/Makefile        |   1 +
 drivers/remoteproc/xlnx_rproc.c    | 411 +++++++++++++++++++++++++++++
 include/zynqmp_firmware.h          |  16 ++
 6 files changed, 458 insertions(+)
 create mode 100644 drivers/remoteproc/xlnx_rproc.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 8e40da38e7..feaf2ecd90 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1323,6 +1323,7 @@ REMOTEPROC
 M:	Tanmay Shah <tanmay.shah at amd.com>
 S:	Maintained
 F:	drivers/remoteproc/rproc_virtio.c
+F:	drivers/remoteproc/xlnx_rproc.c
 F:	include/rproc_virtio.h
 
 RISC-V
diff --git a/drivers/firmware/firmware-zynqmp.c b/drivers/firmware/firmware-zynqmp.c
index 0897992405..ec6057d4e3 100644
--- a/drivers/firmware/firmware-zynqmp.c
+++ b/drivers/firmware/firmware-zynqmp.c
@@ -202,6 +202,26 @@ int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
 				 qos, ack, NULL);
 }
 
+/**
+ * zynqmp_pm_request_wake - PM call to wake up selected master or subsystem
+ * @node:  Node ID of the master or subsystem
+ * @set_addr:  Specifies whether the address argument is relevant
+ * @address:   Address from which to resume when woken up
+ * @ack:   Flag to specify whether acknowledge requested
+ *
+ * Return: status, either success or error+reason
+ */
+int zynqmp_pm_request_wake(const u32 node,
+			   const bool set_addr,
+			   const u64 address,
+			   const enum zynqmp_pm_request_ack ack)
+{
+	/*
+	 * set_addr flag is encoded into 1st bit of address; the firmware
+	 * takes the 64-bit address split across two 32-bit arguments
+	 */
+	return xilinx_pm_request(PM_REQUEST_WAKEUP, node,
+				 lower_32_bits(address | set_addr),
+				 upper_32_bits(address), ack, NULL);
+}
+
 int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
 {
 	int ret;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index b758c248e4..9fd8cda3c8 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -113,4 +113,13 @@ config REMOTEPROC_VIRTIO
 	  drivers provide a set of ops for the real virtio device
 	  driver to call.
 
+config REMOTEPROC_XLNX
+	bool "Support for AMD-Xilinx platform's remoteproc driver"
+	select REMOTEPROC_VIRTIO
+	depends on DM
+	depends on ZYNQMP_FIRMWARE
+	help
+	  Say 'y' here to add support for remoteproc platform
+	  driver for various AMD-Xilinx platforms
+
 endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 61fdb87efb..1d0c48820b 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_REMOTEPROC_TI_POWER) += ti_power_proc.o
 obj-$(CONFIG_REMOTEPROC_TI_PRU) += pru_rproc.o
 obj-$(CONFIG_REMOTEPROC_TI_IPU) += ipu_rproc.o
 obj-$(CONFIG_REMOTEPROC_VIRTIO) += rproc_virtio.o
+obj-$(CONFIG_REMOTEPROC_XLNX) += xlnx_rproc.o
diff --git a/drivers/remoteproc/xlnx_rproc.c b/drivers/remoteproc/xlnx_rproc.c
new file mode 100644
index 0000000000..f29b958b7f
--- /dev/null
+++ b/drivers/remoteproc/xlnx_rproc.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD-Xilinx remoteproc driver
+ *
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <remoteproc.h>
+#include <rpmsg.h>
+#include <zynqmp_firmware.h>
+#include <asm/dma-mapping.h>
+#include <asm/io.h>
+#include <dm/device_compat.h>
+#include <dm/device-internal.h>
+#include <linux/ioport.h>
+
+#define MAX_CORE 2
+#define RSCTBL_PROP	"xlnx,rsc-tbl"
+#define RSC_TBL_SIZE	0x400
+
+/* Values of the "xlnx,cluster-mode" device-tree property */
+enum xlnx_cluster_mode_dt_prop {
+	split_mode = 0,
+	lockstep_mode = 1,
+	single_cpu_mode = 2,
+};
+
+/* Life-cycle states of one RPU core as tracked by this driver */
+enum xlnx_rpu_state {
+	INVALID_STATE = 0,
+	DRIVER_PROBE = 1,	/* core probed, resources not yet parsed */
+	RPU_INIT = 2,		/* memory regions parsed, ready to attach */
+	RPU_ATTACH = 3,		/* resource table validated and attached */
+	RPU_DETACH = 4		/* detached; resources kept for re-attach */
+};
+
+struct rproc xlnx_rproc_cfg_arr[2];
+
+/**
+ * struct xlnx_rproc_core_privdata - contains private data of RPU core driver
+ *
+ * @cluster_dev:     parent device of each core, device mapped to
+ *                   cluster node
+ * @rpu_mode:        one of split, lockstep or single-cpu mode
+ * @tcm_mode:        TCM configuration - split or lockstep
+ * @state:	     current state of RPU (enum xlnx_rpu_state)
+ * @rproc_cfg:       core's corresponding remoteproc data
+ * @vdev:            core's corresponding remoteproc virtio device
+ * @rsc_pa:          core's physical resource table address (set on attach)
+ * @pd_node:         power-domain id of core, read from "power-domains"
+ * @index:           core's child node index in cluster node in device-tree
+ */
+struct xlnx_rproc_core_privdata {
+	struct udevice *cluster_dev;
+	enum rpu_oper_mode rpu_mode;
+	enum rpu_tcm_comb tcm_mode;
+	enum xlnx_rpu_state state;
+	struct rproc rproc_cfg;
+	struct udevice *vdev;
+	phys_addr_t rsc_pa;
+	int pd_node;
+	int index;
+};
+
+/**
+ * struct xlnx_rproc_cluster_privdata - contains private data of cluster driver
+ *
+ * @cores: Array of pointers to R5 cores within the cluster
+ * @rpu_cluster_mode: xlnx,cluster-mode dt prop value to configure RPU
+ * @core_probed: keep count of number of cores probed
+ * @core_count: number of individual cores available based on cluster-mode
+ *              for lockstep this is 1, for split this is 2.
+ */
+struct xlnx_rproc_cluster_privdata {
+	struct xlnx_rproc_core_privdata *cores[MAX_CORE];
+	enum xlnx_cluster_mode_dt_prop rpu_cluster_mode;
+	int core_probed;
+	int core_count;
+};
+
+/*
+ * xlnx_rproc_detach() - mark an attached RPU core as detached
+ *
+ * Only flips the state; allocated resources stay in place so a later
+ * attach can re-use them. Detaching a core that was never attached is
+ * a harmless no-op.
+ */
+static int xlnx_rproc_detach(struct udevice *udev)
+{
+	struct xlnx_rproc_core_privdata *priv = dev_get_priv(udev);
+
+	if (priv->state == RPU_ATTACH)
+		priv->state = RPU_DETACH;
+	else
+		debug("RPU %s isn't attached yet\n", udev->name);
+
+	return 0;
+}
+
+/*
+ * xlnx_rproc_attach() - attach to already-running RPU firmware
+ *
+ * Maps the resource-table carveout named by the "xlnx,rsc-tbl" property,
+ * validates the table the firmware placed there and registers it with the
+ * remoteproc core.
+ *
+ * Return: 0 on success (or benign no-op), negative errno on failure
+ */
+static int xlnx_rproc_attach(struct udevice *udev)
+{
+	struct xlnx_rproc_core_privdata *core = dev_get_priv(udev);
+	struct rproc *rproc = rproc_get_cfg(udev);
+	struct resource_table *rsctbl;
+	u32 rsc_tbl_start;
+	int ret;
+
+	/*
+	 * RPU attach will parse and alloc resources only after INIT state.
+	 * Once the resources are allocated we won't be releasing them during
+	 * detach, but we just change the state of RPU. So, during kick, based
+	 * on state we can decide if RPU should be notified or not
+	 */
+	if (core->state == RPU_DETACH) {
+		core->state = RPU_ATTACH;
+		return 0;
+	}
+
+	if (core->state != RPU_INIT) {
+		debug("RPU isn't initialized, can't attach\n");
+		return 0;
+	}
+
+	/* get rsc tbl carveout info: second cell of the xlnx,rsc-tbl prop */
+	ret = dev_read_u32_index(udev, RSCTBL_PROP, 1, &rsc_tbl_start);
+	if (ret < 0) {
+		debug("failed to read phandle for prop %s\n", RSCTBL_PROP);
+		return ret;
+	}
+
+	core->rsc_pa = (phys_addr_t)map_physmem(rsc_tbl_start, RSC_TBL_SIZE,
+						MAP_NOCACHE);
+	/* check the mapping before dereferencing or publishing it */
+	if (!core->rsc_pa) {
+		dev_info(udev, "rsc tbl not available\n");
+		return -EINVAL;
+	}
+
+	rsctbl = (struct resource_table *)core->rsc_pa;
+	if (rsctbl->ver != 1) {
+		debug("fw rsc table version %d not compatible\n", rsctbl->ver);
+		return -EINVAL;
+	}
+
+	if (rsctbl->num < 1 || rsctbl->num > 255) {
+		debug("number of resources are invalid %d\n", rsctbl->num);
+		return -EINVAL;
+	}
+
+	/* publish the table to the core framework only once it is known valid */
+	rproc->table_ptr = rsctbl;
+
+	if (rproc_attach_resource_table(udev)) {
+		debug("rsc table not found\n");
+		return -EINVAL;
+	}
+
+	core->state = RPU_ATTACH;
+
+	return 0;
+}
+
+/*
+ * xlnx_remove_all_res() - free every carveout queued on the mapping list
+ * @mapping: head entry (rproc->mappings); the head itself is embedded in
+ *           struct rproc and must NOT be freed
+ *
+ * The previous open-coded walk freed the embedded list_head pointers
+ * rather than the containing rproc_mem_entry allocations, freed the head
+ * sentinel, and never terminated on a circular list. Use the safe list
+ * iterator and free the containing entries instead.
+ */
+static void xlnx_remove_all_res(struct rproc_mem_entry *mapping)
+{
+	struct rproc_mem_entry *entry;
+	struct list_head *pos, *tmp;
+
+	list_for_each_safe(pos, tmp, &mapping->node) {
+		entry = list_entry(pos, struct rproc_mem_entry, node);
+		list_del(pos);
+		kfree(entry);
+	}
+}
+
+/*
+ * xlnx_add_res() - append a memory mapping to the core's carveout list
+ * @dev:     RPU core device
+ * @mapping: carveout entry to queue (ownership stays with the list)
+ *
+ * Return: 0 on success, -EINVAL if no rproc config is registered
+ */
+static int xlnx_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
+{
+	struct rproc *rproc = rproc_get_cfg(dev);
+
+	/* guard against a core that was never registered with the uclass */
+	if (!rproc)
+		return -EINVAL;
+
+	list_add_tail(&mapping->node, &rproc->mappings.node);
+
+	return 0;
+}
+
+/*
+ * zynqmp_r5_get_mem_region_node() - parse "memory-region" carveouts
+ * @udev: RPU core device
+ *
+ * Reads the four phandles of the memory-region property, maps each region
+ * uncached and queues it on the core's mapping list. On any mid-loop
+ * failure all entries queued so far are freed.
+ *
+ * Return: 0 on success, -EINVAL on parse/alloc failure
+ */
+static int zynqmp_r5_get_mem_region_node(struct udevice *udev)
+{
+	struct rproc_mem_entry *mapping;
+	int num_mem_region = 4, i, ret;
+	u32 mem_reg_vals[4] = {0};
+	ofnode mem_reg_node;
+	struct rproc *rproc;
+
+	rproc = rproc_get_cfg(udev);
+	if (!rproc)
+		return -EINVAL;
+
+	ret = dev_read_u32_array(udev, "memory-region", mem_reg_vals,
+				 num_mem_region);
+	if (ret < 0) {
+		debug("Unable to read memory-region property\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_mem_region; i++) {
+		mem_reg_node = ofnode_get_by_phandle(mem_reg_vals[i]);
+		if (!ofnode_valid(mem_reg_node)) {
+			debug("Could not parse mem region node\n");
+			/* free any mappings already queued */
+			goto remove_mem_region;
+		}
+
+		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping)
+			goto remove_mem_region;
+
+		mapping->dma = ofnode_get_addr(mem_reg_node);
+		mapping->len = ofnode_get_size(mem_reg_node);
+		mapping->da = mapping->dma;
+		mapping->va = map_physmem(mapping->da, mapping->len, MAP_NOCACHE);
+
+		strlcpy(mapping->name, ofnode_get_name(mem_reg_node),
+			RPMSG_NAME_SIZE);
+
+		debug("dev %s mapping %s: va=0x%p, da=0x%x, dma=0x%llx, len=0x%x\n",
+		      udev->name, mapping->name, mapping->va, mapping->da,
+		      mapping->dma, mapping->len);
+		xlnx_add_res(udev, mapping);
+	}
+
+	return 0;
+
+remove_mem_region:
+	xlnx_remove_all_res(&rproc->mappings);
+
+	return -EINVAL;
+}
+
+/*
+ * xlnx_rproc_init() - one-time resource setup for a probed RPU core
+ *
+ * Parses the optional memory-region carveouts and advances the core's
+ * state machine from DRIVER_PROBE to RPU_INIT. Calling it in any other
+ * state is a no-op.
+ */
+static int xlnx_rproc_init(struct udevice *udev)
+{
+	struct xlnx_rproc_core_privdata *priv = dev_get_priv(udev);
+	struct rproc *cfg = rproc_get_cfg(udev);
+	int err;
+
+	if (priv->state != DRIVER_PROBE)
+		return 0;
+
+	/*
+	 * memory-region is an optional property: log a failure but carry on
+	 * rather than failing the whole init command.
+	 */
+	err = zynqmp_r5_get_mem_region_node(udev);
+	if (err)
+		debug("adding memory-region failed with ret %d\n", err);
+
+	cfg->support_rpmsg_virtio = true;
+	priv->state = RPU_INIT;
+
+	return 0;
+}
+
+/*
+ * xlnx_rproc_probe() - probe one RPU core of the cluster
+ *
+ * Registers the core with its parent cluster, picks the rproc config slot
+ * matching the probe order and derives the TCM/RPU operating modes from
+ * the cluster mode.
+ *
+ * Return: 0 on success, -EINVAL if the core is unused in this mode or the
+ * mode is unsupported
+ */
+static int xlnx_rproc_probe(struct udevice *udev)
+{
+	struct xlnx_rproc_cluster_privdata *cluster = dev_get_priv(udev->parent);
+	struct xlnx_rproc_core_privdata *core = dev_get_priv(udev);
+	struct dm_rproc_uclass_pdata *pdata = dev_get_plat(udev);
+
+	/* Assume primary core gets probed first */
+	if (cluster->core_probed >= cluster->core_count) {
+		debug("core %d isn't used in mode %d\n",
+		      cluster->core_probed, cluster->rpu_cluster_mode);
+		return -EINVAL;
+	}
+
+	switch (cluster->rpu_cluster_mode) {
+	case split_mode:
+		core->tcm_mode = PM_RPU_TCM_SPLIT;
+		core->rpu_mode = PM_RPU_MODE_SPLIT;
+		break;
+	case lockstep_mode:
+		core->tcm_mode = PM_RPU_TCM_COMB;
+		core->rpu_mode = PM_RPU_MODE_LOCKSTEP;
+		break;
+	default:
+		/* single-cpu (and anything else) is rejected */
+		debug("single cpu cluster mode not supported\n");
+		return -EINVAL;
+	}
+
+	core->index = cluster->core_probed;
+	cluster->cores[core->index] = core;
+
+	pdata->rproc = &xlnx_rproc_cfg_arr[core->index];
+	pdata->rproc->rproc_id = core->index;
+	pdata->rproc->support_rpmsg_virtio = true;
+	INIT_LIST_HEAD(&pdata->rproc->mappings.node);
+
+	cluster->core_probed++;
+	core->state = DRIVER_PROBE;
+
+	return 0;
+}
+
+/*
+ * xlnx_rpu_rproc_get_loaded_rsc_table() - report the firmware's resource
+ * table mapped during attach
+ * @dev:      RPU core device
+ * @table_sz: out: size of the table carveout (fixed RSC_TBL_SIZE window)
+ *
+ * Return: mapped table address; NULL if attach has not mapped one yet
+ * (rsc_pa is only set in xlnx_rproc_attach)
+ */
+static struct resource_table *
+xlnx_rpu_rproc_get_loaded_rsc_table(struct udevice *dev, int *table_sz)
+{
+	struct xlnx_rproc_core_privdata *core = dev_get_priv(dev);
+
+	*table_sz = RSC_TBL_SIZE;
+
+	return (struct resource_table *)core->rsc_pa;
+}
+
+/*
+ * xlnx_rproc_kick() - notify the remote core
+ * @notify_id: virtqueue/notification id (unused here; state gate only)
+ *
+ * Only an attached RPU may be kicked; any other state is an error.
+ */
+static int xlnx_rproc_kick(struct udevice *dev, int notify_id)
+{
+	struct xlnx_rproc_core_privdata *priv = dev_get_priv(dev);
+
+	if (priv->state == RPU_ATTACH)
+		return 0;
+
+	debug("error: RPU %s state=%d\n", dev->name, priv->state);
+	return -EINVAL;
+}
+
+/*
+ * xlnx_rproc_of_to_plat() - read per-core device-tree data
+ *
+ * Extracts the power-domain id (second cell of "power-domains").
+ * Use a u32 temporary: dev_read_u32_index() takes a u32 *, and
+ * core->pd_node is an int.
+ *
+ * Return: 0 on success, -EINVAL if the property is missing/short
+ */
+static int xlnx_rproc_of_to_plat(struct udevice *udev)
+{
+	struct xlnx_rproc_core_privdata *core = dev_get_priv(udev);
+	u32 pd_node;
+	int ret;
+
+	ret = dev_read_u32_index(udev, "power-domains", 1, &pd_node);
+	if (ret) {
+		debug("failed to read power-domains property\n");
+		return -EINVAL;
+	}
+
+	core->pd_node = pd_node;
+
+	return 0;
+}
+
+/*
+ * remoteproc ops for the RPU core. No load/start ops are provided: this
+ * driver only attaches to firmware that is already running.
+ */
+static const struct dm_rproc_ops xlnx_rproc_ops = {
+	.init = xlnx_rproc_init,
+	.attach = xlnx_rproc_attach,
+	.detach = xlnx_rproc_detach,
+	.get_loaded_rsc_table = xlnx_rpu_rproc_get_loaded_rsc_table,
+	.add_res = xlnx_add_res,
+	.kick = xlnx_rproc_kick,
+};
+
+static const struct udevice_id xlnx_rproc_ids[] = {
+	{ .compatible = "xlnx,zynqmp-r5f" },
+	{ /* sentinel */ }
+};
+
+/* Driver bound to each "xlnx,zynqmp-r5f" core node under the cluster */
+U_BOOT_DRIVER(xlnx_rproc) = {
+	.name = "xlnx-rproc",
+	.of_match = xlnx_rproc_ids,
+	.id = UCLASS_REMOTEPROC,
+	.ops = &xlnx_rproc_ops,
+	.probe = xlnx_rproc_probe,
+	.of_to_plat = xlnx_rproc_of_to_plat,
+	.priv_auto = sizeof(struct xlnx_rproc_core_privdata),
+	.flags = DM_FLAG_VITAL,
+};
+
+/*
+ * xlnx_rproc_cluster_probe() - probe the R5 cluster node
+ *
+ * Reads the optional "xlnx,cluster-mode" property (defaults to lockstep)
+ * and derives the usable core count. Note the value just read must be
+ * validated - not the still-zeroed priv field.
+ *
+ * Return: 0 on success, -EINVAL on bad core count or unsupported mode
+ */
+static int xlnx_rproc_cluster_probe(struct udevice *udev)
+{
+	struct xlnx_rproc_cluster_privdata *cluster = dev_get_priv(udev);
+	u32 cluster_mode;
+	int ret;
+
+	if (device_get_child_count(udev) < MAX_CORE) {
+		dev_err(udev, "Invalid number of R5 cores for cluster %s\n",
+			udev->name);
+		return -EINVAL;
+	}
+
+	/* set mode; property is optional, lockstep is the default */
+	ret = dev_read_u32(udev, "xlnx,cluster-mode", &cluster_mode);
+	if (ret < 0)
+		cluster_mode = lockstep_mode;
+
+	switch (cluster_mode) {
+	case split_mode:
+		cluster->core_count = 2;
+		break;
+	case lockstep_mode:
+		cluster->core_count = 1;
+		break;
+	case single_cpu_mode:
+		debug("single cpu cluster mode not supported\n");
+		return -EINVAL;
+	default:
+		debug("invalid cluster mode %d\n", cluster_mode);
+		return -EINVAL;
+	}
+
+	cluster->rpu_cluster_mode = cluster_mode;
+	cluster->core_probed = 0;
+
+	return 0;
+}
+
+static const struct udevice_id xlnx_cluster_ids[] = {
+	{ .compatible = "xlnx,zynqmp-r5fss", },
+	{ /* sentinel */ }
+};
+
+/* Driver for the R5 cluster ("r5fss") node; cores are its children */
+U_BOOT_DRIVER(xlnx_cluster) = {
+	.name = "xlnx_cluster",
+	.of_match = xlnx_cluster_ids,
+	.id = UCLASS_MISC,
+	.probe = xlnx_rproc_cluster_probe,
+	.priv_auto = sizeof(struct xlnx_rproc_cluster_privdata),
+};
diff --git a/include/zynqmp_firmware.h b/include/zynqmp_firmware.h
index ce086f48d4..6419ca0c58 100644
--- a/include/zynqmp_firmware.h
+++ b/include/zynqmp_firmware.h
@@ -450,6 +450,21 @@ enum zynqmp_pm_request_ack {
 	ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
 };
 
+/* RPU boot vector location: low (0x0) or high (0xFFFF0000) vectors */
+enum rpu_boot_mem {
+	PM_RPU_BOOTMEM_LOVEC = 0,
+	PM_RPU_BOOTMEM_HIVEC = 1,
+};
+
+/* TCM banks usage: split between the two cores or combined */
+enum rpu_tcm_comb {
+	PM_RPU_TCM_SPLIT = 0,
+	PM_RPU_TCM_COMB = 1,
+};
+
+/* RPU operating mode: both cores in lockstep, or running independently */
+enum rpu_oper_mode {
+	PM_RPU_MODE_LOCKSTEP = 0,
+	PM_RPU_MODE_SPLIT = 1,
+};
+
 unsigned int zynqmp_firmware_version(void);
 int zynqmp_pmufw_node(u32 id);
 int zynqmp_pmufw_config_close(void);
@@ -461,6 +476,7 @@ int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
 			     u32 value);
+int zynqmp_pm_set_rpu_mode(u32 node, int rpu_mode);
 int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
 int zynqmp_mmio_read(const u32 address, u32 *value);
 int zynqmp_mmio_write(const u32 address, const u32 mask, const u32 value);
-- 
2.25.1



More information about the U-Boot mailing list