[PATCH v2 5/9] nvme: Introduce driver ops

Mark Kettenis kettenis at openbsd.org
Sat Jan 22 20:38:15 CET 2022


The NVMe storage controller integrated on Apple SoCs deviates
from the NVMe standard in two respects: it uses a "linear"
submission queue, and it integrates an NVMMU that must be
programmed for each NVMe command.  Introduce driver ops so
that the driver for this strange beast can set up the linear
submission queue and program the NVMMU.

Signed-off-by: Mark Kettenis <kettenis at openbsd.org>
Reviewed-by: Simon Glass <sjg at chromium.org>
Tested on: MacBook Air M1
Tested-by: Simon Glass <sjg at chromium.org>
---

ChangeLog:

v2: - Change alloc_queue op into setup_queue
    - Add comments


 drivers/nvme/nvme.c | 45 +++++++++++++++----------------------
 drivers/nvme/nvme.h | 55 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+), 27 deletions(-)
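
As an illustration (not part of this patch), a controller driver would
hook into these new ops roughly as sketched below.  The "foo_nvme" names
are made up for this sketch; the real consumer is presumably the Apple
controller driver added later in this series, and controller-specific
details such as register setup are elided.

#include <dm.h>
#include "nvme.h"

static int foo_nvme_setup_queue(struct nvme_queue *nvmeq)
{
	/* Tell the controller about the (linear) queue memory here. */
	return 0;
}

static void foo_nvme_submit_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd)
{
	/*
	 * Program the NVMMU for this command and ring the
	 * controller-specific doorbell instead of the standard one.
	 */
}

static void foo_nvme_complete_cmd(struct nvme_queue *nvmeq,
				  struct nvme_command *cmd)
{
	/* Undo any per-command NVMMU setup once the command has completed. */
}

static const struct nvme_ops foo_nvme_ops = {
	.setup_queue	= foo_nvme_setup_queue,
	.submit_cmd	= foo_nvme_submit_cmd,
	.complete_cmd	= foo_nvme_complete_cmd,
};

static int foo_nvme_probe(struct udevice *dev)
{
	struct nvme_dev *ndev = dev_get_priv(dev);

	/* Map the controller registers, then run the common init code. */
	ndev->bar = dev_read_addr_ptr(dev);

	return nvme_init(dev);
}

U_BOOT_DRIVER(foo_nvme) = {
	.name		= "foo_nvme",
	.id		= UCLASS_NVME,
	/* .of_match and other controller-specific bits omitted */
	.probe		= foo_nvme_probe,
	.priv_auto	= sizeof(struct nvme_dev),
	.ops		= &foo_nvme_ops,
};

With .ops left unset, as in the existing PCI driver, the new checks in
nvme_submit_cmd() and friends fall through to the standard
doorbell-based path.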

diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index be518ec20b..e7cbf39c96 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -27,33 +27,6 @@
 #define IO_TIMEOUT		30
 #define MAX_PRP_POOL		512
 
-enum nvme_queue_id {
-	NVME_ADMIN_Q,
-	NVME_IO_Q,
-	NVME_Q_NUM,
-};
-
-/*
- * An NVM Express queue. Each device has at least two (one for admin
- * commands and one for I/O commands).
- */
-struct nvme_queue {
-	struct nvme_dev *dev;
-	struct nvme_command *sq_cmds;
-	struct nvme_completion *cqes;
-	wait_queue_head_t sq_full;
-	u32 __iomem *q_db;
-	u16 q_depth;
-	s16 cq_vector;
-	u16 sq_head;
-	u16 sq_tail;
-	u16 cq_head;
-	u16 qid;
-	u8 cq_phase;
-	u8 cqe_seen;
-	unsigned long cmdid_data[];
-};
-
 static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
 {
 	u32 bit = enabled ? NVME_CSTS_RDY : 0;
@@ -167,12 +140,19 @@ static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
  */
 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
+	struct nvme_ops *ops;
 	u16 tail = nvmeq->sq_tail;
 
 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
 	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
 			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
 
+	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
+	if (ops && ops->submit_cmd) {
+		ops->submit_cmd(nvmeq, cmd);
+		return;
+	}
+
 	if (++tail == nvmeq->q_depth)
 		tail = 0;
 	writel(tail, nvmeq->q_db);
@@ -183,6 +163,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 				struct nvme_command *cmd,
 				u32 *result, unsigned timeout)
 {
+	struct nvme_ops *ops;
 	u16 head = nvmeq->cq_head;
 	u16 phase = nvmeq->cq_phase;
 	u16 status;
@@ -203,6 +184,10 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 			return -ETIMEDOUT;
 	}
 
+	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
+	if (ops && ops->complete_cmd)
+		ops->complete_cmd(nvmeq, cmd);
+
 	status >>= 1;
 	if (status) {
 		printf("ERROR: status = %x, phase = %d, head = %d\n",
@@ -243,6 +228,7 @@ static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
 					   int qid, int depth)
 {
+	struct nvme_ops *ops;
 	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
 	if (!nvmeq)
 		return NULL;
@@ -268,6 +254,10 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
 	dev->queue_count++;
 	dev->queues[qid] = nvmeq;
 
+	ops = (struct nvme_ops *)dev->udev->driver->ops;
+	if (ops && ops->setup_queue)
+		ops->setup_queue(nvmeq);
+
 	return nvmeq;
 
  free_queue:
@@ -821,6 +811,7 @@ int nvme_init(struct udevice *udev)
 	struct nvme_id_ns *id;
 	int ret;
 
+	ndev->udev = udev;
 	INIT_LIST_HEAD(&ndev->namespaces);
 	if (readl(&ndev->bar->csts) == -1) {
 		ret = -ENODEV;
diff --git a/drivers/nvme/nvme.h b/drivers/nvme/nvme.h
index 8e9ae3c7f6..bc6b79f8dd 100644
--- a/drivers/nvme/nvme.h
+++ b/drivers/nvme/nvme.h
@@ -596,6 +596,7 @@ enum {
 
 /* Represents an NVM Express device. Each nvme_dev is a PCI function. */
 struct nvme_dev {
+	struct udevice *udev;
 	struct list_head node;
 	struct nvme_queue **queues;
 	u32 __iomem *dbs;
@@ -622,6 +623,33 @@ struct nvme_dev {
 	u32 nn;
 };
 
+/* Admin queue and a single I/O queue. */
+enum nvme_queue_id {
+	NVME_ADMIN_Q,
+	NVME_IO_Q,
+	NVME_Q_NUM,
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+	struct nvme_dev *dev;
+	struct nvme_command *sq_cmds;
+	struct nvme_completion *cqes;
+	u32 __iomem *q_db;
+	u16 q_depth;
+	s16 cq_vector;
+	u16 sq_head;
+	u16 sq_tail;
+	u16 cq_head;
+	u16 qid;
+	u8 cq_phase;
+	u8 cqe_seen;
+	unsigned long cmdid_data[];
+};
+
 /*
  * An NVM Express namespace is equivalent to a SCSI LUN.
  * Each namespace is operated as an independent "device".
@@ -636,6 +664,33 @@ struct nvme_ns {
 	u8 flbas;
 };
 
+struct nvme_ops {
+	/**
+	 * setup_queue - Controller-specific NVM Express queue setup.
+	 *
+	 * @nvmeq: NVM Express queue
+	 * Return: 0 if OK, -ve on error
+	 */
+	int (*setup_queue)(struct nvme_queue *nvmeq);
+	/**
+	 * submit_cmd - Controller-specific NVM Express command submission.
+	 *
+	 * If this function pointer is set to NULL, normal command
+	 * submission is performed according to the NVM Express spec.
+	 *
+	 * @nvmeq: NVM Express queue
+	 * @cmd:   NVM Express command
+	 */
+	void (*submit_cmd)(struct nvme_queue *nvmeq, struct nvme_command *cmd);
+	/**
+	 * complete_cmd - Controller-specific NVM Express command completion
+	 *
+	 * @nvmeq: NVM Express queue
+	 * @cmd:   NVM Express command
+	 */
+	void (*complete_cmd)(struct nvme_queue *nvmeq, struct nvme_command *cmd);
+};
+
 int nvme_init(struct udevice *udev);
 
 #endif /* __DRIVER_NVME_H__ */
-- 
2.34.1


