[PATCH 4/8] nvme: Introduce driver ops
Simon Glass
sjg at chromium.org
Sat Jan 22 18:17:11 CET 2022
Hi Mark,
On Sat, 22 Jan 2022 at 06:33, Mark Kettenis <mark.kettenis at xs4all.nl> wrote:
>
> > From: Simon Glass <sjg at chromium.org>
> > Date: Fri, 21 Jan 2022 18:40:19 -0700
> >
> > Hi Mark,
> >
> > On Fri, 14 Jan 2022 at 04:05, Mark Kettenis <kettenis at openbsd.org> wrote:
> > >
> > > The NVMe storage controller integrated on Apple SoCs deviates
> > > from the NVMe standard in two respects. It uses a "linear"
> > > submission queue and it integrates an NVMMU that needs to be
> > > programmed for each NVMe command. Introduce driver ops such
> > > that we can set up the linear submission queue and program the
> > > NVMMU in the driver for this strange beast.
> > >
> > > Signed-off-by: Mark Kettenis <kettenis at openbsd.org>
> > > ---
> > > drivers/nvme/nvme.c | 45 ++++++++++++++++++---------------------------
> > > drivers/nvme/nvme.h | 33 +++++++++++++++++++++++++++++++++
> > > 2 files changed, 51 insertions(+), 27 deletions(-)
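(As an aside for anyone following along: the core code calls into the
driver at queue-allocation, submission and completion time. A driver
for a controller like this would then hook in roughly as below. This
is purely a sketch; all of the apple_* names are invented for
illustration:

static const struct nvme_ops apple_nvme_ops = {
	/* Set up the controller's linear submission queue */
	.alloc_queue	= apple_nvme_alloc_queue,
	/* Program the NVMMU, then ring the doorbell */
	.submit_cmd	= apple_nvme_submit_cmd,
	/* Tidy up NVMMU state for the completed command */
	.complete_cmd	= apple_nvme_complete_cmd,
};

U_BOOT_DRIVER(apple_nvme) = {
	.name		= "apple_nvme",
	.id		= UCLASS_NVME,
	.of_match	= apple_nvme_ids,
	.probe		= apple_nvme_probe,
	.ops		= &apple_nvme_ops,
};

with nvme_submit_cmd() and friends looking the ops up through
udev->driver->ops, as this patch does.)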
> >
> > Reviewed-by: Simon Glass <sjg at chromium.org>
> > Tested on: MacBook Air M1
> > Tested-by: Simon Glass <sjg at chromium.org>
>
> Thanks...
>
> > But please see below
> >
> > >
> > > diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
> > > index be518ec20b..e2d0f9c668 100644
> > > --- a/drivers/nvme/nvme.c
> > > +++ b/drivers/nvme/nvme.c
> > > @@ -27,33 +27,6 @@
> > > #define IO_TIMEOUT 30
> > > #define MAX_PRP_POOL 512
> > >
> > > -enum nvme_queue_id {
> > > - NVME_ADMIN_Q,
> > > - NVME_IO_Q,
> > > - NVME_Q_NUM,
> > > -};
> > > -
> > > -/*
> > > - * An NVM Express queue. Each device has at least two (one for admin
> > > - * commands and one for I/O commands).
> > > - */
> > > -struct nvme_queue {
> > > - struct nvme_dev *dev;
> > > - struct nvme_command *sq_cmds;
> > > - struct nvme_completion *cqes;
> > > - wait_queue_head_t sq_full;
> > > - u32 __iomem *q_db;
> > > - u16 q_depth;
> > > - s16 cq_vector;
> > > - u16 sq_head;
> > > - u16 sq_tail;
> > > - u16 cq_head;
> > > - u16 qid;
> > > - u8 cq_phase;
> > > - u8 cqe_seen;
> > > - unsigned long cmdid_data[];
> > > -};
> > > -
> > > static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
> > > {
> > > u32 bit = enabled ? NVME_CSTS_RDY : 0;
> > > @@ -167,12 +140,19 @@ static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
> > > */
> > > static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
> > > {
> > > + struct nvme_ops *ops;
> > > u16 tail = nvmeq->sq_tail;
> > >
> > > memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
> > > flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
> > > (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
> > >
> > > + ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
> > > + if (ops && ops->submit_cmd) {
> > > + ops->submit_cmd(nvmeq, cmd);
> > > + return;
> > > + }
> > > +
> > > if (++tail == nvmeq->q_depth)
> > > tail = 0;
> > > writel(tail, nvmeq->q_db);
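Not a blocker, but if I remember the driver-model API correctly, the
three new call sites could use the existing helper rather than
reaching through the device by hand, something like:

	ops = (struct nvme_ops *)dev_get_driver_ops(nvmeq->dev->udev);
	if (ops && ops->submit_cmd) {
		ops->submit_cmd(nvmeq, cmd);
		return;
	}

dev_get_driver_ops() returns NULL when the driver has no ops, so the
check stays the same.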
> > > @@ -183,6 +163,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
> > > struct nvme_command *cmd,
> > > u32 *result, unsigned timeout)
> > > {
> > > + struct nvme_ops *ops;
> > > u16 head = nvmeq->cq_head;
> > > u16 phase = nvmeq->cq_phase;
> > > u16 status;
> > > @@ -203,6 +184,10 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
> > > return -ETIMEDOUT;
> > > }
> > >
> > > + ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
> > > + if (ops && ops->complete_cmd)
> > > + ops->complete_cmd(nvmeq, cmd);
> > > +
> > > status >>= 1;
> > > if (status) {
> > > printf("ERROR: status = %x, phase = %d, head = %d\n",
> > > @@ -243,6 +228,7 @@ static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
> > > static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
> > > int qid, int depth)
> > > {
> > > + struct nvme_ops *ops;
> > > struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
> > > if (!nvmeq)
> > > return NULL;
> > > @@ -268,6 +254,10 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
> > > dev->queue_count++;
> > > dev->queues[qid] = nvmeq;
> > >
> > > + ops = (struct nvme_ops *)dev->udev->driver->ops;
> > > + if (ops && ops->alloc_queue)
> > > + ops->alloc_queue(nvmeq);
> > > +
> > > return nvmeq;
> > >
> > > free_queue:
> > > @@ -821,6 +811,7 @@ int nvme_init(struct udevice *udev)
> > > struct nvme_id_ns *id;
> > > int ret;
> > >
> > > + ndev->udev = udev;
> > > INIT_LIST_HEAD(&ndev->namespaces);
> > > if (readl(&ndev->bar->csts) == -1) {
> > > ret = -ENODEV;
> > > diff --git a/drivers/nvme/nvme.h b/drivers/nvme/nvme.h
> > > index 8e9ae3c7f6..57803b43fd 100644
> > > --- a/drivers/nvme/nvme.h
> > > +++ b/drivers/nvme/nvme.h
> > > @@ -596,6 +596,7 @@ enum {
> > >
> > > /* Represents an NVM Express device. Each nvme_dev is a PCI function. */
> > > struct nvme_dev {
> > > + struct udevice *udev;
> > > struct list_head node;
> > > struct nvme_queue **queues;
> > > u32 __iomem *dbs;
> > > @@ -622,6 +623,32 @@ struct nvme_dev {
> > > u32 nn;
> > > };
> > >
> > > +enum nvme_queue_id {
> >
> > comment
>
> Hmm, I merely moved this definition. Feels a bit awkward to come up
> with a comment when I didn't write the bit of code in question. I can
> come up with something though if you insist.
Oh. Well, if you can...
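Even a one-liner would do, e.g. (just a suggestion):

/*
 * Well-known queue IDs: index 0 is the admin queue, index 1 the
 * single I/O queue this driver uses; NVME_Q_NUM is the queue count.
 */
enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};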
>
> >
> > > + NVME_ADMIN_Q,
> > > + NVME_IO_Q,
> > > + NVME_Q_NUM,
> > > +};
> > > +
> > > +/*
> > > + * An NVM Express queue. Each device has at least two (one for admin
> > > + * commands and one for I/O commands).
> > > + */
> > > +struct nvme_queue {
> > > + struct nvme_dev *dev;
> > > + struct nvme_command *sq_cmds;
> > > + struct nvme_completion *cqes;
> > > + u32 __iomem *q_db;
> > > + u16 q_depth;
> > > + s16 cq_vector;
> > > + u16 sq_head;
> > > + u16 sq_tail;
> > > + u16 cq_head;
> > > + u16 qid;
> > > + u8 cq_phase;
> > > + u8 cqe_seen;
> > > + unsigned long cmdid_data[];
> > > +};
> > > +
> > > /*
> > > * An NVM Express namespace is equivalent to a SCSI LUN.
> > > * Each namespace is operated as an independent "device".
> > > @@ -636,6 +663,12 @@ struct nvme_ns {
> > > u8 flbas;
> > > };
> > >
> > > +struct nvme_ops {
> > > + int (*alloc_queue)(struct nvme_queue *);
> > > + void (*submit_cmd)(struct nvme_queue *, struct nvme_command *);
> > > + void (*complete_cmd)(struct nvme_queue *, struct nvme_command *);
> >
> > each of these needs a comment
>
> Indeed. While writing the comment, I realized that setup_queue would
> be a better name than alloc_queue, so I changed that. Is that a
> small-enough change to retain your tags?
Yes
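...and for what it's worth, the sort of thing I had in mind, as a
sketch using your new name:

struct nvme_ops {
	/* Controller-specific setup, called after the core code has
	 * allocated a queue (optional) */
	int (*setup_queue)(struct nvme_queue *nvmeq);
	/* Submit a command, replacing the standard doorbell write
	 * (optional) */
	void (*submit_cmd)(struct nvme_queue *nvmeq,
			   struct nvme_command *cmd);
	/* Called once a command has completed, e.g. to clean up
	 * controller state (optional) */
	void (*complete_cmd)(struct nvme_queue *nvmeq,
			     struct nvme_command *cmd);
};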
>
> >
> > > +};
> > > +
> > > int nvme_init(struct udevice *udev);
> >
> > (for later, this should be dev, not udev)
>
> As in, this is fine for now but will need to be changed at some point
> in the future?
Yes, it's just a style thing.
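I.e. eventually just:

	int nvme_init(struct udevice *dev);

since driver-model code conventionally calls the device 'dev'.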
>
> >
> > >
> > > #endif /* __DRIVER_NVME_H__ */
> > > --
> > > 2.34.1
> > >
Regards,
Simon