[PATCH 2/3] nvme: Fix missing address translation for PCIe inbound access
Neil Armstrong
neil.armstrong at linaro.org
Mon May 11 16:50:48 CEST 2026
On 5/8/26 17:42, Torsten Duwe wrote:
> From: Torsten Duwe <duwe at suse.de>
>
> U-Boot currently does not account for PCIe bridges with a non-zero
> inbound access offset when talking NVMe, it only works on platforms
> where this offset happens to be zero.
>
> This patch enhances the NVMe driver with the ability to also handle
> these cases.
>
> Signed-off-by: Torsten Duwe <duwe at suse.de>
>
> ---
> drivers/nvme/nvme.c | 34 +++++++++++++++++++++-------------
> 1 file changed, 21 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
> index 2b14437f69c..99a47bf25f2 100644
> --- a/drivers/nvme/nvme.c
> +++ b/drivers/nvme/nvme.c
> @@ -12,6 +12,7 @@
> #include <log.h>
> #include <malloc.h>
> #include <memalign.h>
> +#include <phys2bus.h>
> #include <time.h>
> #include <dm/device-internal.h>
> #include <linux/compat.h>
> @@ -27,6 +28,13 @@
> #define IO_TIMEOUT 30
> #define MAX_PRP_POOL 512
>
> +/*
> + * Convert a memory address to the value needed by the PCI device to
> + * access the given location, taking into account inbound window
> + * translations of PCI bridges:
> + */
> +#define DEV_ADDR(a) dev_phys_to_bus(dev->udev, (a))
This doesn't look very clean; I would do something closer to Linux by precalculating
the DMA addresses after allocation, like:
====================><================================
diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index 2b14437f69c..1ce06402a90 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -241,6 +241,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
if (!nvmeq->sq_cmds)
goto free_queue;
+ nvmeq->sq_dma_addr = dev_phys_to_bus(dev->udev, nvmeq->sq_cmds);
memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));
nvmeq->dev = dev;
@@ -393,7 +394,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
writel(aqa, &dev->bar->aqa);
- nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
+ nvme_writeq((ulong)nvmeq->sq_dma_addr, &dev->bar->asq);
nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
result = nvme_enable_ctrl(dev);
diff --git a/drivers/nvme/nvme.h b/drivers/nvme/nvme.h
index bc1d612dde4..9c5e57cb586 100644
--- a/drivers/nvme/nvme.h
+++ b/drivers/nvme/nvme.h
@@ -637,6 +637,7 @@ enum nvme_queue_id {
struct nvme_queue {
struct nvme_dev *dev;
struct nvme_command *sq_cmds;
+ dma_addr_t sq_dma_addr;
struct nvme_completion *cqes;
u32 __iomem *q_db;
u16 q_depth;
====================><================================
and the same for the other allocated buffers.
Thanks,
Neil
> +
> static int nvme_wait_csts(struct nvme_dev *dev, u32 mask, u32 val)
> {
> int timeout;
> @@ -91,12 +99,12 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
> i = 0;
> while (nprps) {
> if ((i == (prps_per_page - 1)) && nprps > 1) {
> - *(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
> - page_size);
> + *(prp_pool + i) = cpu_to_le64(DEV_ADDR((ulong)prp_pool +
> + page_size));
> i = 0;
> prp_pool += page_size;
> }
> - *(prp_pool + i++) = cpu_to_le64(dma_addr);
> + *(prp_pool + i++) = cpu_to_le64(DEV_ADDR(dma_addr));
> dma_addr += page_size;
> nprps--;
> }
> @@ -393,8 +401,8 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
> dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
>
> writel(aqa, &dev->bar->aqa);
> - nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
> - nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
> + nvme_writeq(DEV_ADDR((ulong)nvmeq->sq_cmds), &dev->bar->asq);
> + nvme_writeq(DEV_ADDR((ulong)nvmeq->cqes), &dev->bar->acq);
>
> result = nvme_enable_ctrl(dev);
> if (result)
> @@ -420,7 +428,7 @@ static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
>
> memset(&c, 0, sizeof(c));
> c.create_cq.opcode = nvme_admin_create_cq;
> - c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
> + c.create_cq.prp1 = cpu_to_le64(DEV_ADDR((ulong)nvmeq->cqes));
> c.create_cq.cqid = cpu_to_le16(qid);
> c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
> c.create_cq.cq_flags = cpu_to_le16(flags);
> @@ -437,7 +445,7 @@ static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
>
> memset(&c, 0, sizeof(c));
> c.create_sq.opcode = nvme_admin_create_sq;
> - c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
> + c.create_sq.prp1 = cpu_to_le64(DEV_ADDR((ulong)nvmeq->sq_cmds));
> c.create_sq.sqid = cpu_to_le16(qid);
> c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
> c.create_sq.sq_flags = cpu_to_le16(flags);
> @@ -458,14 +466,14 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid,
> memset(&c, 0, sizeof(c));
> c.identify.opcode = nvme_admin_identify;
> c.identify.nsid = cpu_to_le32(nsid);
> - c.identify.prp1 = cpu_to_le64(dma_addr);
> + c.identify.prp1 = cpu_to_le64(DEV_ADDR(dma_addr));
>
> length -= (page_size - offset);
> if (length <= 0) {
> c.identify.prp2 = 0;
> } else {
> dma_addr += (page_size - offset);
> - c.identify.prp2 = cpu_to_le64(dma_addr);
> + c.identify.prp2 = cpu_to_le64(DEV_ADDR(dma_addr));
> }
>
> c.identify.cns = cpu_to_le32(cns);
> @@ -490,7 +498,7 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
> memset(&c, 0, sizeof(c));
> c.features.opcode = nvme_admin_get_features;
> c.features.nsid = cpu_to_le32(nsid);
> - c.features.prp1 = cpu_to_le64(dma_addr);
> + c.features.prp1 = cpu_to_le64(DEV_ADDR(dma_addr));
> c.features.fid = cpu_to_le32(fid);
>
> ret = nvme_submit_admin_cmd(dev, &c, result);
> @@ -516,7 +524,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
>
> memset(&c, 0, sizeof(c));
> c.features.opcode = nvme_admin_set_features;
> - c.features.prp1 = cpu_to_le64(dma_addr);
> + c.features.prp1 = cpu_to_le64(DEV_ADDR(dma_addr));
> c.features.fid = cpu_to_le32(fid);
> c.features.dword11 = cpu_to_le32(dword11);
>
> @@ -785,8 +793,8 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
> c.rw.slba = cpu_to_le64(slba);
> slba += lbas;
> c.rw.length = cpu_to_le16(lbas - 1);
> - c.rw.prp1 = cpu_to_le64(temp_buffer);
> - c.rw.prp2 = cpu_to_le64(prp2);
> + c.rw.prp1 = cpu_to_le64(DEV_ADDR(temp_buffer));
> + c.rw.prp2 = cpu_to_le64(DEV_ADDR(prp2));
> status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
> &c, NULL, IO_TIMEOUT);
> if (status)
More information about the U-Boot
mailing list