[PATCH] nvme: Fix cache alignment

Andre Przywara andre.przywara at arm.com
Tue Feb 2 14:04:56 CET 2021


On Tue, 2 Feb 2021 11:55:50 +0800
Bin Meng <bmeng.cn at gmail.com> wrote:

Hi,

> On Sun, Jan 31, 2021 at 1:53 AM Marek Vasut <marek.vasut at gmail.com> wrote:
> >
> > The various structures in the driver are already correcty padded and  
> 
> typo: correctly
> 
> > cache aligned in memory, however the cache operations are called on
> > the structure sizes, which themselves might not be cache aligned. Add
> > the necessary rounding to fix this, which permits the nvme to work on
> > arm64.  
> 
> +ARM guys
> 
> Which ARM64 SoC did you test this with?
> 
> The round down in this patch should be unnecessary. But it would be
> better to figure out which call to dcache_xxx() is made with an
> unaligned end address.

I agree. There is no requirement for the actual cache maintenance
instruction's address to be aligned, and the implementations of
flush_dcache_range() and invalidate_dcache_range() already align
everything anyway.
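
Roughly, the arm64 flush path already does the equivalent of the
following (illustrative C only; the real code is assembly in
arch/arm/cpu/armv8/cache.S, and the helper names below are made up):

void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* minimal data cache line size (the real code reads CTR_EL0) */
	unsigned long line = dcache_line_size();	/* hypothetical helper */

	/* round the start address down to a cache line boundary */
	start &= ~(line - 1);

	/* clean + invalidate one line at a time until stop is covered */
	for (; start < stop; start += line)
		dc_civac(start);	/* hypothetical wrapper for "dc civac" */

	dsb();	/* barrier after the maintenance loop */
}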

Actually I think the rounding here is *wrong*, as it papers over the
requirement that the *buffer* itself be cache line aligned. That must
be assured at buffer allocation time; rounding just before calling
cache maintenance merely avoids (read: silences!) the warnings.
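
For illustration (just a sketch, not a proper patch): if the buffer is
allocated cache line aligned and with a padded size, the maintenance
range is aligned by construction and no rounding wrapper is needed:

	struct nvme_id_ctrl *ctrl;

	/* sketch only: aligned start and cache-line padded size */
	ctrl = memalign(ARCH_DMA_MINALIGN,
			ALIGN(sizeof(*ctrl), ARCH_DMA_MINALIGN));
	if (!ctrl)
		return -ENOMEM;

	/* both ends of the range are now cache line aligned */
	invalidate_dcache_range((ulong)ctrl,
				(ulong)ctrl +
				ALIGN(sizeof(*ctrl), ARCH_DMA_MINALIGN));

For on-stack buffers, ALLOC_CACHE_ALIGN_BUFFER() from <memalign.h>
takes care of the alignment in the same way.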

I think drivers/net/bcmgenet.c does the right thing.

Happy to provide more detailed rationale and explanations if needed.

Cheers,
Andre
 
> >
> > Signed-off-by: Marek Vasut <marek.vasut+renesas at gmail.com>
> > Cc: Bin Meng <bmeng.cn at gmail.com>
> > ---
> >  drivers/nvme/nvme.c | 50 +++++++++++++++++++++++++++++----------------
> >  1 file changed, 32 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
> > index 5d6331ad34..758415a53b 100644
> > --- a/drivers/nvme/nvme.c
> > +++ b/drivers/nvme/nvme.c
> > @@ -53,6 +53,27 @@ struct nvme_queue {
> >         unsigned long cmdid_data[];
> >  };
> >
> > +static void nvme_align_dcache_range(void *start, unsigned long size,
> > +                                   unsigned long *s, unsigned long *e)
> > +{
> > +       *s = rounddown((uintptr_t)start, ARCH_DMA_MINALIGN);
> > +       *e = roundup((uintptr_t)start + size, ARCH_DMA_MINALIGN);
> > +}
> > +
> > +static void nvme_flush_dcache_range(void *start, unsigned long size)
> > +{
> > +       unsigned long s, e;
> > +       nvme_align_dcache_range(start, size, &s, &e);
> > +       flush_dcache_range(s, e);
> > +}
> > +
> > +static void nvme_invalidate_dcache_range(void *start, unsigned long size)
> > +{
> > +       unsigned long s, e;
> > +       nvme_align_dcache_range(start, size, &s, &e);
> > +       invalidate_dcache_range(s, e);
> > +}
> > +
> >  static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
> >  {
> >         u32 bit = enabled ? NVME_CSTS_RDY : 0;
> > @@ -129,8 +150,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
> >         }
> >         *prp2 = (ulong)dev->prp_pool;
> >
> > -       flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
> > -                          dev->prp_entry_num * sizeof(u64));
> > +       nvme_flush_dcache_range(dev->prp_pool, dev->prp_entry_num * sizeof(u64));
> >
> >         return 0;
> >  }
> > @@ -144,10 +164,8 @@ static __le16 nvme_get_cmd_id(void)
> >
> >  static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
> >  {
> > -       u64 start = (ulong)&nvmeq->cqes[index];
> > -       u64 stop = start + sizeof(struct nvme_completion);
> > -
> > -       invalidate_dcache_range(start, stop);
> > +       nvme_invalidate_dcache_range(&nvmeq->cqes[index],
> > +                                    sizeof(struct nvme_completion));
> >
> >         return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
> >  }
> > @@ -163,8 +181,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
> >         u16 tail = nvmeq->sq_tail;
> >
> >         memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
> > -       flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
> > -                          (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
> > +       nvme_flush_dcache_range(&nvmeq->sq_cmds[tail], sizeof(*cmd));
> >
> >         if (++tail == nvmeq->q_depth)
> >                 tail = 0;
> > @@ -338,8 +355,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
> >         nvmeq->cq_phase = 1;
> >         nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
> >         memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
> > -       flush_dcache_range((ulong)nvmeq->cqes,
> > -                          (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
> > +       nvme_flush_dcache_range(nvmeq->cqes, NVME_CQ_SIZE(nvmeq->q_depth));
> >         dev->online_queues++;
> >  }
> >
> > @@ -466,13 +482,13 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid,
> >
> >         c.identify.cns = cpu_to_le32(cns);
> >
> > -       invalidate_dcache_range(dma_addr,
> > -                               dma_addr + sizeof(struct nvme_id_ctrl));
> > +       nvme_invalidate_dcache_range((void *)dma_addr,
> > +                                    sizeof(struct nvme_id_ctrl));
> >
> >         ret = nvme_submit_admin_cmd(dev, &c, NULL);
> >         if (!ret)
> > -               invalidate_dcache_range(dma_addr,
> > -                                       dma_addr + sizeof(struct nvme_id_ctrl));
> > +               nvme_invalidate_dcache_range((void *)dma_addr,
> > +                                            sizeof(struct nvme_id_ctrl));
> >
> >         return ret;
> >  }
> > @@ -729,8 +745,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
> >         u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
> >         u64 total_lbas = blkcnt;
> >
> > -       flush_dcache_range((unsigned long)buffer,
> > -                          (unsigned long)buffer + total_len);
> > +       nvme_flush_dcache_range(buffer, total_len);
> >
> >         c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
> >         c.rw.flags = 0;
> > @@ -767,8 +782,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
> >         }
> >
> >         if (read)
> > -               invalidate_dcache_range((unsigned long)buffer,
> > -                                       (unsigned long)buffer + total_len);
> > +               nvme_invalidate_dcache_range(buffer, total_len);
> >
> >         return (total_len - temp_len) >> desc->log2blksz;
> >  }  
> 
> Regards,
> Bin


