[PATCH v3 24/44] x86: Allow adding non-aligned size for MTRR
Simon Glass
sjg at chromium.org
Tue Feb 25 00:06:13 CET 2025
At present mtrr_add_request() requires that the size is a power of two.
This is too limiting for machines with 4GB (or more) of RAM, since they
often must take account of a memory hole at 3GB.
Update the function to automatically deal with an unaligned size, using
more MTRRs as required.
The algorithm is taken from coreboot commit 60bce10750.
Signed-off-by: Simon Glass <sjg at chromium.org>
---
Changes in v3:
- Add new patch to allow adding non-aligned size for MTRR
arch/x86/cpu/mtrr.c | 84 ++++++++++++++++++++++++++++++++++++---------
1 file changed, 68 insertions(+), 16 deletions(-)
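As a rough illustration of the splitting rule, the standalone sketch below decomposes a 3GB request at base 0 into a 2GB MTRR at 0 followed by a 1GB MTRR at 0x80000000. It is not part of the patch: the msb64()/lsb64() helper names, the GCC builtins and the demo main() are stand-ins chosen for illustration only, not the fms64()/fls64() helpers the patch itself uses.

#include <stdint.h>
#include <stdio.h>

/* Position of the highest set bit of a non-zero value (illustrative only) */
static unsigned int msb64(uint64_t val)
{
        return 63 - __builtin_clzll(val);
}

/* Position of the lowest set bit of a non-zero value (illustrative only) */
static unsigned int lsb64(uint64_t val)
{
        return __builtin_ctzll(val);
}

int main(void)
{
        uint64_t base = 0, size = 0xc0000000ULL;        /* 3GB at base 0 */

        while (size) {
                /* largest power-of-two chunk that fits in the remaining size */
                uint64_t chunk = 1ULL << msb64(size);

                /* the chunk must also leave the base aligned to its own size */
                if (base && (1ULL << lsb64(base)) < chunk)
                        chunk = 1ULL << lsb64(base);

                printf("MTRR: base %#llx size %#llx\n",
                       (unsigned long long)base, (unsigned long long)chunk);

                base += chunk;
                size -= chunk;
        }

        return 0;
}

Each chunk is a power of two and naturally aligned, so it can be programmed into a single variable MTRR; the loop added to mtrr_add_request() below records one request per chunk in the same way.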
diff --git a/arch/x86/cpu/mtrr.c b/arch/x86/cpu/mtrr.c
index fca7b28f815..7a0f00b9b8f 100644
--- a/arch/x86/cpu/mtrr.c
+++ b/arch/x86/cpu/mtrr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright 2014 Google, Inc
+ * Portions added from coreboot
*
* Memory Type Range Regsters - these are used to tell the CPU whether
* memory is cacheable and if so the cache write mode to use.
@@ -204,29 +205,80 @@ int mtrr_commit(bool do_caches)
return 0;
}
-int mtrr_add_request(int type, uint64_t start, uint64_t size)
+/* fms: find most significant bit set (from Linux) */
+static inline uint fms(uint val)
+{
+ uint ret;
+
+ __asm__("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $0,%0\n"
+ "1:" : "=r" (ret) : "mr" (val));
+
+ return ret;
+}
+
+/*
+ * fms64: find most significant bit set in a 64-bit word
+ * For example, fms64(0x0) = 0; fms64(0x4400) = 14;
+ * fms64(0x40400000000) = 42.
+ */
+static uint fms64(uint64_t val)
+{
+ u32 hi = (u32)(val >> 32);
+
+ if (!hi)
+ return fms((u32)val);
+
+ return fms(hi) + 32;
+}
+
+int mtrr_add_request(int type, u64 base, uint64_t size)
{
struct mtrr_request *req;
- uint64_t mask;
+ u64 mask;
debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
if (!gd->arch.has_mtrr)
return -ENOSYS;
- if (!is_power_of_2(size))
- return -EINVAL;
-
- if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
- return -ENOSPC;
- req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
- req->type = type;
- req->start = start;
- req->size = size;
- debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
- req->type, req->start, req->size);
- mask = mtrr_to_mask(req->size);
- mask |= MTRR_PHYS_MASK_VALID;
- debug(" %016llx %016llx\n", req->start | req->type, mask);
+ while (size) {
+ uint addr_lsb;
+ uint size_msb;
+ u64 mtrr_size;
+
+ addr_lsb = fls64(base);
+ size_msb = fms64(size);
+
+ /*
+ * All MTRR entries need to have their base aligned to the
+ * mask size. The maximum size is determined by the lowest bit
+ * set in the base and the highest bit set in the size.
+ * Algorithm is from coreboot
+ */
+ if (!addr_lsb || addr_lsb > size_msb)
+ mtrr_size = 1ull << size_msb;
+ else
+ mtrr_size = 1ull << addr_lsb;
+ log_debug("addr_lsb %x size_msb %x mtrr_size %llx\n",
+ addr_lsb, size_msb, mtrr_size);
+
+ if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
+ return -ENOSPC;
+ req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
+ req->type = type;
+ req->start = base;
+ req->size = mtrr_size;
+ log_debug("%d: type=%d, %08llx %08llx ",
+ gd->arch.mtrr_req_count - 1, req->type, req->start,
+ req->size);
+ mask = mtrr_to_mask(req->size);
+ mask |= MTRR_PHYS_MASK_VALID;
+ log_debug(" %016llx %016llx\n", req->start | req->type, mask);
+
+ size -= mtrr_size;
+ base += mtrr_size;
+ }
return 0;
}
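With the power-of-two restriction lifted, callers can pass the real RAM size directly instead of splitting it by hand. A hypothetical board-level caller might look like the sketch below; the function name, the 3GB size and the error handling are illustrative, while MTRR_TYPE_WRBACK and mtrr_commit() are, as far as I can tell, the existing U-Boot definitions.

#include <asm/mtrr.h>

/*
 * Hypothetical example: mark 3GB of RAM below the PCI hole as
 * write-back cacheable. With this patch the call is split internally
 * into a 2GB request at 0 and a 1GB request at 0x80000000.
 */
static int board_setup_mtrr(void)
{
        int ret;

        ret = mtrr_add_request(MTRR_TYPE_WRBACK, 0, 0xc0000000ULL);
        if (ret)
                return ret;

        return mtrr_commit(true);
}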
--
2.43.0