[PATCH] arch/riscv/lib: update memmove and memcpy for big-endian

Ben Dooks ben.dooks at codethink.co.uk
Thu Aug 7 18:49:33 CEST 2025


Change the shift patterns in the unaligned memory move and copy code
to deal with big-endian by defining macros that make the shift-left
and shift-right go the opposite way.

Signed-off-by: Ben Dooks <ben.dooks at codethink.co.uk>
---
 arch/riscv/lib/memcpy.S  | 12 ++++++++++--
 arch/riscv/lib/memmove.S | 12 ++++++++++--
 2 files changed, 20 insertions(+), 4 deletions(-)
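
For reference, the misaligned copy builds each destination word from
two aligned loads: on little-endian the already-consumed bytes are
shifted out one way (t3) and the next loaded word is shifted the other
way (t4) to fill the gap. On big-endian the byte significance within a
register is reversed, so the two shift directions swap, which is what
the M_SLL/M_SRL macros express. A minimal C sketch of the idea (an
illustration only, not the U-Boot code; the helper name
combine_misaligned is hypothetical):

#include <stdint.h>

/*
 * Combine two aligned loads into one misaligned word. "off" is the
 * byte offset of the source pointer within an aligned word (1 to
 * SZREG-1; never 0, as the aligned case takes a different path),
 * mirroring the t3/t4 shift amounts in the assembly.
 */
static uintptr_t combine_misaligned(uintptr_t lo, uintptr_t hi,
				    unsigned int off)
{
	unsigned int sr = off * 8;			/* like t3 */
	unsigned int sl = sizeof(uintptr_t) * 8 - sr;	/* like t4 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Low-address bytes live in the low bits of the register. */
	return (lo >> sr) | (hi << sl);
#else
	/* Low-address bytes live in the high bits, so the shifts swap. */
	return (lo << sr) | (hi >> sl);
#endif
}

Defining the macros once per file keeps the loop bodies unchanged, so
the little-endian code generation is identical to before.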

diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
index 9884077c933..e5479bbe84e 100644
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -125,6 +125,14 @@ WEAK(memcpy)
 .copy_end:
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -144,10 +152,10 @@ WEAK(memcpy)
 	addi	t0, t0, -(SZREG-1)
 	/* At least one iteration will be executed here, no check */
 1:
-	srl	a4, a5, t3
+	M_SRL	a4, a5, t3
 	REG_L	a5, SZREG(a1)
 	addi	a1, a1, SZREG
-	sll	a2, a5, t4
+	M_SLL	a2, a5, t4
 	or	a2, a2, a4
 	REG_S	a2, 0(a0)
 	addi	a0, a0, SZREG
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index fbe6701dbe4..b2c1c736713 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -91,6 +91,14 @@ WEAK(memmove)
 	mv	a0, t0
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -110,10 +118,10 @@ WEAK(memmove)
 	addi	t0, t0, SZREG-1
 	/* At least one iteration will be executed here, no check */
 1:
-	sll	a4, a5, t4
+	M_SLL	a4, a5, t4
 	addi	a1, a1, -SZREG
 	REG_L	a5, 0(a1)
-	srl	a2, a5, t3
+	M_SRL	a2, a5, t3
 	or	a2, a2, a4
 	addi	a0, a0, -SZREG
 	REG_S	a2, 0(a0)
-- 
2.37.2.352.g3c44437643


