[U-Boot] [PATCH] Make the generic unaligned access code safe for unaligned access

Remy Bohmer linux at bohmer.net
Wed Oct 28 22:13:38 CET 2009


The current generic code for handling unaligned access assumes that
the processor can handle unaligned accesses itself. This is not the
case on ARM, at least, where such accesses cause runtime errors.

Rewrite it so that it also works on ARM.
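
For illustration only (not part of this patch), a minimal sketch of the
failure mode this addresses: a direct word load through a misaligned
pointer may fault or return rotated data on ARM, while assembling the
value byte by byte, as the rewritten helpers do, is safe on any CPU.
The buffer and the offset below are hypothetical.

	#include <stdint.h>

	/* Hypothetical buffer; offset 1 is deliberately misaligned for a u32. */
	static const uint8_t buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00 };

	static uint32_t read_direct(const void *p)
	{
		/* Unsafe on ARM: the compiler may emit a word load (ldr)
		 * that requires 4-byte alignment. */
		return *(const uint32_t *)p;
	}

	static uint32_t read_bytewise(const void *p)
	{
		/* Safe everywhere: only byte loads, assembled little-endian,
		 * the same approach the patch below takes. */
		const uint8_t *q = p;
		return q[0] | q[1] << 8 | q[2] << 16 | (uint32_t)q[3] << 24;
	}

	/* read_bytewise(buf + 1) always yields 0x12345678;
	 * read_direct(buf + 1) may fault or return garbage on ARM. */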

Signed-off-by: Remy Bohmer <linux at bohmer.net>
---
 include/linux/unaligned/access_ok.h |   48 ++++++++++++++++++++++++----------
 1 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
index 5f46eee..172124f 100644
--- a/include/linux/unaligned/access_ok.h
+++ b/include/linux/unaligned/access_ok.h
@@ -1,66 +1,86 @@
 #ifndef _LINUX_UNALIGNED_ACCESS_OK_H
 #define _LINUX_UNALIGNED_ACCESS_OK_H
 
-#include <asm/byteorder.h>
-
 static inline u16 get_unaligned_le16(const void *p)
 {
-	return le16_to_cpup((__le16 *)p);
+	const u8 *__p = p;
+	return __p[0] | __p[1] << 8;
 }
 
 static inline u32 get_unaligned_le32(const void *p)
 {
-	return le32_to_cpup((__le32 *)p);
+	const u8 *__p = p;
+	return __p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24;
 }
 
 static inline u64 get_unaligned_le64(const void *p)
 {
-	return le64_to_cpup((__le64 *)p);
+	const u8 *__p = p;
+	return (unsigned long long)
+		get_unaligned_le32((__p + 4)) << 32 |
+		get_unaligned_le32(__p);
 }
 
 static inline u16 get_unaligned_be16(const void *p)
 {
-	return be16_to_cpup((__be16 *)p);
+	const u8 *__p = p;
+	return __p[0] << 8 | __p[1];
 }
 
 static inline u32 get_unaligned_be32(const void *p)
 {
-	return be32_to_cpup((__be32 *)p);
+	const u8 *__p = p;
+	return __p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3];
 }
 
 static inline u64 get_unaligned_be64(const void *p)
 {
-	return be64_to_cpup((__be64 *)p);
+	const u8 *__p = p;
+	return (unsigned long long)
+		get_unaligned_be32(__p) << 32 |
+		get_unaligned_be32((__p + 4));
 }
 
 static inline void put_unaligned_le16(u16 val, void *p)
 {
-	*((__le16 *)p) = cpu_to_le16(val);
+	u8 *__p = p;
+	*__p++ = val;
+	*__p++ = val >> 8;
 }
 
 static inline void put_unaligned_le32(u32 val, void *p)
 {
-	*((__le32 *)p) = cpu_to_le32(val);
+	u8 *__p = p;
+	put_unaligned_le16(val >> 16, __p + 2);
+	put_unaligned_le16(val, __p);
 }
 
 static inline void put_unaligned_le64(u64 val, void *p)
 {
-	*((__le64 *)p) = cpu_to_le64(val);
+	u8 *__p = p;
+	put_unaligned_le32(val >> 32, __p + 4);
+	put_unaligned_le32(val, __p);
 }
 
 static inline void put_unaligned_be16(u16 val, void *p)
 {
-	*((__be16 *)p) = cpu_to_be16(val);
+	u8 *__p = p;
+	*__p++ = val >> 8;
+	*__p++ = val;
 }
 
 static inline void put_unaligned_be32(u32 val, void *p)
 {
-	*((__be32 *)p) = cpu_to_be32(val);
+	u8 *__p = p;
+	put_unaligned_be16(val >> 16, __p);
+	put_unaligned_be16(val, __p + 2);
 }
 
 static inline void put_unaligned_be64(u64 val, void *p)
 {
-	*((__be64 *)p) = cpu_to_be64(val);
+	u8 *__p = p;
+	put_unaligned_be32(val >> 32, __p);
+	put_unaligned_be32(val, __p + 4);
 }
 
 #endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
-- 
1.6.0.4