[U-Boot-Users] [PATCH 7/8] [SH] SuperH support

Nobuhiro Iwamatsu iwamatsu at nigauri.org
Sun May 6 22:14:48 CEST 2007


Hi all,

I have written code that adds U-Boot support for SH (SuperH) [0].
I currently maintain SH-IPL+g [1], a boot loader specific to SH, but
going forward I intend to maintain U-Boot as well.

At the moment the following are working:
0. SDRAM controller
1. Internal serial device
2. Flash
3. Timer

Only a few boards are supported so far, but I plan to make U-Boot the
main boot loader for SH in the future.

Please apply this patch.
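
For reviewers, a short, hypothetical usage sketch of the new
primitives is appended after the patch.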

regards,
 Nobuhiro

[0] : http://america.renesas.com/fmwk.jsp?cnt=superh_family_landing.jsp&fp=/products/mpumcu/superh_family
[1] : http://git.nigauri.org

-- 
Nobuhiro Iwamatsu
	E-Mail : iwamatsu at nigauri.org
	GPG ID : 3170EBE9 

Signed-off-by: Nobuhiro Iwamatsu <iwamatsu at nigauri.org>

diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
new file mode 100644
index 0000000..b353bc5
--- /dev/null
+++ b/include/asm-sh/system.h
@@ -0,0 +1,275 @@
+#ifndef __ASM_SH_SYSTEM_H
+#define __ASM_SH_SYSTEM_H
+
+/*
+ * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
+ * Copyright (C) 2002 Paul Mundt
+ *
+ * from linux kernel code.
+ */
+
+#include <linux/irqflags.h>
+#include <asm/types.h>
+
+/*
+ *	switch_to() should switch tasks to task nr n, first
+ */
+
+#define switch_to(prev, next, last) do {				\
+ struct task_struct *__last;						\
+ register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp;	\
+ register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc;	\
+ register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev;	\
+ register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next;	\
+ register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp;	\
+ register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;		\
+ __asm__ __volatile__ (".balign 4\n\t" 					\
+		       "stc.l	gbr, @-r15\n\t" 			\
+		       "sts.l	pr, @-r15\n\t" 				\
+		       "mov.l	r8, @-r15\n\t" 				\
+		       "mov.l	r9, @-r15\n\t" 				\
+		       "mov.l	r10, @-r15\n\t" 			\
+		       "mov.l	r11, @-r15\n\t" 			\
+		       "mov.l	r12, @-r15\n\t" 			\
+		       "mov.l	r13, @-r15\n\t" 			\
+		       "mov.l	r14, @-r15\n\t" 			\
+		       "mov.l	r15, @r1	! save SP\n\t"		\
+		       "mov.l	@r6, r15	! change to new stack\n\t" \
+		       "mova	1f, %0\n\t" 				\
+		       "mov.l	%0, @r2		! save PC\n\t" 		\
+		       "mov.l	2f, %0\n\t" 				\
+		       "jmp	@%0		! call __switch_to\n\t" \
+		       " lds	r7, pr		!  with return to new PC\n\t" \
+		       ".balign	4\n"					\
+		       "2:\n\t"						\
+		       ".long	__switch_to\n"				\
+		       "1:\n\t"						\
+		       "mov.l	@r15+, r14\n\t"				\
+		       "mov.l	@r15+, r13\n\t"				\
+		       "mov.l	@r15+, r12\n\t"				\
+		       "mov.l	@r15+, r11\n\t"				\
+		       "mov.l	@r15+, r10\n\t"				\
+		       "mov.l	@r15+, r9\n\t"				\
+		       "mov.l	@r15+, r8\n\t"				\
+		       "lds.l	@r15+, pr\n\t"				\
+		       "ldc.l	@r15+, gbr\n\t"				\
+		       : "=z" (__last)					\
+		       : "r" (__ts1), "r" (__ts2), "r" (__ts4), 	\
+			 "r" (__ts5), "r" (__ts6), "r" (__ts7) 		\
+		       : "r3", "t");					\
+	last = __last;							\
+} while (0)
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+#ifdef CONFIG_CPU_SH4A
+#define __icbi()			\
+{					\
+	unsigned long __addr;		\
+	__addr = 0xa8000000;		\
+	__asm__ __volatile__(		\
+		"icbi   %0\n\t"		\
+		: /* no output */	\
+		: "m" (__m(__addr)));	\
+}
+#endif
+
+static inline unsigned long tas(volatile int *m)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ ("tas.b	@%1\n\t"
+			      "movt	%0"
+			      : "=r" (retval): "r" (m): "t", "memory");
+	return retval;
+}
+
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
+#ifdef CONFIG_CPU_SH4A
+#define mb()		__asm__ __volatile__ ("synco": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define ctrl_barrier()	__icbi()
+#define read_barrier_depends()	do { } while(0)
+#else
+#define mb()		__asm__ __volatile__ ("": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("": : :"memory")
+#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
+#define read_barrier_depends()	do { } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while(0)
+#endif
+
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+
+/*
+ * Jump to P2 area.
+ * When handling TLB or caches, we need to do it from P2 area.
+ */
+#define jump_to_P2()			\
+do {					\
+	unsigned long __dummy;		\
+	__asm__ __volatile__(		\
+		"mov.l	1f, %0\n\t"	\
+		"or	%1, %0\n\t"	\
+		"jmp	@%0\n\t"	\
+		" nop\n\t" 		\
+		".balign 4\n"		\
+		"1:	.long 2f\n"	\
+		"2:"			\
+		: "=&r" (__dummy)	\
+		: "r" (0x20000000));	\
+} while (0)
+
+/*
+ * Back to P1 area.
+ */
+#define back_to_P1()					\
+do {							\
+	unsigned long __dummy;				\
+	ctrl_barrier();					\
+	__asm__ __volatile__(				\
+		"mov.l	1f, %0\n\t"			\
+		"jmp	@%0\n\t"			\
+		" nop\n\t"				\
+		".balign 4\n"				\
+		"1:	.long 2f\n"			\
+		"2:"					\
+		: "=&r" (__dummy));			\
+} while (0)
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val & 0xff;
+	local_irq_restore(flags);
+	return retval;
+}
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size)				\
+({							\
+	unsigned long __xchg__res;			\
+	volatile void *__xchg_ptr = (ptr);		\
+	switch (size) {					\
+	case 4:						\
+		__xchg__res = xchg_u32(__xchg_ptr, x);	\
+		break;					\
+	case 1:						\
+		__xchg__res = xchg_u8(__xchg_ptr, x);	\
+		break;					\
+	default:					\
+		__xchg_called_with_bad_pointer();	\
+		__xchg__res = x;			\
+		break;					\
+	}						\
+							\
+	__xchg__res;					\
+})
+
+#define xchg(ptr,x)	\
+	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
+	unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);       /* implies memory barrier  */
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
+		unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+extern void *set_exception_table_vec(unsigned int vec, void *handler);
+
+static inline void *set_exception_table_evt(unsigned int evt, void *handler)
+{
+	return set_exception_table_vec(evt >> 5, handler);
+}
+
+/* XXX
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+#define arch_align_stack(x) (x)
+
+#endif
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
new file mode 100644
index 0000000..fd00dbb
--- /dev/null
+++ b/include/asm-sh/types.h
@@ -0,0 +1,59 @@
+#ifndef __ASM_SH_TYPES_H
+#define __ASM_SH_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#ifndef __ASSEMBLY__
+
+
+typedef __signed__ char s8;
+typedef unsigned char u8;
+
+typedef __signed__ short s16;
+typedef unsigned short u16;
+
+typedef __signed__ int s32;
+typedef unsigned int u32;
+
+typedef __signed__ long long s64;
+typedef unsigned long long u64;
+
+/* Dma addresses are 32-bits wide.  */
+
+typedef u32 dma_addr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_SH_TYPES_H */
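
As an aside for reviewers, here is a minimal usage sketch of the
xchg()/cmpxchg() primitives declared in include/asm-sh/system.h. It is
not part of the patch: demo_lock, demo_lock_acquire() and
demo_lock_release() are made-up names, and the include line assumes
the usual asm -> asm-sh mapping.

/* Hypothetical test-and-set style lock built on the new primitives. */
#include <asm/system.h>

static volatile int demo_lock;		/* 0 = free, 1 = held */

static void demo_lock_acquire(void)
{
	/* Spin until the lock is observed free and claimed atomically. */
	while (cmpxchg(&demo_lock, 0, 1) != 0)
		;	/* busy-wait */
}

static void demo_lock_release(void)
{
	/* xchg() returns the old value; a plain store would also do here. */
	(void)xchg(&demo_lock, 0);
}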
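
A second sketch, equally hypothetical, of how jump_to_P2()/back_to_P1()
are intended to wrap cache maintenance, which must run from the
uncached P2 segment. The CCR address and the I-cache invalidate bit
below follow the SH-4 layout but are assumptions on my part, not
something the patch defines.

#define DEMO_CCR	0xff00001c	/* cache control register (SH-4, assumed) */
#define DEMO_CCR_ICI	0x00000800	/* invalidate I-cache bit (assumed) */

static void demo_invalidate_icache(void)
{
	volatile unsigned long *ccr = (volatile unsigned long *)DEMO_CCR;

	jump_to_P2();			/* execute the CCR write from uncached P2 */
	*ccr |= DEMO_CCR_ICI;
	back_to_P1();			/* issues ctrl_barrier() before returning to P1 */
}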
