[U-Boot] [PATCH] part 2 of arm64. This patch provides U-Boot with arm64 support. Currently, it works on the Foundation Model for ARMv8 or the Fast Model for ARMv8.

fenghua at phytium.com.cn
Sun Aug 11 18:05:38 CEST 2013


From: fenghua <fenghua at ligen-virtual-machine.(none)>

This patch provides U-Boot with arm64 support. Currently, it works
on the Foundation Model for ARMv8 or the Fast Model for ARMv8.

Signed-off-by: fenghua <fenghua at ligen-virtual-machine.(none)>
---
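
Note on the intended use of the defaults below (a minimal sketch, not part of
the patch): cpu.c installs only weak default implementations of
arch_cpu_init() and dram_init(), so a board port is expected to override them.
Assuming a hypothetical board file (the file name and config macros are
illustrative only), such an override could look like:

	#include <common.h>

	DECLARE_GLOBAL_DATA_PTR;

	/* overrides the weak __arch_cpu_init alias from arch/arm64/cpu/armv8/cpu.c */
	int arch_cpu_init(void)
	{
		/* board-specific early CPU/clock setup would go here */
		return 0;
	}

	/* overrides the weak __dram_init alias; report the RAM actually fitted */
	int dram_init(void)
	{
		gd->ram_size = PHYS_SDRAM_1_SIZE;	/* illustrative config macro */
		return 0;
	}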
 arch/arm64/cpu/armv8/Makefile           |   51 +++++++
 arch/arm64/cpu/armv8/cache.S            |  115 +++++++++++++++
 arch/arm64/cpu/armv8/config.mk          |   29 ++++
 arch/arm64/cpu/armv8/cpu.c              |  105 ++++++++++++++
 arch/arm64/cpu/armv8/exceptions.S       |  210 +++++++++++++++++++++++++++
 arch/arm64/cpu/armv8/start.S            |  198 ++++++++++++++++++++++++++
 arch/arm64/cpu/armv8/tlb.S              |   38 +++++
 arch/arm64/cpu/u-boot.lds               |   73 ++++++++++
 arch/arm64/dts/aemv8a.dtsi              |  234 +++++++++++++++++++++++++++++++
 arch/arm64/include/asm/arch-armv8/mmu.h |  117 ++++++++++++++++
 arch/arm64/include/asm/atomic.h         |  115 +++++++++++++++
 arch/arm64/include/asm/bitops.h         |  153 ++++++++++++++++++++
 arch/arm64/include/asm/byteorder.h      |   31 ++++
 arch/arm64/include/asm/cache.h          |   37 +++++
 arch/arm64/include/asm/config.h         |   39 ++++++
 arch/arm64/include/asm/errno.h          |    1 +
 arch/arm64/include/asm/global_data.h    |   38 +++++
 arch/arm64/include/asm/gpio.h           |    1 +
 arch/arm64/include/asm/io.h             |  193 +++++++++++++++++++++++++
 arch/arm64/include/asm/linkage.h        |   49 +++++++
 arch/arm64/include/asm/posix_types.h    |   61 ++++++++
 arch/arm64/include/asm/processor.h      |   59 ++++++++
 arch/arm64/include/asm/ptrace.h         |   58 ++++++++
 arch/arm64/include/asm/sections.h       |   27 ++++
 arch/arm64/include/asm/string.h         |   49 +++++++
 arch/arm64/include/asm/system.h         |  113 +++++++++++++++
 arch/arm64/include/asm/types.h          |   67 +++++++++
 arch/arm64/include/asm/u-boot.h         |   38 +++++
 arch/arm64/include/asm/unaligned.h      |   28 ++++
 arch/arm64/include/asm/utils.h          |   56 ++++++++
 30 files changed, 2383 insertions(+)
 create mode 100644 arch/arm64/cpu/armv8/Makefile
 create mode 100644 arch/arm64/cpu/armv8/cache.S
 create mode 100644 arch/arm64/cpu/armv8/config.mk
 create mode 100644 arch/arm64/cpu/armv8/cpu.c
 create mode 100644 arch/arm64/cpu/armv8/exceptions.S
 create mode 100644 arch/arm64/cpu/armv8/start.S
 create mode 100644 arch/arm64/cpu/armv8/tlb.S
 create mode 100644 arch/arm64/cpu/u-boot.lds
 create mode 100644 arch/arm64/dts/aemv8a.dtsi
 create mode 100644 arch/arm64/include/asm/arch-armv8/mmu.h
 create mode 100644 arch/arm64/include/asm/atomic.h
 create mode 100644 arch/arm64/include/asm/bitops.h
 create mode 100644 arch/arm64/include/asm/byteorder.h
 create mode 100644 arch/arm64/include/asm/cache.h
 create mode 100644 arch/arm64/include/asm/config.h
 create mode 100644 arch/arm64/include/asm/errno.h
 create mode 100644 arch/arm64/include/asm/global_data.h
 create mode 100644 arch/arm64/include/asm/gpio.h
 create mode 100644 arch/arm64/include/asm/io.h
 create mode 100644 arch/arm64/include/asm/linkage.h
 create mode 100644 arch/arm64/include/asm/posix_types.h
 create mode 100644 arch/arm64/include/asm/processor.h
 create mode 100644 arch/arm64/include/asm/ptrace.h
 create mode 100644 arch/arm64/include/asm/sections.h
 create mode 100644 arch/arm64/include/asm/string.h
 create mode 100644 arch/arm64/include/asm/system.h
 create mode 100644 arch/arm64/include/asm/types.h
 create mode 100644 arch/arm64/include/asm/u-boot.h
 create mode 100644 arch/arm64/include/asm/unaligned.h
 create mode 100644 arch/arm64/include/asm/utils.h
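
A second sketch, again only illustrative: the global data pointer is kept in
x18. config.mk reserves the register with -ffixed-x18 and global_data.h binds
gd to it, so any C file that touches gd simply pulls in the macro (the helper
function name below is hypothetical):

	#include <common.h>

	DECLARE_GLOBAL_DATA_PTR;	/* register volatile gd_t *gd asm ("x18") */

	static unsigned long board_ram_size(void)
	{
		/* gd lives in x18; -ffixed-x18 keeps the compiler from clobbering it */
		return gd->ram_size;
	}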

diff --git a/arch/arm64/cpu/armv8/Makefile b/arch/arm64/cpu/armv8/Makefile
new file mode 100644
index 0000000..1a8cdd7
--- /dev/null
+++ b/arch/arm64/cpu/armv8/Makefile
@@ -0,0 +1,51 @@
+#
+# Copyright (c) 2013	FengHua <fenghua at phytium.com.cn>
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+include $(TOPDIR)/config.mk
+
+LIB	= $(obj)lib$(CPU).o
+
+START	:= start.o
+
+COBJS	+= cpu.o
+
+SOBJS	+= exceptions.o
+SOBJS	+= cache.o
+SOBJS	+= tlb.o
+
+SRCS	:= $(START:.o=.S) $(COBJS:.o=.c)
+OBJS	:= $(addprefix $(obj),$(COBJS) $(SOBJS))
+START	:= $(addprefix $(obj),$(START))
+
+all:	$(obj).depend $(START) $(LIB)
+
+$(LIB):	$(OBJS)
+	$(call cmd_link_o_target, $(OBJS))
+
+#########################################################################
+
+# defines $(obj).depend target
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
+
+#########################################################################
diff --git a/arch/arm64/cpu/armv8/cache.S b/arch/arm64/cpu/armv8/cache.S
new file mode 100644
index 0000000..49eed1a
--- /dev/null
+++ b/arch/arm64/cpu/armv8/cache.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <version.h>
+#include <linux/linkage.h>
+
+/*
+ * void __flush_dcache_all(void)
+ *
+ * This "function" clean and invalidate all data cache by SET/WAY.
+ */
+ENTRY(__flush_dcache_all)
+	dsb	sy				// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1			// read clidr
+	and	x3, x0, #0x7000000		// extract loc from clidr
+	lsr	x3, x3, #23			// left align loc bit field
+	cbz	x3, finished			// if loc is 0, then no need to clean
+	mov	x10, #0				// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1		// work out 3x current cache level
+	lsr	x1, x0, x2			// extract cache type bits from clidr
+	and	x1, x1, #7			// mask of the bits for current cache only
+	cmp	x1, #2				// see what cache we have at this level
+	b.lt	skip				// skip if no cache, or just i-cache
+	msr	csselr_el1, x10			// select current cache level in csselr
+	isb					// isb to sync the new cssr&csidr
+	mrs	x1, ccsidr_el1			// read the new ccsidr
+	and	x2, x1, #7			// extract the length of the cache lines
+	add	x2, x2, #4			// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3		// find maximum number on the way size
+	clz	w5, w4				// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13		// extract max number of the index size
+loop2:
+	mov	x9, x4				// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6			// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6			// factor index number into x11
+	dc	cisw, x11			// clean & invalidate by set/way
+	subs	x9, x9, #1			// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1			// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2			// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0				// switch back to cache level 0
+	msr	csselr_el1, x10			// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ * void __flush_dcache_range(start, end)
+ *
+ * clean & invalidate data cache in the range
+ *
+ * -x0: start address
+ * -x1: end address
+ */
+ENTRY(__flush_dcache_range)
+	mrs	x3, ctr_el0			// read CTR
+	lsr	x3, x3, #16
+	and	x3, x3, #0xf			// cache line size encoding
+	mov	x2, #4				// bytes per word
+	lsl	x2, x2, x3			// actual cache line size
+
+	/* x2 = minimal cache line size in cache system */
+	sub	x3, x2, #1
+	bic	x0, x0, x3
+1:      dc	civac, x0			// clean & invalidate D line / unified line
+	add	x0, x0, x2
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+ENDPROC(__flush_dcache_range)
+
+/*
+ * void __invalidate_icache_all(void)
+ *
+ * This "function" invalidate all tlb entries.
+ */
+ENTRY(__invalidate_icache_all)
+	ic	ialluis
+	isb	sy
+	ret
+ENDPROC(__invalidate_icache_all)
diff --git a/arch/arm64/cpu/armv8/config.mk b/arch/arm64/cpu/armv8/config.mk
new file mode 100644
index 0000000..113075c
--- /dev/null
+++ b/arch/arm64/cpu/armv8/config.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2013	FengHua <fenghua at phytium.com.cn>
+#
+# See file CREDITS for list of people who contributed to this
+# project.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+PLATFORM_RELFLAGS += -fno-common -ffixed-x18
+
+PF_CPPFLAGS_ARMV8 := $(call cc-option, -march=armv8-a)
+PLATFORM_CPPFLAGS += $(PF_CPPFLAGS_ARMV8)
+
+# SEE README.arm-unaligned-accesses
+PF_NO_UNALIGNED := $(call cc-option, -mstrict-align)
+PLATFORM_NO_UNALIGNED := $(PF_NO_UNALIGNED)
diff --git a/arch/arm64/cpu/armv8/cpu.c b/arch/arm64/cpu/armv8/cpu.c
new file mode 100644
index 0000000..a6bea74
--- /dev/null
+++ b/arch/arm64/cpu/armv8/cpu.c
@@ -0,0 +1,105 @@
+/*
+ * (C) Copyright 2013  David Feng <fenghua at phytium.com.cn>
+ *
+ * Based on arch/arm/armv7/cpu.c by
+ *       Marius Groeger <mgroeger at sysgo.de> and
+ *       Gary Jennejohn <garyj at denx.de>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <command.h>
+#include <asm/cache.h>
+#include <linux/compiler.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Default implementation of arch_cpu_init()
+ * Real implementation should be in platform code
+ */
+int __arch_cpu_init(void)
+{
+	return 0;
+}
+int arch_cpu_init(void)
+	__attribute__((weak, alias("__arch_cpu_init")));
+
+/*
+ * Default implementation of dram_init()
+ * Real implementation should be in platform code
+ */
+int __dram_init(void)
+{
+	int i;
+
+	gd->ram_size = PHYS_SDRAM_1_SIZE;
+
+	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
+	gd->bd->bi_dram[0].size =  PHYS_SDRAM_1_SIZE;
+
+	puts("RAM Configuration:\n");
+
+	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+		printf("    Bank #%d: %08lx ", i, gd->bd->bi_dram[i].start);
+		print_size(gd->bd->bi_dram[i].size, "\n");
+	}
+
+	return 0;
+}
+int dram_init(void)
+	__attribute__((weak, alias("__dram_init")));
+
+
+int cleanup_before_linux(void)
+{
+	/*
+	 * this function is called just before we call linux
+	 * it prepares the processor for linux
+	 */
+	disable_interrupts();
+
+	/*
+	 * Turn off I-cache and invalidate it
+	 */
+	icache_disable();
+	invalidate_icache_all();
+
+	/*
+	 * turn off D-cache
+	 * dcache_disable() in turn flushes the d-cache and disables MMU
+	 */
+	dcache_disable();
+	outer_cache_disable();
+
+	/*
+	 * After D-cache is flushed and before it is disabled there may
+	 * be some new valid entries brought into the cache. We are sure
+	 * that these lines are not dirty and will not affect our execution.
+	 * (because unwinding the call-stack and setting a bit in SCTLR
+	 * is all we did during this. We have not pushed anything on to the
+	 * stack. Neither have we affected any static data)
+	 * So just invalidate the entire d-cache again to avoid coherency
+	 * problems for kernel
+	 */
+	invalidate_dcache_all();
+
+	return 0;
+}
diff --git a/arch/arm64/cpu/armv8/exceptions.S b/arch/arm64/cpu/armv8/exceptions.S
new file mode 100644
index 0000000..376df49
--- /dev/null
+++ b/arch/arm64/cpu/armv8/exceptions.S
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013	FengHua <fenghua at phytium.com.cn>
+ *
+ * Based on entry.S of linux kernel by
+ *     Catalin Marinas <catalin.marinas at arm.com> and
+ *     Will Deacon <will.deacon at arm.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <version.h>
+#include <asm/ptrace.h>
+#include <linux/linkage.h>
+
+/*
+ * Bad Abort numbers
+ */
+#define BAD_SYNC	0
+#define BAD_IRQ		1
+#define BAD_FIQ		2
+#define BAD_ERROR	3
+
+.macro	kernel_entry, el, regsize = 64
+	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
+	.if	\regsize == 32
+	mov	w0, w0				// zero upper 32 bits of x0
+	.endif
+	push	x28, x29
+	push	x26, x27
+	push	x24, x25
+	push	x22, x23
+	push	x20, x21
+	push	x18, x19
+	push	x16, x17
+	push	x14, x15
+	push	x12, x13
+	push	x10, x11
+	push	x8, x9
+	push	x6, x7
+	push	x4, x5
+	push	x2, x3
+	push	x0, x1
+	.if	\el == 1
+	mrs	x21, sp_el1
+	.else
+	add	x21, sp, #S_FRAME_SIZE
+	.endif
+	mrs	x22, elr_el2
+	mrs	x23, spsr_el2
+	stp	lr, x21, [sp, #S_LR]
+	stp	x22, x23, [sp, #S_PC]
+
+	/*
+	 * Set syscallno to -1 by default (overridden later if real syscall).
+	 */
+	.if	\el == 1
+	mvn	x21, xzr
+	str	x21, [sp, #S_SYSCALLNO]
+	.endif
+
+	/*
+	 * Registers that may be useful after this macro is invoked:
+	 *
+	 * x21 - aborted SP
+	 * x22 - aborted PC
+	 * x23 - aborted PSTATE
+	*/
+.endm
+
+.macro	kernel_exit, el, ret = 0
+	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+	.if	\el == 1
+	ldr	x23, [sp, #S_SP]		// load return stack pointer
+	.endif
+	.if	\ret
+	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
+	add	sp, sp, S_X2
+	.else
+	pop	x0, x1
+	.endif
+	pop	x2, x3				// load the rest of the registers
+	pop	x4, x5
+	pop	x6, x7
+	pop	x8, x9
+	msr	elr_el1, x21			// set up the return data
+	msr	spsr_el1, x22
+	.if	\el == 1
+	msr	sp_el1, x23
+	.endif
+	pop	x10, x11
+	pop	x12, x13
+	pop	x14, x15
+	pop	x16, x17
+	pop	x18, x19
+	pop	x20, x21
+	pop	x22, x23
+	pop	x24, x25
+	pop	x26, x27
+	pop	x28, x29
+	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
+	eret					// return to kernel
+.endm
+
+/*
+ * Exception vectors.
+ */
+	.align	11
+ENTRY(vectors)
+	ventry	el2_sync_invalid		// Synchronous EL2t
+	ventry	el2_irq_invalid			// IRQ EL2t
+	ventry	el2_fiq_invalid			// FIQ EL2t
+	ventry	el2_error_invalid		// Error EL2t
+
+	ventry	el2_sync			// Synchronous EL2h
+	ventry	el2_irq_invalid			// IRQ EL2h
+	ventry	el2_fiq_invalid			// FIQ EL2h
+	ventry	el2_error			// Error EL2h
+
+	ventry	el1_sync_invalid		// Synchronous 64-bit EL1
+	ventry	el1_irq_invalid			// IRQ 64-bit EL1
+	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
+	ventry	el1_error_invalid		// Error 64-bit EL1
+
+	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
+	ventry	el1_irq_invalid			// IRQ 32-bit EL1
+	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
+	ventry	el1_error_invalid		// Error 32-bit EL1
+END(vectors)
+
+
+/*
+ * Invalid mode handlers
+ */
+.macro	inv_entry, el, reason, regsize = 64
+	kernel_entry el, \regsize
+	mov	x0, sp
+	mov	x1, #\reason
+	mrs	x2, esr_el2
+	b	bad_mode
+.endm
+
+/*
+ * el1 invalid entry
+ */
+el1_sync_invalid:
+	inv_entry 1, BAD_SYNC
+ENDPROC(el1_sync_invalid)
+
+el1_irq_invalid:
+	inv_entry 1, BAD_IRQ
+ENDPROC(el1_irq_invalid)
+
+el1_fiq_invalid:
+	inv_entry 1, BAD_FIQ
+ENDPROC(el1_fiq_invalid)
+
+el1_error_invalid:
+	inv_entry 1, BAD_ERROR
+ENDPROC(el1_error_invalid)
+
+/*
+ * el2 invalid entry
+ */
+el2_sync_invalid:
+	inv_entry 2, BAD_SYNC
+ENDPROC(el2_sync_invalid)
+
+el2_irq_invalid:
+	inv_entry 2, BAD_IRQ
+ENDPROC(el2_irq_invalid)
+
+el2_fiq_invalid:
+	inv_entry 2, BAD_FIQ
+ENDPROC(el2_fiq_invalid)
+
+el2_error_invalid:
+	inv_entry 2, BAD_ERROR
+ENDPROC(el2_error_invalid)
+
+
+/*
+ * EL2 mode handlers.
+ */
+	.align	6
+el2_sync:
+	inv_entry 2, BAD_SYNC
+ENDPROC(el2_sync)
+
+	.align	6
+el2_error:
+	inv_entry 2, BAD_ERROR
+ENDPROC(el2_error)
diff --git a/arch/arm64/cpu/armv8/start.S b/arch/arm64/cpu/armv8/start.S
new file mode 100644
index 0000000..4a08449
--- /dev/null
+++ b/arch/arm64/cpu/armv8/start.S
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <version.h>
+#include <linux/linkage.h>
+#include <asm/arch/mmu.h>
+
+/*************************************************************************
+ *
+ * Startup Code (reset vector)
+ *
+ *************************************************************************/
+
+.globl _start
+_start:
+	b	reset
+
+	.align 3
+
+.globl _TEXT_BASE
+_TEXT_BASE:
+	.quad	CONFIG_SYS_TEXT_BASE
+
+.globl	_end_ofs
+_end_ofs:
+	.quad	_end - _start
+
+.globl	_bss_start_ofs
+_bss_start_ofs:
+	.quad	__bss_start - _start
+
+.globl	_bss_end_ofs
+_bss_end_ofs:
+	.quad	__bss_end - _start
+
+reset:
+	/*
+	 * EL3 initialisation
+	 */
+	mrs	x0, CurrentEL
+	cmp	x0, #0xc			// EL3?
+	b.ne	reset_nonsecure			// skip EL3 initialisation
+
+	mov	x0, #0x30			// RES1
+	orr	x0, x0, #(1 << 0)		// Non-secure EL1
+	orr	x0, x0, #(1 << 8)		// HVC enable
+	orr	x0, x0, #(1 << 10)		// 64-bit EL2
+	msr	scr_el3, x0
+
+	msr	cptr_el3, xzr			// Disable copro. traps to EL3
+
+	/* Counter frequency initialisation */
+	ldr	x0, =CONFIG_SYS_CNTFRQ
+	msr	cntfrq_el0, x0
+
+	/* GIC initialisation */
+	mrs	x0, mpidr_el1
+	tst	x0, #15
+	b.ne	1f				// secondary CPU
+
+	ldr	x1, =GIC_DIST_BASE		// GICD_CTLR
+	mov	w0, #3				// EnableGrp0 | EnableGrp1
+	str	w0, [x1]
+
+1:	ldr	x1, =GIC_DIST_BASE + 0x80	// GICD_IGROUPR
+	mov	w0, #~0				// Grp1 interrupts
+	str	w0, [x1], #4
+	b.ne	2f				// Only local interrupts for secondary CPUs
+	str	w0, [x1], #4
+	str	w0, [x1], #4
+
+2:	ldr	x1, =GIC_CPU_BASE		// GICC_CTLR
+	ldr	w0, [x1]
+	mov	w0, #3				// EnableGrp0 | EnableGrp1
+	str	w0, [x1]
+
+	mov	w0, #1 << 7			// allow NS access to GICC_PMR
+	str	w0, [x1, #4]			// GICC_PMR
+
+	/* SCTLR_EL2 initialisation */
+	msr	sctlr_el2, xzr
+
+	/* Return to the EL2_SP1 mode from EL3 */
+	adr	x0, reset_nonsecure
+	mov	x1, #0x3c9			// EL2_SP1 | D | A | I | F
+	msr	elr_el3, x0
+	msr	spsr_el3, x1
+	eret
+
+	/*
+	 * EL2 initialisation:
+	 * MMU Disabled, iCache Disabled, dCache Disabled
+	 */
+reset_nonsecure:
+	/* SCTLR_EL2 initialisation */
+	mov	x0, #0x2			// Alignment check enable
+	msr	sctlr_el2, x0
+
+	/* Initialize vBAR */
+	adr	x0, vectors
+	msr	vbar_el2, x0
+
+	/* Cache/BPB/TLB Invalidate */
+	bl	__flush_dcache_all	// dCache invalidate
+	bl	__invalidate_icache_all	// iCache invalidate
+	bl	__invalidate_tlb_all	// invalidate I + D TLBs
+
+	/* Processor specific initialisation */
+#ifndef CONFIG_SKIP_LOWLEVEL_INIT
+	bl	lowlevel_init
+#endif
+
+	mrs	x0, mpidr_el1
+	tst	x0, #15
+	b.eq	master_cpu
+
+	/*
+	 * Secondary CPUs
+	 */
+slave_cpu:
+
+	wfe
+	ldr	x1, =SECONDARY_CPU_MAILBOX
+	ldr	x0, [x1]
+	cbz	x0, slave_cpu
+	br	x0				// branch to the given address
+
+	/*
+	 * Primary CPU
+	 */
+master_cpu:
+
+	bl	_main
+
+/*------------------------------------------------------------------------------*/
+
+ENTRY(c_runtime_cpu_setup)
+	/* If I-cache is enabled invalidate it */
+#ifndef CONFIG_SYS_ICACHE_OFF
+	ic	iallu			/* I+BTB cache invalidate */
+	isb	sy
+#endif
+
+#ifndef CONFIG_SYS_DCACHE_OFF
+	/*
+	 * Memory region attributes for LPAE:
+	 *
+	 *   n = AttrIndx[2:0]
+	 *                      n       MAIR
+	 *   DEVICE_nGnRnE      000     00000000
+	 *   DEVICE_nGnRE       001     00000100
+	 *   DEVICE_GRE         010     00001100
+	 *   NORMAL_NC          011     01000100
+	 *   NORMAL             100     11111111
+	 */
+	ldr	x0, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
+		     MAIR(0x04, MT_DEVICE_nGnRE) | \
+		     MAIR(0x0c, MT_DEVICE_GRE) | \
+		     MAIR(0x44, MT_NORMAL_NC) | \
+		     MAIR(0xff, MT_NORMAL)
+	msr     mair_el2, x0
+
+	/*
+	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
+	 * both user and kernel.
+	 */
+	ldr     x0, =TCR_T0SZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT
+	orr     x0, x0, TCR_TG0_64K
+	msr     tcr_el2, x0
+#endif
+
+	/* Relocate vBAR */
+	adr	x0, vectors
+	msr	vbar_el2, x0
+
+	ret
+ENDPROC(c_runtime_cpu_setup)
diff --git a/arch/arm64/cpu/armv8/tlb.S b/arch/arm64/cpu/armv8/tlb.S
new file mode 100644
index 0000000..e28ee9d
--- /dev/null
+++ b/arch/arm64/cpu/armv8/tlb.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <version.h>
+#include <linux/linkage.h>
+
+/*
+ * void __invalidate_tlb_all(void)
+ *
+ * This "function" invalidate all tlb entries.
+ */
+ENTRY(__invalidate_tlb_all)
+	tlbi	alle2
+	dsb		sy
+	isb
+	ret
+ENDPROC(__invalidate_tlb_all)
diff --git a/arch/arm64/cpu/u-boot.lds b/arch/arm64/cpu/u-boot.lds
new file mode 100644
index 0000000..98d01d2
--- /dev/null
+++ b/arch/arm64/cpu/u-boot.lds
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013	FengHua <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+ENTRY(_start)
+SECTIONS
+{
+	. = ALIGN(8);
+	.text :
+	{
+		__image_copy_start = .;
+		CPUDIR/start.o (.text*)
+		*(.text*)
+	}
+
+	. = ALIGN(8);
+	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }
+
+	. = ALIGN(8);
+	.data : {
+		*(.data*)
+	}
+
+	. = ALIGN(8);
+	.u_boot_list : {
+		KEEP(*(SORT(.u_boot_list*)));
+	}
+
+	. = ALIGN(8);
+	.reloc : {
+		__rel_got_start = .;
+		*(.got)
+		__rel_got_end = .;
+	}
+
+	__image_copy_end = .;
+	_end = .;
+
+	. = ALIGN(8);
+	.bss : {
+		__bss_start = .;
+		*(.bss*)
+		 . = ALIGN(8);
+		__bss_end = .;
+	}
+
+	/DISCARD/ : { *(.dynsym) }
+	/DISCARD/ : { *(.dynstr*) }
+	/DISCARD/ : { *(.dynamic*) }
+	/DISCARD/ : { *(.plt*) }
+	/DISCARD/ : { *(.interp*) }
+	/DISCARD/ : { *(.gnu*) }
+}
diff --git a/arch/arm64/dts/aemv8a.dtsi b/arch/arm64/dts/aemv8a.dtsi
new file mode 100644
index 0000000..b45e5f3
--- /dev/null
+++ b/arch/arm64/dts/aemv8a.dtsi
@@ -0,0 +1,234 @@
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Versatile Express (VE) system model
+ * Motherboard component
+ *
+ * VEMotherBoard.lisa
+ */
+
+	motherboard {
+		arm,v2m-memory-map = "rs1";
+		compatible = "arm,vexpress,v2m-p1", "simple-bus";
+		#address-cells = <2>; /* SMB chipselect number and offset */
+		#size-cells = <1>;
+		#interrupt-cells = <1>;
+		ranges;
+
+		flash at 0,00000000 {
+			compatible = "arm,vexpress-flash", "cfi-flash";
+			reg = <0 0x00000000 0x04000000>,
+			      <4 0x00000000 0x04000000>;
+			bank-width = <4>;
+		};
+
+		vram at 2,00000000 {
+			compatible = "arm,vexpress-vram";
+			reg = <2 0x00000000 0x00800000>;
+		};
+
+		ethernet at 2,02000000 {
+			compatible = "smsc,lan91c111";
+			reg = <2 0x02000000 0x10000>;
+			interrupts = <15>;
+		};
+
+		v2m_clk24mhz: clk24mhz {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <24000000>;
+			clock-output-names = "v2m:clk24mhz";
+		};
+
+		v2m_refclk1mhz: refclk1mhz {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <1000000>;
+			clock-output-names = "v2m:refclk1mhz";
+		};
+
+		v2m_refclk32khz: refclk32khz {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <32768>;
+			clock-output-names = "v2m:refclk32khz";
+		};
+
+		iofpga at 3,00000000 {
+			compatible = "arm,amba-bus", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 3 0 0x200000>;
+
+			v2m_sysreg: sysreg at 010000 {
+				compatible = "arm,vexpress-sysreg";
+				reg = <0x010000 0x1000>;
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+
+			v2m_sysctl: sysctl at 020000 {
+				compatible = "arm,sp810", "arm,primecell";
+				reg = <0x020000 0x1000>;
+				clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&v2m_clk24mhz>;
+				clock-names = "refclk", "timclk", "apb_pclk";
+				#clock-cells = <1>;
+				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+			};
+
+			aaci at 040000 {
+				compatible = "arm,pl041", "arm,primecell";
+				reg = <0x040000 0x1000>;
+				interrupts = <11>;
+				clocks = <&v2m_clk24mhz>;
+				clock-names = "apb_pclk";
+			};
+
+			mmci at 050000 {
+				compatible = "arm,pl180", "arm,primecell";
+				reg = <0x050000 0x1000>;
+				interrupts = <9 10>;
+				cd-gpios = <&v2m_sysreg 0 0>;
+				wp-gpios = <&v2m_sysreg 1 0>;
+				max-frequency = <12000000>;
+				vmmc-supply = <&v2m_fixed_3v3>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "mclk", "apb_pclk";
+			};
+
+			kmi at 060000 {
+				compatible = "arm,pl050", "arm,primecell";
+				reg = <0x060000 0x1000>;
+				interrupts = <12>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "KMIREFCLK", "apb_pclk";
+			};
+
+			kmi at 070000 {
+				compatible = "arm,pl050", "arm,primecell";
+				reg = <0x070000 0x1000>;
+				interrupts = <13>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "KMIREFCLK", "apb_pclk";
+			};
+
+			v2m_serial0: uart at 090000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x090000 0x1000>;
+				interrupts = <5>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "uartclk", "apb_pclk";
+			};
+
+			v2m_serial1: uart at 0a0000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x0a0000 0x1000>;
+				interrupts = <6>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "uartclk", "apb_pclk";
+			};
+
+			v2m_serial2: uart at 0b0000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x0b0000 0x1000>;
+				interrupts = <7>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "uartclk", "apb_pclk";
+			};
+
+			v2m_serial3: uart at 0c0000 {
+				compatible = "arm,pl011", "arm,primecell";
+				reg = <0x0c0000 0x1000>;
+				interrupts = <8>;
+				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+				clock-names = "uartclk", "apb_pclk";
+			};
+
+			wdt at 0f0000 {
+				compatible = "arm,sp805", "arm,primecell";
+				reg = <0x0f0000 0x1000>;
+				interrupts = <0>;
+				clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
+				clock-names = "wdogclk", "apb_pclk";
+			};
+
+			v2m_timer01: timer at 110000 {
+				compatible = "arm,sp804", "arm,primecell";
+				reg = <0x110000 0x1000>;
+				interrupts = <2>;
+				clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
+				clock-names = "timclken1", "timclken2", "apb_pclk";
+			};
+
+			v2m_timer23: timer at 120000 {
+				compatible = "arm,sp804", "arm,primecell";
+				reg = <0x120000 0x1000>;
+				interrupts = <3>;
+				clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
+				clock-names = "timclken1", "timclken2", "apb_pclk";
+			};
+
+			rtc at 170000 {
+				compatible = "arm,pl031", "arm,primecell";
+				reg = <0x170000 0x1000>;
+				interrupts = <4>;
+				clocks = <&v2m_clk24mhz>;
+				clock-names = "apb_pclk";
+			};
+
+			clcd at 1f0000 {
+				compatible = "arm,pl111", "arm,primecell";
+				reg = <0x1f0000 0x1000>;
+				interrupts = <14>;
+				clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
+				clock-names = "clcdclk", "apb_pclk";
+			};
+		};
+
+		v2m_fixed_3v3: fixedregulator at 0 {
+			compatible = "regulator-fixed";
+			regulator-name = "3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		mcc {
+			compatible = "arm,vexpress,config-bus", "simple-bus";
+			arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+			v2m_oscclk1: osc at 1 {
+				/* CLCD clock */
+				compatible = "arm,vexpress-osc";
+				arm,vexpress-sysreg,func = <1 1>;
+				freq-range = <23750000 63500000>;
+				#clock-cells = <0>;
+				clock-output-names = "v2m:oscclk1";
+			};
+
+			reset at 0 {
+				compatible = "arm,vexpress-reset";
+				arm,vexpress-sysreg,func = <5 0>;
+			};
+
+			muxfpga at 0 {
+				compatible = "arm,vexpress-muxfpga";
+				arm,vexpress-sysreg,func = <7 0>;
+			};
+
+			shutdown at 0 {
+				compatible = "arm,vexpress-shutdown";
+				arm,vexpress-sysreg,func = <8 0>;
+			};
+
+			reboot at 0 {
+				compatible = "arm,vexpress-reboot";
+				arm,vexpress-sysreg,func = <9 0>;
+			};
+
+			dvimode at 0 {
+				compatible = "arm,vexpress-dvimode";
+				arm,vexpress-sysreg,func = <11 0>;
+			};
+		};
+	};
diff --git a/arch/arm64/include/asm/arch-armv8/mmu.h b/arch/arm64/include/asm/arch-armv8/mmu.h
new file mode 100644
index 0000000..2127649
--- /dev/null
+++ b/arch/arm64/include/asm/arch-armv8/mmu.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARMV8_MMU_H_
+#define _ASM_ARMV8_MMU_H_
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y)        X
+#define _AT(T,X)        X
+#else
+#define __AC(X,Y)       (X##Y)
+#define _AC(X,Y)        __AC(X,Y)
+#define _AT(T,X)        ((T)(X))
+#endif
+
+#define UL(x) _AC(x, UL)
+
+/***************************************************************/
+/*
+ * The following definitions are related to each other and should be
+ * calculated consistently.
+ */
+#define VA_BITS                 (39)
+
+/* PAGE_SHIFT determines the page size */
+#undef  PAGE_SIZE
+#define PAGE_SHIFT              16
+#define PAGE_SIZE               (1 << PAGE_SHIFT)
+#define PAGE_MASK               (~(PAGE_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT           29
+#define SECTION_SIZE            (_AC(1, UL) << SECTION_SHIFT)
+#define SECTION_MASK            (~(SECTION_SIZE-1))
+/***************************************************************/
+
+/*
+ * Memory types available.
+ */
+#define MT_DEVICE_nGnRnE        0
+#define MT_DEVICE_nGnRE         1
+#define MT_DEVICE_GRE           2
+#define MT_NORMAL_NC            3
+#define MT_NORMAL               4
+
+#define MAIR(attr, mt)          ((attr) << ((mt) * 8))
+
+/*
+ * Hardware page table definitions.
+ *
+ * Level 2 descriptor (PMD).
+ */
+#define PMD_TYPE_MASK           (3 << 0)
+#define PMD_TYPE_FAULT          (0 << 0)
+#define PMD_TYPE_TABLE          (3 << 0)
+#define PMD_TYPE_SECT           (1 << 0)
+
+/*
+ * Section
+ */
+#define PMD_SECT_S              (UL(3) << 8)
+#define PMD_SECT_AF             (UL(1) << 10)
+#define PMD_SECT_NG             (UL(1) << 11)
+#define PMD_SECT_PXN            (UL(1) << 53)
+#define PMD_SECT_UXN            (UL(1) << 54)
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_ATTRINDX(t)         ((t) << 2)
+#define PMD_ATTRINDX_MASK       (7 << 2)
+
+/*
+ * TCR_EL2 flags.
+ */
+#define TCR_T0SZ(x)             ((64 - (x)) << 0)
+#define TCR_IRGN_NC             (0 << 8)
+#define TCR_IRGN_WBWA           (1 << 8)
+#define TCR_IRGN_WT             (2 << 8)
+#define TCR_IRGN_WBnWA          (3 << 8)
+#define TCR_IRGN_MASK           (3 << 8)
+#define TCR_ORGN_NC             (0 << 10)
+#define TCR_ORGN_WBWA           (1 << 10)
+#define TCR_ORGN_WT             (2 << 10)
+#define TCR_ORGN_WBnWA          (3 << 10)
+#define TCR_ORGN_MASK           (3 << 10)
+#define TCR_SHARED              (3 << 12)
+#define TCR_TG0_4K              (0 << 14)
+#define TCR_TG0_64K             (1 << 14)
+#define TCR_TG0_16K             (2 << 14)
+#define TCR_IPS_40BIT           (2 << 16)
+
+/* PTWs cacheable, inner/outer WBWA not shareable */
+#define TCR_FLAGS               (TCR_IRGN_WBWA | TCR_ORGN_WBWA)
+
+#endif /* _ASM_ARMV8_MMU_H_ */
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
new file mode 100644
index 0000000..2117ec1
--- /dev/null
+++ b/arch/arm64/include/asm/atomic.h
@@ -0,0 +1,115 @@
+/*
+ *  linux/include/asm-arm/atomic.h
+ *
+ *  Copyright (c) 1996 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Changelog:
+ *   27-06-1996	RMK	Created
+ *   13-04-1997	RMK	Made functions atomic!
+ *   07-12-1997	RMK	Upgraded for v2.1.
+ *   26-08-1998	PJB	Added #ifdef __KERNEL__
+ */
+#ifndef __ASM_ARM64_ATOMIC_H
+#define __ASM_ARM64_ATOMIC_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#error SMP not supported
+#endif
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+#include <asm/system.h>
+
+#define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+static inline void atomic_add(int i, volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter += 1;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter -= 1;
+	local_irq_restore(flags);
+}
+
+static inline int atomic_dec_and_test(volatile atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= 1;
+	local_irq_restore(flags);
+
+	return val == 0;
+}
+
+static inline int atomic_add_negative(int i, volatile atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	local_irq_restore(flags);
+
+	return val < 0;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*addr &= ~mask;
+	local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARM64_ATOMIC_H */
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
new file mode 100644
index 0000000..2be9743
--- /dev/null
+++ b/arch/arm64/include/asm/bitops.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_BITOPS_H
+#define __ASM_ARM64_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <asm/system.h>
+
+/*
+ * Function prototypes to keep gcc -Wall happy.
+ */
+static inline void __change_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
+}
+
+static inline int __test_and_set_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+	unsigned long flags;
+	int out;
+
+	local_irq_save(flags);
+	out = __test_and_set_bit(nr, addr);
+	local_irq_restore(flags);
+
+	return out;
+}
+
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+	unsigned long flags;
+	int out;
+
+	local_irq_save(flags);
+	out = __test_and_clear_bit(nr, addr);
+	local_irq_restore(flags);
+
+	return out;
+}
+
+static inline int __test_and_change_bit(int nr, volatile void *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+	unsigned long flags;
+	int out;
+
+	local_irq_save(flags);
+	out = __test_and_change_bit(nr, addr);
+	local_irq_restore(flags);
+
+	return out;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int test_bit(int nr, const void * addr)
+{
+    return ((unsigned char *) addr)[nr >> 3] & (1U << (nr & 7));
+}
+
+static inline int __ilog2(unsigned int x)
+{
+	return generic_fls(x) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+	int k;
+
+	word = ~word;
+	k = 31;
+	if (word & 0x0000ffff) { k -= 16; word <<= 16; }
+	if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
+	if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
+	if (word & 0x30000000) { k -= 2;  word <<= 2;  }
+	if (word & 0x40000000) { k -= 1; }
+	return k;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#define ext2_set_bit			test_and_set_bit
+#define ext2_clear_bit			test_and_clear_bit
+#define ext2_test_bit			test_bit
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARM64_BITOPS_H */
diff --git a/arch/arm64/include/asm/byteorder.h b/arch/arm64/include/asm/byteorder.h
new file mode 100644
index 0000000..d5edfe8
--- /dev/null
+++ b/arch/arm64/include/asm/byteorder.h
@@ -0,0 +1,31 @@
+/*
+ *  linux/include/asm-arm/byteorder.h
+ *
+ * ARM Endian-ness.  In little endian mode, the data bus is connected such
+ * that byte accesses appear as:
+ *  0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31
+ * and word accesses (data or instruction) appear as:
+ *  d0...d31
+ *
+ * When in big endian mode, byte accesses appear as:
+ *  0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7
+ * and word accesses (data or instruction) appear as:
+ *  d0...d31
+ */
+#ifndef __ASM_ARM64_BYTEORDER_H
+#define __ASM_ARM64_BYTEORDER_H
+
+#include <asm/types.h>
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __BIG_ENDIAN__
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARM64_BYTEORDER_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
new file mode 100644
index 0000000..d409b0e
--- /dev/null
+++ b/arch/arm64/include/asm/cache.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM64_CACHE_H
+#define _ASM_ARM64_CACHE_H
+
+/*
+ * The current upper bound for ARM L1 data cache line sizes is 64 bytes.  We
+ * use that value for aligning DMA buffers unless the board config has specified
+ * an alternate cache line size.
+ */
+#ifdef CONFIG_SYS_CACHELINE_SIZE
+#define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE
+#else
+#define ARCH_DMA_MINALIGN	64
+#endif
+
+#endif /* _ASM_ARM64_CACHE_H */
diff --git a/arch/arm64/include/asm/config.h b/arch/arm64/include/asm/config.h
new file mode 100644
index 0000000..5e79aab
--- /dev/null
+++ b/arch/arm64/include/asm/config.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM64_CONFIG_H_
+#define _ASM_ARM64_CONFIG_H_
+
+#define CONFIG_LMB
+
+#define CONFIG_SYS_BOOT_RAMDISK_HIGH
+
+/*
+ * Currently, gcc for aarch64 produces an error when compiling with
+ * -pie and rel.dyn relocations, so the GOT is used to relocate U-Boot
+ * and CONFIG_NEEDS_MANUAL_RELOC must be defined.
+ */
+#define CONFIG_NEEDS_MANUAL_RELOC
+
+#define CONFIG_SYS_HZ		1000
+
+#endif /* _ASM_ARM64_CONFIG_H_ */
diff --git a/arch/arm64/include/asm/errno.h b/arch/arm64/include/asm/errno.h
new file mode 100644
index 0000000..4c82b50
--- /dev/null
+++ b/arch/arm64/include/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/arch/arm64/include/asm/global_data.h b/arch/arm64/include/asm/global_data.h
new file mode 100644
index 0000000..13b66d4
--- /dev/null
+++ b/arch/arm64/include/asm/global_data.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef	__ASM_ARM64_GBL_DATA_H
+#define __ASM_ARM64_GBL_DATA_H
+
+/* Architecture-specific global data */
+struct arch_global_data {
+#ifndef CONFIG_SYS_DCACHE_OFF
+	unsigned long tlb_addr;
+	unsigned long tlb_size;
+#endif
+};
+
+#include <asm-generic/global_data.h>
+
+#define DECLARE_GLOBAL_DATA_PTR		register volatile gd_t *gd asm ("x18")
+
+#endif /* __ASM_ARM64_GBL_DATA_H */
diff --git a/arch/arm64/include/asm/gpio.h b/arch/arm64/include/asm/gpio.h
new file mode 100644
index 0000000..306ab4c
--- /dev/null
+++ b/arch/arm64/include/asm/gpio.h
@@ -0,0 +1 @@
+#include <asm-generic/gpio.h>
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
new file mode 100644
index 0000000..398ae53
--- /dev/null
+++ b/arch/arm64/include/asm/io.h
@@ -0,0 +1,193 @@
+/*
+ *  linux/include/asm-arm/io.h
+ *
+ *  Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Modifications:
+ *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
+ *			constant addresses and variable addresses.
+ *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
+ *			specific IO header files.
+ *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
+ *  04-Apr-1999	PJB	Added check_signature.
+ *  12-Dec-1999	RMK	More cleanups
+ *  18-Jun-2000 RMK	Removed virt_to_* and friends definitions
+ */
+
+#ifndef __ASM_ARM64_IO_H
+#define __ASM_ARM64_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/*
+ * Given a physical address and a length, return a virtual address
+ * that can be used to access the memory range with the caching
+ * properties specified by "flags".
+ */
+#define MAP_NOCACHE	(0)
+#define MAP_WRCOMBINE	(0)
+#define MAP_WRBACK	(0)
+#define MAP_WRTHROUGH	(0)
+
+static inline void *
+map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
+{
+	return (void *)paddr;
+}
+
+/*
+ * Take down a mapping set up by map_physmem().
+ */
+static inline void unmap_physmem(void *vaddr, unsigned long flags)
+{
+}
+
+static inline phys_addr_t virt_to_phys(void * vaddr)
+{
+	return (phys_addr_t)(vaddr);
+}
+
+/*
+ * Generic virtual read/write.  Note that we don't support half-word
+ * read/writes.  We define __arch_*[bl] here, and leave __arch_*w
+ * to the architecture specific code.
+ */
+#define __arch_getb(a)			(*(volatile unsigned char *)(a))
+#define __arch_getw(a)			(*(volatile unsigned short *)(a))
+#define __arch_getl(a)			(*(volatile unsigned int *)(a))
+
+#define __arch_putb(v,a)		(*(volatile unsigned char *)(a) = (v))
+#define __arch_putw(v,a)		(*(volatile unsigned short *)(a) = (v))
+#define __arch_putl(v,a)		(*(volatile unsigned int *)(a) = (v))
+
+extern inline void __raw_writesb(unsigned long addr, const void *data, int bytelen)
+{
+	uint8_t *buf = (uint8_t *)data;
+	while(bytelen--)
+		__arch_putb(*buf++, addr);
+}
+
+extern inline void __raw_writesw(unsigned long addr, const void *data, int wordlen)
+{
+	uint16_t *buf = (uint16_t *)data;
+	while(wordlen--)
+		__arch_putw(*buf++, addr);
+}
+
+extern inline void __raw_writesl(unsigned long addr, const void *data, int longlen)
+{
+	uint32_t *buf = (uint32_t *)data;
+	while(longlen--)
+		__arch_putl(*buf++, addr);
+}
+
+extern inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
+{
+	uint8_t *buf = (uint8_t *)data;
+	while(bytelen--)
+		*buf++ = __arch_getb(addr);
+}
+
+extern inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
+{
+	uint16_t *buf = (uint16_t *)data;
+	while(wordlen--)
+		*buf++ = __arch_getw(addr);
+}
+
+extern inline void __raw_readsl(unsigned long addr, void *data, int longlen)
+{
+	uint32_t *buf = (uint32_t *)data;
+	while(longlen--)
+		*buf++ = __arch_getl(addr);
+}
+
+#define __raw_writeb(v,a)	__arch_putb(v,a)
+#define __raw_writew(v,a)	__arch_putw(v,a)
+#define __raw_writel(v,a)	__arch_putl(v,a)
+
+#define __raw_readb(a)		__arch_getb(a)
+#define __raw_readw(a)		__arch_getw(a)
+#define __raw_readl(a)		__arch_getl(a)
+
+/*
+ * TODO: The kernel offers some more advanced versions of barriers, it might
+ * have some advantages to use them instead of the simple one here.
+ */
+#define dmb()		__asm__ __volatile__ ("" : : : "memory")
+#define __iormb()	dmb()
+#define __iowmb()	dmb()
+
+#define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
+#define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
+#define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
+
+#define readb(c)	({ u8  __v = __arch_getb(c); __iormb(); __v; })
+#define readw(c)	({ u16 __v = __arch_getw(c); __iormb(); __v; })
+#define readl(c)	({ u32 __v = __arch_getl(c); __iormb(); __v; })
+
+/*
+ * Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single call. These macros can
+ * also be used to set a multiple-bit bit pattern using a mask, by
+ * specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
+#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a,v)	out_arch(l,le32,a,v)
+#define out_le16(a,v)	out_arch(w,le16,a,v)
+
+#define in_le32(a)	in_arch(l,le32,a)
+#define in_le16(a)	in_arch(w,le16,a)
+
+#define out_be32(a,v)	out_arch(l,be32,a,v)
+#define out_be16(a,v)	out_arch(w,be16,a,v)
+
+#define in_be32(a)	in_arch(l,be32,a)
+#define in_be16(a)	in_arch(w,be16,a)
+
+#define out_8(a,v)	__raw_writeb(v,a)
+#define in_8(a)		__raw_readb(a)
+
+#define clrbits(type, addr, clear) \
+	out_##type((addr), in_##type(addr) & ~(clear))
+
+#define setbits(type, addr, set) \
+	out_##type((addr), in_##type(addr) | (set))
+
+#define clrsetbits(type, addr, clear, set) \
+	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
+#define setbits_be32(addr, set) setbits(be32, addr, set)
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+
+#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
+#define setbits_le32(addr, set) setbits(le32, addr, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
+#define setbits_be16(addr, set) setbits(be16, addr, set)
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+
+#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
+#define setbits_le16(addr, set) setbits(le16, addr, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrbits_8(addr, clear) clrbits(8, addr, clear)
+#define setbits_8(addr, set) setbits(8, addr, set)
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __ASM_ARM64_IO_H */
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
new file mode 100644
index 0000000..05e909e
--- /dev/null
+++ b/arch/arm64/include/asm/linkage.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_LINKAGE_H
+#define __ASM_ARM64_LINKAGE_H
+
+/*
+ * Register aliases.
+ */
+lr	.req	x30		// link register
+
+/*
+ * Stack pushing/popping (register pairs only).
+ * Equivalent to store decrement before,
+ * load increment after.
+ */
+.macro	push, xreg1, xreg2
+	stp	\xreg1, \xreg2, [sp, #-16]!
+.endm
+
+.macro	pop, xreg1, xreg2
+	ldp	\xreg1, \xreg2, [sp], #16
+.endm
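+
+/*
+ * Illustrative use (hypothetical callee "some_func"):
+ *
+ *	push	x29, x30	// sp -= 16; [sp] = x29, [sp + 8] = x30
+ *	bl	some_func
+ *	pop	x29, x30	// x29 = [sp], x30 = [sp + 8]; sp += 16
+ */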
+
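+/*
+ * Exception vector entry: AArch64 spaces vector table entries 0x80 bytes
+ * apart, hence the .align 7 (2^7 = 128 bytes); each entry simply
+ * branches to its handler.
+ */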
+.macro	ventry	label
+	.align	7
+	b	\label
+.endm
+
+#endif /* __ASM_ARM64_LINKAGE_H */
diff --git a/arch/arm64/include/asm/posix_types.h b/arch/arm64/include/asm/posix_types.h
new file mode 100644
index 0000000..d272a35
--- /dev/null
+++ b/arch/arm64/include/asm/posix_types.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ARCH_ARM64_POSIX_TYPES_H
+#define __ARCH_ARM64_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short		__kernel_dev_t;
+typedef unsigned long		__kernel_ino_t;
+typedef unsigned short		__kernel_mode_t;
+typedef unsigned short		__kernel_nlink_t;
+typedef long			__kernel_off_t;
+typedef int			__kernel_pid_t;
+typedef unsigned short		__kernel_ipc_pid_t;
+typedef unsigned short		__kernel_uid_t;
+typedef unsigned short		__kernel_gid_t;
+typedef unsigned long		__kernel_size_t;
+typedef long			__kernel_ssize_t;
+typedef long			__kernel_ptrdiff_t;
+typedef long			__kernel_time_t;
+typedef long			__kernel_suseconds_t;
+typedef long			__kernel_clock_t;
+typedef long			__kernel_daddr_t;
+typedef char *			__kernel_caddr_t;
+typedef unsigned short		__kernel_uid16_t;
+typedef unsigned short		__kernel_gid16_t;
+typedef unsigned int		__kernel_uid32_t;
+typedef unsigned int		__kernel_gid32_t;
+
+typedef __kernel_uid_t		__kernel_old_uid_t;
+typedef __kernel_gid_t		__kernel_old_gid_t;
+
+#ifdef __GNUC__
+typedef long long		__kernel_loff_t;
+#endif
+
+#endif /* __ARCH_ARM64_POSIX_TYPES_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
new file mode 100644
index 0000000..67c0233
--- /dev/null
+++ b/arch/arm64/include/asm/processor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_PROCESSOR_H
+#define __ASM_ARM64_PROCESSOR_H
+
+/* CCSIDR */
+#define CCSIDR_LINE_SIZE_OFFSET		0
+#define CCSIDR_LINE_SIZE_MASK		0x7
+#define CCSIDR_ASSOCIATIVITY_OFFSET	3
+#define CCSIDR_ASSOCIATIVITY_MASK	(0x3FF << 3)
+#define CCSIDR_NUM_SETS_OFFSET		13
+#define CCSIDR_NUM_SETS_MASK		(0x7FFF << 13)
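+
+/*
+ * Illustrative decode (sketch): the cache line length in bytes is
+ *	16 << (ccsidr & CCSIDR_LINE_SIZE_MASK)
+ * since the LineSize field encodes log2(line bytes) - 4.
+ */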
+
+/*
+ * Values for InD field in CSSELR
+ * Selects the type of cache
+ */
+#define CSSELR_IND_DATA_UNIFIED		0
+#define CSSELR_IND_INSTRUCTION		1
+
+/* Values for Ctype fields in CLIDR */
+#define CLIDR_CTYPE_NO_CACHE		0
+#define CLIDR_CTYPE_INSTRUCTION_ONLY	1
+#define CLIDR_CTYPE_DATA_ONLY		2
+#define CLIDR_CTYPE_INSTRUCTION_DATA	3
+#define CLIDR_CTYPE_UNIFIED		4
+
+/*
+ * SCTLR_EL2 bit definitions
+ */
+#define CR_M		(1 << 0)	/* MMU enable				*/
+#define CR_A		(1 << 1)	/* Alignment abort enable		*/
+#define CR_C		(1 << 2)	/* Dcache enable			*/
+#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable		*/
+#define CR_I		(1 << 12)	/* Icache enable			*/
+#define CR_WXN		(1 << 19)	/* Write Permission Implies XN		*/
+#define CR_EE		(1 << 25)	/* Exception (Big) Endian		*/
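+
+/*
+ * Illustrative use (sketch): the I-cache could be enabled with
+ *	set_sctlr(get_sctlr() | CR_I);
+ * using the get_sctlr()/set_sctlr() helpers from asm/system.h.
+ */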
+
+#endif /* __ASM_ARM64_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
new file mode 100644
index 0000000..8664d3e
--- /dev/null
+++ b/arch/arm64/include/asm/ptrace.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_PTRACE_H
+#define __ASM_ARM64_PTRACE_H
+
+#define S_X0		(0)	/* offsetof(struct pt_regs, regs[0]) */
+#define S_X1		(8)	/* offsetof(struct pt_regs, regs[1]) */
+#define S_X2		(16)	/* offsetof(struct pt_regs, regs[2]) */
+#define S_X3		(24)	/* offsetof(struct pt_regs, regs[3]) */
+#define S_X4		(32)	/* offsetof(struct pt_regs, regs[4]) */
+#define S_X5		(40)	/* offsetof(struct pt_regs, regs[5]) */
+#define S_X6		(48)	/* offsetof(struct pt_regs, regs[6]) */
+#define S_X7		(56)	/* offsetof(struct pt_regs, regs[7]) */
+#define S_LR		(240)	/* offsetof(struct pt_regs, regs[30]) */
+#define S_SP		(248)	/* offsetof(struct pt_regs, sp) */
+#define S_PC		(256)	/* offsetof(struct pt_regs, pc) */
+#define S_PSTATE	(264)	/* offsetof(struct pt_regs, pstate) */
+#define S_SYSCALLNO	(272)	/* offsetof(struct pt_regs, syscallno) */
+#define S_FRAME_SIZE	(280)	/* sizeof(struct pt_regs) */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the stack during
+ * an exception. Note that sizeof(struct pt_regs) has to be a multiple of 16
+ * (for stack alignment).
+ */
+struct pt_regs {
+	u64 regs[31];
+	u64 sp;
+	u64 pc;
+	u64 pstate;
+	u64 syscallno;
+};
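+
+/*
+ * Illustrative mapping (sketch): the S_* offsets above follow directly
+ * from this layout, e.g.
+ *	S_LR = offsetof(struct pt_regs, regs[30]) = 30 * 8 = 240
+ *	S_SP = offsetof(struct pt_regs, sp)       = 31 * 8 = 248
+ * and S_FRAME_SIZE is sizeof(struct pt_regs).
+ */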
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM64_PTRACE_H */
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
new file mode 100644
index 0000000..c042cb6
--- /dev/null
+++ b/arch/arm64/include/asm/sections.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012 The Chromium OS Authors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_SECTIONS_H
+#define __ASM_ARM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#endif
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
new file mode 100644
index 0000000..550201f
--- /dev/null
+++ b/arch/arm64/include/asm/string.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_STRING_H
+#define __ASM_ARM64_STRING_H
+
+#include <config.h>
+
+#undef __HAVE_ARCH_STRRCHR
+extern char *strrchr(const char *s, int c);
+
+#undef __HAVE_ARCH_STRCHR
+extern char *strchr(const char *s, int c);
+
+#undef __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#undef __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#undef __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+
+#undef __HAVE_ARCH_MEMZERO
+extern void memzero(void *ptr, __kernel_size_t n);
+
+#undef __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+#endif /* __ASM_ARM64_STRING_H */
diff --git a/arch/arm64/include/asm/system.h b/arch/arm64/include/asm/system.h
new file mode 100644
index 0000000..8f57335
--- /dev/null
+++ b/arch/arm64/include/asm/system.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_ARCH_SYSTEM_H
+#define __ASM_ARM64_ARCH_SYSTEM_H
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+void outer_cache_enable(void);
+void outer_cache_disable(void);
+void outer_cache_flush_all(void);
+void outer_cache_inval_all(void);
+void outer_cache_flush_range(unsigned long start, unsigned long end);
+void outer_cache_inval_range(unsigned long start, unsigned long end);
+
+/*
+ * Save the current interrupt enable state
+ * and disable IRQs/FIQs
+ */
+#define local_irq_save(flags)			\
+	{					\
+		asm volatile(			\
+			"mrs	%0, daif\n"	\
+			"msr	daifset, #3"	\
+			: "=r" (flags)		\
+			:			\
+			: "memory");		\
+	}
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define local_irq_restore(flags)		\
+	{					\
+		asm volatile(			\
+			"msr	daif, %0"	\
+			:			\
+			: "r" (flags)		\
+			: "memory");		\
+	}
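+
+/*
+ * Illustrative use (sketch):
+ *
+ *	unsigned long flags;
+ *
+ *	local_irq_save(flags);
+ *	... code that must not be interrupted ...
+ *	local_irq_restore(flags);
+ */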
+
+/*
+ * Enable IRQs/FIQs
+ */
+#define local_irq_enable()			\
+	{					\
+		asm volatile(			\
+			"msr	daifclr, #3"	\
+			:			\
+			:			\
+			: "memory");		\
+	}
+
+/*
+ * Disable IRQs/FIQs
+ */
+#define local_irq_disable()			\
+	{					\
+		asm volatile(			\
+			"msr	daifset, #3"	\
+			:			\
+			:			\
+			: "memory");		\
+	}
+
+#define sev()		asm volatile("sev" : : : "memory")	/* send event */
+#define wfe()		asm volatile("wfe" : : : "memory")	/* wait for event */
+#define wfi()		asm volatile("wfi" : : : "memory")	/* wait for interrupt */
+
+#define isb()		asm volatile("isb" : : : "memory")	/* instruction sync barrier */
+#define dsb()		asm volatile("dsb sy" : : : "memory")	/* full-system data barrier */
+
+#define mb()		dsb()
+#define rmb()		asm volatile("dsb ld" : : : "memory")	/* read (load) barrier */
+#define wmb()		asm volatile("dsb st" : : : "memory")	/* write (store) barrier */
+
+static inline unsigned int get_sctlr(void)
+{
+	unsigned int val;
+	asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_sctlr(unsigned int val)
+{
+	asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
+	asm volatile("isb");
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM64_ARCH_SYSTEM_H */
diff --git a/arch/arm64/include/asm/types.h b/arch/arm64/include/asm/types.h
new file mode 100644
index 0000000..7aa08cb
--- /dev/null
+++ b/arch/arm64/include/asm/types.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM64_TYPES_H
+#define __ASM_ARM64_TYPES_H
+
+#define BITS_PER_LONG		64
+
+typedef unsigned short		umode_t;
+
+typedef __signed__ char		__s8;
+typedef unsigned char		__u8;
+
+typedef __signed__ short	__s16;
+typedef unsigned short		__u16;
+
+typedef __signed__ int		__s32;
+typedef unsigned int		__u32;
+
+typedef __signed__ long		__s64;
+typedef unsigned long		__u64;
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef signed char		s8;
+typedef unsigned char		u8;
+
+typedef signed short		s16;
+typedef unsigned short		u16;
+
+typedef signed int		s32;
+typedef unsigned int		u32;
+
+typedef signed long		s64;
+typedef unsigned long		u64;
+
+/* DMA addresses are 32 bits wide. */
+typedef u32			dma_addr_t;
+
+typedef unsigned long		phys_addr_t;
+typedef unsigned long		phys_size_t;
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARM64_TYPES_H */
diff --git a/arch/arm64/include/asm/u-boot.h b/arch/arm64/include/asm/u-boot.h
new file mode 100644
index 0000000..c0cf855
--- /dev/null
+++ b/arch/arm64/include/asm/u-boot.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _U_BOOT_ARM64_H_
+#define _U_BOOT_ARM64_H_
+
+typedef struct bd_info {
+	unsigned long bi_baudrate;	/* serial console baudrate */
+	struct				/* RAM configuration */
+	{
+		unsigned long start;
+		unsigned long size;
+	} bi_dram[CONFIG_NR_DRAM_BANKS];
+} bd_t;
+
+/* For image.h:image_check_target_arch() */
+#define IH_ARCH_DEFAULT	IH_ARCH_ARM64
+
+#endif	/* _U_BOOT_ARM64_H_ */
diff --git a/arch/arm64/include/asm/unaligned.h b/arch/arm64/include/asm/unaligned.h
new file mode 100644
index 0000000..39b509a
--- /dev/null
+++ b/arch/arm64/include/asm/unaligned.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013	David Feng <fenghua at phytium.com.cn>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM64_UNALIGNED_H
+#define _ASM_ARM64_UNALIGNED_H
+
+#include <asm-generic/unaligned.h>
+
+#endif /* _ASM_ARM64_UNALIGNED_H */
diff --git a/arch/arm64/include/asm/utils.h b/arch/arm64/include/asm/utils.h
new file mode 100644
index 0000000..828b86c
--- /dev/null
+++ b/arch/arm64/include/asm/utils.h
@@ -0,0 +1,56 @@
+/*
+ * (C) Copyright 2010
+ * Texas Instruments, <www.ti.com>
+ * Aneesh V <aneesh at ti.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef _UTILS_H_
+#define _UTILS_H_
+
+static inline s32 log_2_n_round_up(u32 n)
+{
+	s32 log2n = -1;
+	u32 temp = n;
+
+	while (temp) {
+		log2n++;
+		temp >>= 1;
+	}
+
+	if (n & (n - 1))
+		return log2n + 1; /* not power of 2 - round up */
+	else
+		return log2n; /* power of 2 */
+}
+
+static inline s32 log_2_n_round_down(u32 n)
+{
+	s32 log2n = -1;
+	u32 temp = n;
+
+	while (temp) {
+		log2n++;
+		temp >>= 1;
+	}
+
+	return log2n;
+}
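+
+/*
+ * Illustrative results (not part of the build):
+ *	log_2_n_round_up(8)   == 3	log_2_n_round_up(9)   == 4
+ *	log_2_n_round_down(8) == 3	log_2_n_round_down(9) == 3
+ */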
+
+#endif
-- 
1.7.9.5