[U-Boot] [PATCH v2 4/9] vexpress_aemv8a: Add spin table handling with per cpu release addresses
Arnab Basu
arnab_basu at rocketmail.com
Mon Jan 12 21:56:11 CET 2015
Signed-off-by: Arnab Basu <arnab_basu at rocketmail.com>
---
arch/arm/cpu/armv8/cpu-dt.c | 16 ++++-
arch/arm/cpu/armv8/cpu.c | 113 ++++++++++++++++++++++++++++++
arch/arm/cpu/armv8/start.S | 141 +++++++++++++++++++++++++++++---------
arch/arm/include/asm/armv8/mp.h | 36 ++++++++++
arch/arm/include/asm/config.h | 1 +
include/configs/vexpress_aemv8a.h | 2 +
6 files changed, 275 insertions(+), 34 deletions(-)
create mode 100644 arch/arm/include/asm/armv8/mp.h
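
Note: with per-CPU release addresses, the kernel's "spin-table" enable
method expects each cpu node to carry its own cpu-release-addr pointing
at that core's spin table element. A sketch of the resulting DT (the
address below is illustrative, not what the code computes on any given
board):

	cpu@1 {
		device_type = "cpu";
		compatible = "arm,armv8";
		reg = <0x1>;
		enable-method = "spin-table";
		cpu-release-addr = <0x0 0x88000040>;
	};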
diff --git a/arch/arm/cpu/armv8/cpu-dt.c b/arch/arm/cpu/armv8/cpu-dt.c
index 8833e6a..ce0e3c6 100644
--- a/arch/arm/cpu/armv8/cpu-dt.c
+++ b/arch/arm/cpu/armv8/cpu-dt.c
@@ -7,16 +7,30 @@
#include <common.h>
#include <libfdt.h>
#include <fdt_support.h>
+#include <asm/armv8/mp.h>
#ifdef CONFIG_MP
+DECLARE_GLOBAL_DATA_PTR;
__weak u64 arch_get_release_addr(u64 cpu_id)
{
- return 0;
+ u64 val;
+ u64 spin_table_loc = (u64)get_spin_tbl_addr();
+
+ val = spin_table_loc;
+ val += id_to_core(cpu_id) * SPIN_TABLE_ELEM_SIZE;
+
+ return val;
}
__weak void arch_spin_table_reserve_mem(void *fdt)
{
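+ /*
+ * Secondary cores keep spinning in the image at its original load
+ * address (CONFIG_SYS_TEXT_BASE), not in the relocated copy, so
+ * convert the relocated symbol back to its pre-relocation address
+ * before reserving that region in the device tree.
+ */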
+ size_t *boot_code_size = &(__secondary_boot_code_size);
+ u64 boot_code_loc = ((u64)&secondary_boot_code - gd->relocaddr
+ + CONFIG_SYS_TEXT_BASE);
+
+ fdt_add_mem_rsv(fdt, (uintptr_t)boot_code_loc,
+ *boot_code_size);
}
static void cpu_update_dt_spin_table(void *blob)
diff --git a/arch/arm/cpu/armv8/cpu.c b/arch/arm/cpu/armv8/cpu.c
index e06c3cc..2c8be1c 100644
--- a/arch/arm/cpu/armv8/cpu.c
+++ b/arch/arm/cpu/armv8/cpu.c
@@ -16,6 +16,119 @@
#include <asm/system.h>
#include <linux/compiler.h>
+#ifdef CONFIG_MP
+#include <asm/armv8/mp.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static u32 active_cores;
+
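+/*
+ * The spin table lives in the image at its original load address
+ * (CONFIG_SYS_TEXT_BASE); undo U-Boot's relocation offset to find it.
+ */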
+void *get_spin_tbl_addr(void)
+{
+ return (void *)((u64)__spin_table - gd->relocaddr
+ + CONFIG_SYS_TEXT_BASE);
+}
+
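+/*
+ * Scan the spin table to build a bitmask of live cores: each secondary
+ * writes a non-zero STATUS word into its own entry from
+ * secondary_boot_func (see start.S) before settling into its wfe loop.
+ */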
+static void init_active_cores(void)
+{
+ int i;
+
+ active_cores = 1; /* The 0th (boot) core must be up */
+
+ for (i = 1; i < CONFIG_MAX_CPUS; i++) {
+ u64 *spin_table_entry = ((u64 *)get_spin_tbl_addr() +
+ (i * WORDS_PER_SPIN_TABLE_ENTRY));
+
+ if (spin_table_entry[SPIN_TABLE_ELEM_STATUS_IDX])
+ active_cores |= (1 << i);
+ }
+}
+
+u32 cpu_mask(void)
+{
+ if (!active_cores)
+ init_active_cores();
+
+ return active_cores;
+}
+
+int is_core_valid(unsigned int core)
+{
+ return !!((1 << core) & cpu_mask());
+}
+
+int cpu_reset(int nr)
+{
+ puts("Feature is not implemented.\n");
+
+ return 0;
+}
+
+int cpu_disable(int nr)
+{
+ puts("Feature is not implemented.\n");
+
+ return 0;
+}
+
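+/*
+ * Translate core number nr into its slot index in the spin table,
+ * counting only cores present in the active-core mask.
+ */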
+int core_to_pos(int nr)
+{
+ u32 cores = cpu_mask();
+ int i, count = 0;
+
+ if (nr == 0) {
+ return 0;
+ } else if (nr >= hweight32(cores)) {
+ puts("Not a valid core number.\n");
+ return -1;
+ }
+
+ for (i = 1; i < 32; i++) {
+ if (is_core_valid(i)) {
+ count++;
+ if (count == nr)
+ break;
+ }
+ }
+
+ return count;
+}
+
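+/* Dump the spin table entry for core nr (or the table base for core 0) */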
+int cpu_status(int nr)
+{
+ u64 *table;
+ int pos;
+
+ if (nr == 0) {
+ table = (u64 *)get_spin_tbl_addr();
+ printf("table base @ 0x%p\n", table);
+ } else {
+ pos = core_to_pos(nr);
+ if (pos < 0)
+ return -1;
+ table = (u64 *)get_spin_tbl_addr() + pos *
+ WORDS_PER_SPIN_TABLE_ENTRY;
+ printf("table @ 0x%p\n", table);
+ printf(" addr - 0x%016llx\n",
+ table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
+ printf(" status - 0x%016llx\n",
+ table[SPIN_TABLE_ELEM_STATUS_IDX]);
+ printf(" lpid - 0x%016llx\n",
+ table[SPIN_TABLE_ELEM_LPID_IDX]);
+ }
+
+ return 0;
+}
+
+int cpu_release(int nr, int argc, char * const argv[])
+{
+ puts("Feature is not implemented.\n");
+
+ return 0;
+}
+#endif
+
int cleanup_before_linux(void)
{
/*
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index 4b11aa4..e985ede 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -9,8 +9,10 @@
#include <config.h>
#include <version.h>
#include <linux/linkage.h>
+#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>
+#include <asm/armv8/mp.h>
/*************************************************************************
*
@@ -77,22 +79,9 @@ reset:
/* Processor specific initialization */
bl lowlevel_init
- branch_if_master x0, x1, master_cpu
-
- /*
- * Slave CPUs
- */
-slave_cpu:
- wfe
- ldr x1, =CPU_RELEASE_ADDR
- ldr x0, [x1]
- cbz x0, slave_cpu
- br x0 /* branch to the given address */
-
/*
- * Master CPU
+ * Only the master CPU will get here
*/
-master_cpu:
bl _main
/*-----------------------------------------------------------------------*/
@@ -117,25 +106,8 @@ WEAK(lowlevel_init)
branch_if_master x0, x1, 2f
- /*
- * Slave should wait for master clearing spin table.
- * This sync prevent salves observing incorrect
- * value of spin table and jumping to wrong place.
- */
-#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
-#ifdef CONFIG_GICV2
- ldr x0, =GICC_BASE
-#endif
- bl gic_wait_for_interrupt
-#endif
-
- /*
- * All slaves will enter EL2 and optionally EL1.
- */
- bl armv8_switch_to_el2
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- bl armv8_switch_to_el1
-#endif
+ ldr x0, =secondary_boot_func
+ blr x0
2:
mov lr, x29 /* Restore LR */
@@ -168,3 +140,106 @@ ENTRY(c_runtime_cpu_setup)
ret
ENDPROC(c_runtime_cpu_setup)
+
+ /* Keep literals not used by the secondary boot code outside it */
+ .ltorg
+
+ /* .align 4 gives 16-byte alignment; the spin table is accessed as 64-bit data */
+ .align 4
+ .global secondary_boot_code
+ /* Secondary Boot Code starts here */
+secondary_boot_code:
+ .global __spin_table
+__spin_table:
+ .zero CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
+
+ .align 2
+ENTRY(secondary_boot_func)
+ /*
+ * MPIDR_EL1 Fields:
+ * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
+ * MPIDR[7:2] = AFF0_RES
+ * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
+ * MPIDR[23:16] = AFF2_CLUSTERID
+ * MPIDR[24] = MT
+ * MPIDR[29:25] = RES0
+ * MPIDR[30] = U
+ * MPIDR[31] = ME
+ * MPIDR[39:32] = AFF3
+ *
+ * Linear Processor ID (LPID) calculation from MPIDR_EL1:
+ * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
+ * until AFF2_CLUSTERID and AFF3 have non-zero values)
+ *
+ * LPID = (AFF1_CLUSTERID << 2) | AFF0_CPUID
+ */
+ mrs x0, mpidr_el1
+ ubfm x1, x0, #8, #15
+ ubfm x2, x0, #0, #1
+ orr x10, x2, x1, lsl #2 /* x10 has LPID */
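+ /* e.g. cluster 1, core 2: AFF1 = 1, AFF0 = 2 -> LPID = (1 << 2) | 2 = 6 */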
+ ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
+ /*
+ * offset of the spin table element for this core from start of spin
+ * table (each elem is padded to 64 bytes)
+ */
+ lsl x1, x10, #6
+ ldr x0, =__spin_table
+ /* physical address of this cpus spin table element */
+ add x11, x1, x0
+
+ str x9, [x11, #16] /* LPID */
+ mov x4, #1
+ str x4, [x11, #8] /* STATUS */
+ dsb sy
+#if defined(CONFIG_GICV3)
+ gic_wait_for_interrupt_m x0
+#elif defined(CONFIG_GICV2)
+ ldr x0, =GICC_BASE
+ gic_wait_for_interrupt_m x0, w1
+#endif
+
+ bl secondary_switch_to_el2
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ bl secondary_switch_to_el1
+#endif
+
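+ /*
+ * Wait to be released: the OS writes this core's entry address
+ * into the first word of its spin table element and issues sev.
+ */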
+slave_cpu:
+ wfe
+ ldr x0, [x11]
+ cbz x0, slave_cpu
+#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
+ mrs x1, sctlr_el2
+#else
+ mrs x1, sctlr_el1
+#endif
+ tbz x1, #25, cpu_is_le
+ rev x0, x0 /* BE to LE conversion */
+cpu_is_le:
+ br x0 /* branch to the given address */
+ENDPROC(secondary_boot_func)
+
+ENTRY(secondary_switch_to_el2)
+ switch_el x0, 1f, 0f, 0f
+0: ret
+1: armv8_switch_to_el2_m x0
+ENDPROC(secondary_switch_to_el2)
+
+ENTRY(secondary_switch_to_el1)
+ switch_el x0, 0f, 1f, 0f
+0: ret
+1: armv8_switch_to_el1_m x0, x1
+ENDPROC(secondary_switch_to_el1)
+
+ /*
+ * Ensure that the literals used by the secondary boot code are
+ * assembled within it (this is required so that we can protect
+ * this area with a single memreserve region).
+ */
+ .ltorg
+
+ /* .align 4 gives 16-byte alignment for elements accessed as 64-bit data */
+ .align 4
+ .globl __secondary_boot_code_size
+ .type __secondary_boot_code_size, %object
+ /* Secondary Boot Code ends here */
+__secondary_boot_code_size:
+ .quad .-secondary_boot_code
diff --git a/arch/arm/include/asm/armv8/mp.h b/arch/arm/include/asm/armv8/mp.h
new file mode 100644
index 0000000..77e79cb
--- /dev/null
+++ b/arch/arm/include/asm/armv8/mp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014, Freescale Semiconductor
+ * Copyright 2015, Arnab Basu <arnab_basu at rocketmail.com>
+ * (modified version of arch/arm/cpu/armv8/fsl-lsch3/mp.h)
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _ARMV8_MP_H
+#define _ARMV8_MP_H
+
+/*
+ * Each spin table element is defined as
+ *	struct {
+ *		uint64_t entry_addr;
+ *		uint64_t status;
+ *		uint64_t lpid;
+ *	};
+ * We pad this struct to 64 bytes so that each entry sits in its own
+ * cache line; the actual spin table is an array of these structures.
+ */
+#define SPIN_TABLE_ELEM_ENTRY_ADDR_IDX 0
+#define SPIN_TABLE_ELEM_STATUS_IDX 1
+#define SPIN_TABLE_ELEM_LPID_IDX 2
+#define WORDS_PER_SPIN_TABLE_ENTRY 8 /* pad to 64 bytes */
+#define SPIN_TABLE_ELEM_SIZE 64
+
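+/*
+ * Convert MPIDR[15:0] (as saved by secondary_boot_func) to a linear
+ * core index: bits [1:0] hold AFF0 (core), bits [15:8] hold AFF1
+ * (cluster), so (x >> 6) is cluster * 4.
+ */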
+#define id_to_core(x) (((x) & 3) | ((x) >> 6))
+#ifndef __ASSEMBLY__
+extern u64 __spin_table[];
+extern phys_addr_t secondary_boot_code;
+extern size_t __secondary_boot_code_size;
+void *get_spin_tbl_addr(void);
+void secondary_boot_func(void);
+#endif
+#endif /* _ARMV8_MP_H */
diff --git a/arch/arm/include/asm/config.h b/arch/arm/include/asm/config.h
index be80434..97544fb 100644
--- a/arch/arm/include/asm/config.h
+++ b/arch/arm/include/asm/config.h
@@ -15,6 +15,7 @@
#define CONFIG_SYS_BOOT_RAMDISK_HIGH
#ifdef CONFIG_ARM64
+#define CONFIG_MP
#define CONFIG_PHYS_64BIT
#define CONFIG_STATIC_RELA
#endif
diff --git a/include/configs/vexpress_aemv8a.h b/include/configs/vexpress_aemv8a.h
index 027d78b..9c4f06b 100644
--- a/include/configs/vexpress_aemv8a.h
+++ b/include/configs/vexpress_aemv8a.h
@@ -8,6 +8,8 @@
#ifndef __VEXPRESS_AEMV8A_H
#define __VEXPRESS_AEMV8A_H
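+/* Upper bound on cores tracked by the spin table */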
+#define CONFIG_MAX_CPUS 4
+
/* We use generic board for v8 Versatile Express */
#define CONFIG_SYS_GENERIC_BOARD
--
1.9.1