[U-Boot] [PATCH v3] armv8: aarch64: Fix the warning about x1-x3 nonzero issue
Michal Simek
monstr at monstr.eu
Tue Jan 17 15:23:27 CET 2017
2017-01-17 2:39 GMT+01:00 Alison Wang <b18965 at freescale.com>:
> When booting a 64-bit kernel, the kernel warns that x1-x3 are nonzero,
> which violates the boot protocol. To fix this, a fourth input argument
> is added to armv8_switch_to_el2 and armv8_switch_to_el1, and it is set
> to the required value, such as zero.
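>
> For example (taken from the arch/arm/lib/bootm.c hunk below), a 64-bit
> boot now passes an explicit zero for the new fourth argument, so x3 is
> cleared before the jump to the kernel:
>
>     armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
>                         images->ep, ES_TO_AARCH64);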
>
> Signed-off-by: Alison Wang <alison.wang at nxp.com>
> Reviewed-by: Alexander Graf <agraf at suse.de>
> Tested-by: Ryan Harkin <ryan.harkin at linaro.org>
> ---
> Changes in v3:
> - Remove redundant code.
> Changes in v2:
> - Add a fourth input argument to armv8_switch_to_el2 and
>   armv8_switch_to_el1.
> - Drop the previous approach of adjusting the passed parameters to make
>   sure x3 is zero.
>
>  arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 28 ++++++++++++++--------------
>  arch/arm/cpu/armv8/sec_firmware_asm.S        |  6 +++---
>  arch/arm/cpu/armv8/start.S                   |  8 ++++----
>  arch/arm/cpu/armv8/transition.S              | 22 +++++++++++-----------
>  arch/arm/include/asm/system.h                |  8 +++++---
>  arch/arm/lib/bootm.c                         | 10 +++++-----
>  arch/arm/mach-rmobile/lowlevel_init_gen3.S   |  8 ++++----
>  cmd/bootefi.c                                |  2 +-
>  8 files changed, 47 insertions(+), 45 deletions(-)
>
> diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
> index 72f2c11..63215f0 100644
> --- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
> +++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
> @@ -378,29 +378,29 @@ cpu_is_le:
> b.eq 1f
>
> #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> - adr x3, secondary_switch_to_el1
> - ldr x4, =ES_TO_AARCH64
> + adr x4, secondary_switch_to_el1
> + ldr x5, =ES_TO_AARCH64
> #else
> - ldr x3, [x11]
> - ldr x4, =ES_TO_AARCH32
> + ldr x4, [x11]
> + ldr x5, =ES_TO_AARCH32
> #endif
> bl secondary_switch_to_el2
>
> 1:
> #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> - adr x3, secondary_switch_to_el1
> + adr x4, secondary_switch_to_el1
> #else
> - ldr x3, [x11]
> + ldr x4, [x11]
> #endif
> - ldr x4, =ES_TO_AARCH64
> + ldr x5, =ES_TO_AARCH64
> bl secondary_switch_to_el2
>
> ENDPROC(secondary_boot_func)
>
> ENTRY(secondary_switch_to_el2)
> - switch_el x5, 1f, 0f, 0f
> + switch_el x6, 1f, 0f, 0f
> 0: ret
> -1: armv8_switch_to_el2_m x3, x4, x5
> +1: armv8_switch_to_el2_m x4, x5, x6
> ENDPROC(secondary_switch_to_el2)
>
> ENTRY(secondary_switch_to_el1)
> @@ -414,22 +414,22 @@ ENTRY(secondary_switch_to_el1)
> /* physical address of this cpus spin table element */
> add x11, x1, x0
>
> - ldr x3, [x11]
> + ldr x4, [x11]
>
> ldr x5, [x11, #24]
> ldr x6, =IH_ARCH_DEFAULT
> cmp x6, x5
> b.eq 2f
>
> - ldr x4, =ES_TO_AARCH32
> + ldr x5, =ES_TO_AARCH32
> bl switch_to_el1
>
> -2: ldr x4, =ES_TO_AARCH64
> +2: ldr x5, =ES_TO_AARCH64
>
> switch_to_el1:
> - switch_el x5, 0f, 1f, 0f
> + switch_el x6, 0f, 1f, 0f
> 0: ret
> -1: armv8_switch_to_el1_m x3, x4, x5
> +1: armv8_switch_to_el1_m x4, x5, x6
> ENDPROC(secondary_switch_to_el1)
>
> /* Ensure that the literals used by the secondary boot code are
> diff --git a/arch/arm/cpu/armv8/sec_firmware_asm.S b/arch/arm/cpu/armv8/sec_firmware_asm.S
> index 903195d..5ed3677 100644
> --- a/arch/arm/cpu/armv8/sec_firmware_asm.S
> +++ b/arch/arm/cpu/armv8/sec_firmware_asm.S
> @@ -57,7 +57,8 @@ ENDPROC(_sec_firmware_support_psci_version)
> * x0: argument, zero
> * x1: machine nr
> * x2: fdt address
> - * x3: kernel entry point
> + * x3: input argument
> + * x4: kernel entry point
> * @param outputs for secure firmware:
> * x0: function id
> * x1: kernel entry point
> @@ -65,10 +66,9 @@ ENDPROC(_sec_firmware_support_psci_version)
> * x3: fdt address
> */
> ENTRY(armv8_el2_to_aarch32)
> - mov x0, x3
> mov x3, x2
> mov x2, x1
> - mov x1, x0
> + mov x1, x4
> ldr x0, =0xc000ff04
> smc #0
> ret
> diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
> index 9535057..eb1b8a6 100644
> --- a/arch/arm/cpu/armv8/start.S
> +++ b/arch/arm/cpu/armv8/start.S
> @@ -250,14 +250,14 @@ WEAK(lowlevel_init)
> /*
> * All slaves will enter EL2 and optionally EL1.
> */
> - adr x3, lowlevel_in_el2
> - ldr x4, =ES_TO_AARCH64
> + adr x4, lowlevel_in_el2
> + ldr x5, =ES_TO_AARCH64
> bl armv8_switch_to_el2
>
> lowlevel_in_el2:
> #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> - adr x3, lowlevel_in_el1
> - ldr x4, =ES_TO_AARCH64
> + adr x4, lowlevel_in_el1
> + ldr x5, =ES_TO_AARCH64
> bl armv8_switch_to_el1
>
> lowlevel_in_el1:
> diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
> index adb9f35..ca07465 100644
> --- a/arch/arm/cpu/armv8/transition.S
> +++ b/arch/arm/cpu/armv8/transition.S
> @@ -11,9 +11,9 @@
> #include <asm/macro.h>
>
> ENTRY(armv8_switch_to_el2)
> - switch_el x5, 1f, 0f, 0f
> + switch_el x6, 1f, 0f, 0f
> 0:
> - cmp x4, #ES_TO_AARCH64
> + cmp x5, #ES_TO_AARCH64
> b.eq 2f
> /*
> * When loading 32-bit kernel, it will jump
> @@ -22,23 +22,23 @@ ENTRY(armv8_switch_to_el2)
> bl armv8_el2_to_aarch32
> 2:
> /*
> - * x3 is kernel entry point or switch_to_el1
> + * x4 is kernel entry point or switch_to_el1
> * if CONFIG_ARMV8_SWITCH_TO_EL1 is defined.
> * When running in EL2 now, jump to the
> - * address saved in x3.
> + * address saved in x4.
> */
> - br x3
> -1: armv8_switch_to_el2_m x3, x4, x5
> + br x4
> +1: armv8_switch_to_el2_m x4, x5, x6
> ENDPROC(armv8_switch_to_el2)
>
> ENTRY(armv8_switch_to_el1)
> - switch_el x5, 0f, 1f, 0f
> + switch_el x6, 0f, 1f, 0f
> 0:
> - /* x3 is kernel entry point. When running in EL1
> - * now, jump to the address saved in x3.
> + /* x4 is kernel entry point. When running in EL1
> + * now, jump to the address saved in x4.
> */
> - br x3
> -1: armv8_switch_to_el1_m x3, x4, x5
> + br x4
> +1: armv8_switch_to_el1_m x4, x5, x6
> ENDPROC(armv8_switch_to_el1)
>
> WEAK(armv8_el2_to_aarch32)
> diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
> index dc4c991..766e929 100644
> --- a/arch/arm/include/asm/system.h
> +++ b/arch/arm/include/asm/system.h
> @@ -196,11 +196,12 @@ void __asm_switch_ttbr(u64 new_ttbr);
> * For loading 32-bit OS, machine nr
> * @fdt_addr: For loading 64-bit OS, zero.
> * For loading 32-bit OS, fdt address.
> + * @arg4: Input argument.
> * @entry_point: kernel entry point
> * @es_flag: execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
> */
> void armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
> - u64 entry_point, u64 es_flag);
> + u64 arg4, u64 entry_point, u64 es_flag);
> /*
> * Switch from EL2 to EL1 for ARMv8
> *
> @@ -210,13 +211,14 @@ void armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
> * For loading 32-bit OS, machine nr
> * @fdt_addr: For loading 64-bit OS, zero.
> * For loading 32-bit OS, fdt address.
> + * @arg4: Input argument.
> * @entry_point: kernel entry point
> * @es_flag: execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
> */
> void armv8_switch_to_el1(u64 args, u64 mach_nr, u64 fdt_addr,
> - u64 entry_point, u64 es_flag);
> + u64 arg4, u64 entry_point, u64 es_flag);
> void armv8_el2_to_aarch32(u64 args, u64 mach_nr, u64 fdt_addr,
> - u64 entry_point);
> + u64 arg4, u64 entry_point);
> void gic_init(void);
> void gic_send_sgi(unsigned long sgino);
> void wait_for_wakeup(void);
> diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
> index 43cc83e..8125cf0 100644
> --- a/arch/arm/lib/bootm.c
> +++ b/arch/arm/lib/bootm.c
> @@ -287,11 +287,11 @@ static void switch_to_el1(void)
> if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
> (images.os.arch == IH_ARCH_ARM))
> armv8_switch_to_el1(0, (u64)gd->bd->bi_arch_number,
> - (u64)images.ft_addr,
> + (u64)images.ft_addr, 0,
> (u64)images.ep,
> ES_TO_AARCH32);
> else
> - armv8_switch_to_el1((u64)images.ft_addr, 0, 0,
> + armv8_switch_to_el1((u64)images.ft_addr, 0, 0, 0,
> images.ep,
> ES_TO_AARCH64);
> }
> @@ -324,17 +324,17 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
> update_os_arch_secondary_cores(images->os.arch);
>
> #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> - armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
> + armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
> (u64)switch_to_el1, ES_TO_AARCH64);
> #else
> if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
> (images->os.arch == IH_ARCH_ARM))
> armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
> - (u64)images->ft_addr,
> + (u64)images->ft_addr, 0,
> (u64)images->ep,
> ES_TO_AARCH32);
> else
> - armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
> + armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
> images->ep,
> ES_TO_AARCH64);
> #endif
> diff --git a/arch/arm/mach-rmobile/lowlevel_init_gen3.S b/arch/arm/mach-rmobile/lowlevel_init_gen3.S
> index 11acce0..ce3d4f5 100644
> --- a/arch/arm/mach-rmobile/lowlevel_init_gen3.S
> +++ b/arch/arm/mach-rmobile/lowlevel_init_gen3.S
> @@ -61,14 +61,14 @@ ENTRY(lowlevel_init)
> /*
> * All slaves will enter EL2 and optionally EL1.
> */
> - adr x3, lowlevel_in_el2
> - ldr x4, =ES_TO_AARCH64
> + adr x4, lowlevel_in_el2
> + ldr x5, =ES_TO_AARCH64
> bl armv8_switch_to_el2
>
> lowlevel_in_el2:
> #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> - adr x3, lowlevel_in_el1
> - ldr x4, =ES_TO_AARCH64
> + adr x4, lowlevel_in_el1
> + ldr x5, =ES_TO_AARCH64
> bl armv8_switch_to_el1
>
> lowlevel_in_el1:
> diff --git a/cmd/bootefi.c b/cmd/bootefi.c
> index 97a0fc9..06943a9 100644
> --- a/cmd/bootefi.c
> +++ b/cmd/bootefi.c
> @@ -246,7 +246,7 @@ static unsigned long do_bootefi_exec(void *efi, void *fdt)
>
> /* Move into EL2 and keep running there */
> armv8_switch_to_el2((ulong)entry, (ulong)&loaded_image_info,
> - (ulong)&systab, (ulong)efi_run_in_el2,
> + (ulong)&systab, 0, (ulong)efi_run_in_el2,
> ES_TO_AARCH64);
>
> /* Should never reach here, efi exits with longjmp */
>
ZynqMP is also booting fine.
Tested-by: Michal Simek <michal.simek at xilinx.com>
Thanks,
Michal
--
Michal Simek, Ing. (M.Eng), OpenPGP -> KeyID: FE3D1F91
w: www.monstr.eu p: +42-0-721842854
Maintainer of Linux kernel - Microblaze cpu - http://www.monstr.eu/fdt/
Maintainer of Linux kernel - Xilinx Zynq ARM architecture
Microblaze U-BOOT custodian and responsible for u-boot arm zynq platform