[PATCH] efi_loader: define allow_unaligned as 'asm volatile'

Heinrich Schuchardt heinrich.schuchardt at canonical.com
Tue Mar 21 20:43:38 CET 2023


On 3/20/23 18:13, Ilias Apalodimas wrote:
> Tom reports that compiling with LTO & EFI breaks armv7 and arm11*
> builds.  The reason is that LTO (maybe a binutils bug?) replaces the
> asm version of allow_unaligned() with the __weak definition of the
> function.  As a result EFI code ends up running with unaligned accesses
> disabled and eventually crashes.
> 
> The hardware-specific variants of allow_unaligned() are usually defined
> in .S files.  Switching those to inline assembly fixes the problem, and
> the linker keeps the correct version in the final binary.
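
To spell out the mechanism: the definition that LTO wrongly keeps is the
weak no-op fallback on the EFI loader side, roughly the following (a
sketch, not a verbatim copy of the tree; __weak is the usual shorthand
for __attribute__((weak))):

	/*
	 * Default stub: leave alignment checking untouched. Architectures
	 * that can handle unaligned accesses override this with a strong
	 * definition - which this patch now provides as inline assembly
	 * instead of a .S routine.
	 */
	void __weak allow_unaligned(void)
	{
	}

If the stub survives the link, the alignment-check bit in SCTLR is never
cleared, so the first unaligned access in the EFI code faults - which
matches the crash described above.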
> 
> Reported-by: Tom Rini <trini at konsulko.com>
> Signed-off-by: Ilias Apalodimas <ilias.apalodimas at linaro.org>
> ---
> This is an alternative approach to https://lore.kernel.org/u-boot/ZBRy1oSZ5iVFqrdV@hera/
> since enabling unaligned accesses by default has been rejected in the past
> 
>   arch/arm/cpu/arm11/Makefile |  4 ----
>   arch/arm/cpu/arm11/cpu.c    | 13 +++++++++++++
>   arch/arm/cpu/arm11/sctlr.S  | 25 -------------------------
>   arch/arm/cpu/armv7/Makefile |  1 -
>   arch/arm/cpu/armv7/cpu.c    | 11 +++++++++++
>   arch/arm/cpu/armv7/sctlr.S  | 22 ----------------------
>   6 files changed, 24 insertions(+), 52 deletions(-)
>   delete mode 100644 arch/arm/cpu/arm11/sctlr.S
>   delete mode 100644 arch/arm/cpu/armv7/sctlr.S
> 
> diff --git a/arch/arm/cpu/arm11/Makefile b/arch/arm/cpu/arm11/Makefile
> index 5dfa01ae8d05..5d721fce12b5 100644
> --- a/arch/arm/cpu/arm11/Makefile
> +++ b/arch/arm/cpu/arm11/Makefile
> @@ -4,7 +4,3 @@
>   # Wolfgang Denk, DENX Software Engineering, wd at denx.de.
> 
>   obj-y	= cpu.o
> -
> -ifneq ($(CONFIG_SPL_BUILD),y)
> -obj-$(CONFIG_EFI_LOADER) += sctlr.o
> -endif
> diff --git a/arch/arm/cpu/arm11/cpu.c b/arch/arm/cpu/arm11/cpu.c
> index ffe35111d589..b19c4fc4a6c2 100644
> --- a/arch/arm/cpu/arm11/cpu.c
> +++ b/arch/arm/cpu/arm11/cpu.c
> @@ -111,3 +111,16 @@ void enable_caches(void)
>   #endif
>   }
>   #endif
> +
> +#if !IS_ENABLED(CONFIG_SPL_BUILD)
> +void allow_unaligned(void)
> +{
> +	asm volatile(
> +	"mrc	p15, 0, r0, c1, c0, 0\n" /* load system control register */
> +	"orr	r0, r0, #1 << 22\n"	 /* set unaligned data support flag */
> +	"bic	r0, r0, #2\n"		 /* clear aligned flag */
> +	"mcr	p15, 0, r0, c1, c0, 0\n" /* write system control register */
> +	::: "r0", "memory"	/* clobber list; return via the normal C epilogue, no "bx lr" */
> +	);
> +}
> +#endif
> diff --git a/arch/arm/cpu/arm11/sctlr.S b/arch/arm/cpu/arm11/sctlr.S
> deleted file mode 100644
> index 74a7fc4a25b6..000000000000
> --- a/arch/arm/cpu/arm11/sctlr.S
> +++ /dev/null
> @@ -1,25 +0,0 @@
> -/* SPDX-License-Identifier:	GPL-2.0+ */
> -/*
> - *  Routines to access the system control register
> - *
> - *  Copyright (c) 2019 Heinrich Schuchardt
> - */
> -
> -#include <linux/linkage.h>
> -
> -/*
> - * void allow_unaligned(void) - allow unaligned access
> - *
> - * This routine sets the enable unaligned data support flag and clears the
> - * aligned flag in the system control register.
> - * After calling this routine unaligned access does no longer leads to a
> - * data abort or undefined behavior but is handled by the CPU.
> - * For details see the "ARM Architecture Reference Manual" for ARMv6.
> - */
> -ENTRY(allow_unaligned)
> -	mrc	p15, 0, r0, c1, c0, 0	@ load system control register
> -	orr	r0, r0, #1 << 22	@ set unaligned data support flag
> -	bic	r0, r0, #2		@ clear aligned flag
> -	mcr	p15, 0, r0, c1, c0, 0	@ write system control register
> -	bx	lr			@ return
> -ENDPROC(allow_unaligned)
> diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile
> index 653eef8ad79e..ca50f6e93e10 100644
> --- a/arch/arm/cpu/armv7/Makefile
> +++ b/arch/arm/cpu/armv7/Makefile
> @@ -13,7 +13,6 @@ obj-y	+= syslib.o
>   obj-$(CONFIG_SYS_ARM_MPU) += mpu_v7r.o
> 
>   ifneq ($(CONFIG_SPL_BUILD),y)
> -obj-$(CONFIG_EFI_LOADER) += sctlr.o
>   obj-$(CONFIG_ARMV7_NONSEC) += exception_level.o
>   endif
> 
> diff --git a/arch/arm/cpu/armv7/cpu.c b/arch/arm/cpu/armv7/cpu.c
> index 68807d209978..9742fa65e3cf 100644
> --- a/arch/arm/cpu/armv7/cpu.c
> +++ b/arch/arm/cpu/armv7/cpu.c
> @@ -83,3 +83,14 @@ int cleanup_before_linux(void)
>   {
>   	return cleanup_before_linux_select(CBL_ALL);
>   }
> +
> +#if !IS_ENABLED(CONFIG_SPL_BUILD)

Why do we need this #if? The linker should eliminate unused functions.
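
I.e., assuming the usual -ffunction-sections/--gc-sections setup, an SPL
link that never references allow_unaligned() should drop the function
anyway, so the plain definition ought to be enough. Something like this
(sketch only):

	/*
	 * No CONFIG_SPL_BUILD guard: if nothing in the SPL link references
	 * allow_unaligned(), the linker's section garbage collection
	 * discards it.
	 */
	void allow_unaligned(void)
	{
		asm volatile(
		"mrc	p15, 0, r0, c1, c0, 0\n" /* read SCTLR */
		"bic	r0, r0, #2\n"		 /* clear the alignment check bit */
		"mcr	p15, 0, r0, c1, c0, 0\n" /* write SCTLR */
		::: "r0", "memory");
	}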

Best regards

Heinrich

> +void allow_unaligned(void)
> +{
> +	asm volatile(
> +	"mrc	p15, 0, r0, c1, c0, 0\n" /* load system control register */
> +	"bic	r0, r0, #2\n"		 /* clear aligned flag */
> +	"mcr	p15, 0, r0, c1, c0, 0\n" /* write system control register */
> +	::: "r0", "memory");
> +}
> +#endif
> diff --git a/arch/arm/cpu/armv7/sctlr.S b/arch/arm/cpu/armv7/sctlr.S
> deleted file mode 100644
> index bd56e41afe18..000000000000
> --- a/arch/arm/cpu/armv7/sctlr.S
> +++ /dev/null
> @@ -1,22 +0,0 @@
> -/* SPDX-License-Identifier:     GPL-2.0+ */
> -/*
> - *  Routines to access the system control register
> - *
> - *  Copyright (c) 2018 Heinrich Schuchardt
> - */
> -
> -#include <linux/linkage.h>
> -
> -/*
> - * void allow_unaligned(void) - allow unaligned access
> - *
> - * This routine clears the aligned flag in the system control register.
> - * After calling this routine unaligned access does no longer lead to a
> - * data abort but is handled by the CPU.
> - */
> -ENTRY(allow_unaligned)
> -	mrc	p15, 0, r0, c1, c0, 0	@ load system control register
> -	bic	r0, r0, #2		@ clear aligned flag
> -	mcr	p15, 0, r0, c1, c0, 0	@ write system control register
> -	bx	lr			@ return
> -ENDPROC(allow_unaligned)
> --
> 2.39.2
> 


