[U-Boot] [PATCH v4 1/2] armv8: Support loading 32-bit OS in AArch32 execution state

Alexander Graf agraf at suse.de
Wed Jun 15 10:21:11 CEST 2016



On 15.06.16 10:08, Huan Wang wrote:
>>> On 15.06.2016 at 05:04, Huan Wang <alison.wang at nxp.com> wrote:
>>>
>>> Hi, Alex,
>>>
>>>>> On 06/08/2016 07:14 AM, Alison Wang wrote:
>>>>> To support loading a 32-bit OS, the execution state will change from
>>>>> AArch64 to AArch32 when jumping to the kernel.
>>>>>
>>>>> The architecture information will be obtained by checking the FIT
>>>>> image, so U-Boot will load a 32-bit or a 64-bit OS automatically.
>>>>>
>>>>> Signed-off-by: Ebony Zhu <ebony.zhu at nxp.com>
>>>>> Signed-off-by: Alison Wang <alison.wang at nxp.com>
>>>>> Signed-off-by: Chenhui Zhao <chenhui.zhao at nxp.com>
>>>>> ---
>>>>> Changes in v4:
>>>>> - Correct config ARM64_SUPPORT_AARCH32.
>>>>> - Omit arch and ftaddr arguments.
>>>>> - Rename "xreg5" to "tmp".
>>>>> - Use xxx_RES1 to combine all RES1 fields in xxx register.
>>>>> - Use an immediate cmp directly.
>>>>> - Use #ifdef for CONFIG_ARM64_SUPPORT_AARCH32.
>>>>>
>>>>> Changes in v3:
>>>>> - Comment the functions and the arguments.
>>>>> - Rename the real parameters.
>>>>> - Use the macros instead of the magic values.
>>>>> - Remove the redundant code.
>>>>> - Clean up all of the mess in boot_jump_linux().
>>>>> - Add CONFIG_ARM64_SUPPORT_AARCH32, since some ARM64 systems do not
>>>>>   support the AArch32 state.
>>>>>
>>>>> Changes in v2:
>>>>> - armv8_switch_to_el2_aarch32() is removed. armv8_switch_to_el2_m is
>>>>>   used to switch to AArch64 EL2 or AArch32 Hyp.
>>>>> - armv8_switch_to_el1_aarch32() is removed. armv8_switch_to_el1_m is
>>>>>   used to switch to AArch64 EL1 or AArch32 SVC.
>>>>>
>>>>>  arch/arm/Kconfig                |   6 ++
>>>>>  arch/arm/cpu/armv8/start.S      |   1 +
>>>>>  arch/arm/cpu/armv8/transition.S |   8 +-
>>>>>  arch/arm/include/asm/macro.h    | 172 ++++++++++++++++++++++++++++++----------
>>>>>  arch/arm/include/asm/system.h   | 111 +++++++++++++++++++++++++-
>>>>>  arch/arm/lib/bootm.c            |  19 ++++-
>>>>>  common/image-fit.c              |  19 ++++-
>>>>>  7 files changed, 284 insertions(+), 52 deletions(-)
>>>>>
>>>>> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
>>>>> index 77eab66..9cf4acd 100644
>>>>> --- a/arch/arm/Kconfig
>>>>> +++ b/arch/arm/Kconfig
>>>>> @@ -91,6 +91,12 @@ config SYS_L2CACHE_OFF
>>>>>        If SoC does not support L2CACHE or one do not want to enable
>>>>>        L2CACHE, choose this option.
>>>>>
>>>>> +config ARM64_SUPPORT_AARCH32
>>>>> +    bool "ARM64 system support AArch32 execution state"
>>>>> +    default y if ARM64 && !TARGET_THUNDERX_88XX
>>>>> +    help
>>>>> +      This ARM64 system supports AArch32 execution state.
>>>>> +
>>>>>  choice
>>>>>      prompt "Target select"
>>>>>      default TARGET_HIKEY
>>>>> diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
>>>>> index e933021..dd69501 100644
>>>>> --- a/arch/arm/cpu/armv8/start.S
>>>>> +++ b/arch/arm/cpu/armv8/start.S
>>>>> @@ -234,6 +234,7 @@ WEAK(lowlevel_init)
>>>>>      /*
>>>>>       * All slaves will enter EL2 and optionally EL1.
>>>>>       */
>>>>> +    ldr    x3, =ES_TO_AARCH64
>>>>>      bl    armv8_switch_to_el2
>>>>>  #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
>>>>>      bl    armv8_switch_to_el1
>>>>> diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
>>>>> index 253a39b..e61b6ae 100644
>>>>> --- a/arch/arm/cpu/armv8/transition.S
>>>>> +++ b/arch/arm/cpu/armv8/transition.S
>>>>> @@ -11,13 +11,13 @@
>>>>>  #include <asm/macro.h>
>>>>>
>>>>>  ENTRY(armv8_switch_to_el2)
>>>>> -    switch_el x0, 1f, 0f, 0f
>>>>> +    switch_el x4, 1f, 0f, 0f
>>>>>  0:    ret
>>>>> -1:    armv8_switch_to_el2_m x0
>>>>> +1:    armv8_switch_to_el2_m x0, x3, x4
>>>>>  ENDPROC(armv8_switch_to_el2)
>>>>>
>>>>>  ENTRY(armv8_switch_to_el1)
>>>>> -    switch_el x0, 0f, 1f, 0f
>>>>> +    switch_el x4, 0f, 1f, 0f
>>>>>  0:    ret
>>>>> -1:    armv8_switch_to_el1_m x0, x1
>>>>> +1:    armv8_switch_to_el1_m x0, x3, x4
>>>>>  ENDPROC(armv8_switch_to_el1)
>>>>> diff --git a/arch/arm/include/asm/macro.h b/arch/arm/include/asm/macro.h
>>>>> index 9bb0efa..109724f 100644
>>>>> --- a/arch/arm/include/asm/macro.h
>>>>> +++ b/arch/arm/include/asm/macro.h
>>>>> @@ -8,6 +8,9 @@
>>>>>
>>>>>  #ifndef __ASM_ARM_MACRO_H__
>>>>>  #define __ASM_ARM_MACRO_H__
>>>>> +
>>>>> +#include <asm/system.h>
>>>>> +
>>>>>  #ifdef __ASSEMBLY__
>>>>>
>>>>>  /*
>>>>> @@ -135,13 +138,20 @@ lr    .req    x30
>>>>>  #endif
>>>>>  .endm
>>>>>
>>>>> -.macro armv8_switch_to_el2_m, xreg1
>>>>> -    /* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
>>>>> -    mov    \xreg1, #0x5b1
>>>>> -    msr    scr_el3, \xreg1
>>>>> +/*
>>>>> + * Switch from EL3 to EL2 for ARMv8
>>>>> + * @ep:     kernel entry point
>>>>> + * @flag:   The execution state flag for lower exception
>>>>> + *          level, ES_TO_AARCH64 or ES_TO_AARCH32
>>>>> + * @tmp:    temporary register
>>>>> + *
>>>>> + * x1 is machine nr and x2 is ftaddr, they will be passed
>>>>> + * to the guest.
>>>>> + */
>>>>> +.macro armv8_switch_to_el2_m, ep, flag, tmp
>>>>>      msr    cptr_el3, xzr        /* Disable coprocessor traps to EL3 */
>>>>> -    mov    \xreg1, #0x33ff
>>>>> -    msr    cptr_el2, \xreg1    /* Disable coprocessor traps to EL2 */
>>>>> +    mov    \tmp, #CPTR_EL2_RES1
>>>>> +    msr    cptr_el2, \tmp        /* Disable coprocessor traps to EL2 */
>>>>>
>>>>>      /* Initialize Generic Timers */
>>>>>      msr    cntvoff_el2, xzr
>>>>> @@ -152,45 +162,91 @@ lr    .req    x30
>>>>>       * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
>>>>>       * EE,WXN,I,SA,C,A,M to 0
>>>>>       */
>>>>> -    mov    \xreg1, #0x0830
>>>>> -    movk    \xreg1, #0x30C5, lsl #16
>>>>> -    msr    sctlr_el2, \xreg1
>>>>> +    ldr    \tmp, =(SCTLR_EL2_RES1 | SCTLR_EL2_EE_LE |\
>>>>> +            SCTLR_EL2_WXN_DIS | SCTLR_EL2_ICACHE_DIS |\
>>>>> +            SCTLR_EL2_SA_DIS | SCTLR_EL2_DCACHE_DIS |\
>>>>> +            SCTLR_EL2_ALIGN_DIS | SCTLR_EL2_MMU_DIS)
>>>>> +    msr    sctlr_el2, \tmp
>>>>> +
>>>>> +    mov    \tmp, sp
>>>>> +    msr    sp_el2, \tmp        /* Migrate SP */
>>>>> +    mrs    \tmp, vbar_el3
>>>>> +    msr    vbar_el2, \tmp        /* Migrate VBAR */
>>>>> +
>>>>> +    /* Check switch to AArch64 EL2 or AArch32 Hypervisor mode */
>>>>> +    cmp    \flag, #ES_TO_AARCH32
>>>>> +    b.eq    1f
>>>>> +
>>>>> +    /*
>>>>> +     * The next lower exception level is AArch64, 64bit EL2 | HCE |
>>>>> +     * SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1.
>>>>> +     */
>>>>> +    ldr    \tmp, =(SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN |\
>>>>> +            SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\
>>>>> +            SCR_EL3_NS_EN)
>>>>> +    msr    scr_el3, \tmp
>>>>>
>>>>>      /* Return to the EL2_SP2 mode from EL3 */
>>>>> -    mov    \xreg1, sp
>>>>> -    msr    sp_el2, \xreg1        /* Migrate SP */
>>>>> -    mrs    \xreg1, vbar_el3
>>>>> -    msr    vbar_el2, \xreg1    /* Migrate VBAR */
>>>>> -    mov    \xreg1, #0x3c9
>>>>> -    msr    spsr_el3, \xreg1    /* EL2_SP2 | D | A | I | F */
>>>>> +    ldr    \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\
>>>>> +            SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
>>>>> +            SPSR_EL_M_AARCH64 | SPSR_EL_M_EL2H)
>>>>> +    msr    spsr_el3, \tmp
>>>>>      msr    elr_el3, lr
>>>>
>>>> So if we switch into AArch64 mode, we return ...
>>>>
>>>>>      eret
>>>>> +
>>>>> +1:
>>>>> +    /*
>>>>> +     * The next lower exception level is AArch32, 32bit EL2 | HCE |
>>>>> +     * SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1.
>>>>> +     */
>>>>> +    ldr    \tmp, =(SCR_EL3_RW_AARCH32 | SCR_EL3_HCE_EN |\
>>>>> +            SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\
>>>>> +            SCR_EL3_NS_EN)
>>>>> +    msr    scr_el3, \tmp
>>>>> +
>>>>> +    /* Return to AArch32 Hypervisor mode */
>>>>> +    ldr     \tmp, =(SPSR_EL_END_LE | SPSR_EL_ASYN_MASK |\
>>>>> +            SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
>>>>> +            SPSR_EL_T_A32 | SPSR_EL_M_AARCH32 |\
>>>>> +            SPSR_EL_M_HYP)
>>>>> +    msr    spsr_el3, \tmp
>>>>> +    msr     elr_el3, \ep
>>>>> +
>>>>> +    mov    \ep, #0
>>>>
>>>> ... while if we switch to AArch32 mode we jump to ep.
>>>>
>>>> I think it would make a lot of sense if we could *always* jump to ep.
>>>> Just swizzle the argument order so that you get
>>>>
>>>>                if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
>>>>                    (images->os.arch == IH_ARCH_ARM))
>>>>                        armv8_switch_to_el2(0, gd->bd->bi_arch_number,
>>>>                                            (uintptr_t)images->ft_addr,
>>>>                                            (u64)images->ep, ES_TO_AARCH32);
>>>>                else
>>>>                        armv8_switch_to_el2((uintptr_t)images->ft_addr,
>>>>                                            0, 0, images->ep, ES_TO_AARCH64);
>>> [Alison Wang] I don't agree that it would make a lot of sense if we
>>> could *always* jump to ep.
>>> If we switch to EL2 AArch32 mode, the switch happens at the last
>>> minute, when jumping from U-Boot to the kernel. The ep is the kernel
>>> entry point.
>>> If we switch to EL2 AArch64 mode, the switch happens earlier. The
>>> primary core calls the kernel entry after the switch. The secondary
>>> cores first switch from EL3 AArch64 to EL2 AArch64, then wait until
>>> the spin-table is written by the kernel, and only later jump to the
>>> kernel.
>>
>> Then jump to a separate function that continues the execution stream. I
>> really think we should keep the interface between both flags as
>> identical as possible.
> [Alison Wang] In this v4 patch, the interface of armv8_switch_to_el2() is
> the same for both flags. Well, how about listening to others' opinions? :)

Is it? I thought that when you switch to AA32 mode you jump to ep, while
if you switch to AA64 mode you just return.

That means while the prototype is identical, the behavior is different, no?
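
Just to spell out what I mean, the two code paths currently end up
looking roughly like this in boot_jump_linux() (untested sketch; I am
assuming the v4 C prototype maps x0-x3 to ep, machine nr, fdt address
and flag, as the macro comment suggests):

	void (*kernel_entry)(void *fdt_addr, void *res0, void *res1,
			     void *res2);

	kernel_entry = (void (*)(void *, void *, void *, void *))images->ep;

	if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
	    (images->os.arch == IH_ARCH_ARM)) {
		/* never returns: eret drops straight into the 32-bit kernel */
		armv8_switch_to_el2((u64)images->ep, gd->bd->bi_arch_number,
				    (uintptr_t)images->ft_addr, ES_TO_AARCH32);
	} else {
		/* returns here, and the caller branches to ep itself */
		armv8_switch_to_el2(0, 0, 0, ES_TO_AARCH64);
		kernel_entry(images->ft_addr, NULL, NULL, NULL);
	}

That asymmetry is exactly what I would like to avoid.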


Alex

