Update to 2.3 release

hht8 2021-01-05 17:01:32 +08:00
parent 1201306913
commit 437821c111
7 changed files with 7 additions and 1000 deletions

AArch32-Disable-Secure-Cycle-Counter.patch (deleted)

@@ -1,252 +0,0 @@
From 659a2c0e2ce3d8cb72020234095ecec10a1d0ebd Mon Sep 17 00:00:00 2001
From: Alexei Fedorov <Alexei.Fedorov@arm.com>
Date: Tue, 20 Aug 2019 15:22:44 +0100
Subject: AArch32: Disable Secure Cycle Counter
This patch changes the implementation for disabling the Secure
Cycle Counter. For ARMv8.5 the counter is disabled by setting the
SDCR.SCCD bit on CPU cold/warm boot. For earlier architectures the
PMCR register is saved/restored on secure world entry/exit from/to
Non-secure state, and cycle counting is disabled by setting the
PMCR.DP bit.
New ARMv8.5-PMU related definitions were added to the
'include/aarch32/arch.h' header file.
Change-Id: Ia8845db2ebe8de940d66dff479225a5b879316f8
Signed-off-by: Alexei Fedorov <Alexei.Fedorov@arm.com>
---
bl32/sp_min/aarch32/entrypoint.S | 20 +-------
include/common/aarch32/el3_common_macros.S | 5 ++
include/lib/aarch32/arch.h | 5 ++
include/lib/aarch32/smccc_macros.S | 54 +++++++++++++++++++---
lib/el3_runtime/aarch32/context_mgmt.c | 26 +++++++++--
5 files changed, 81 insertions(+), 29 deletions(-)
diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index d6853cc..d9cb176 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -183,15 +183,6 @@ func sp_min_handle_smc
stcopr r0, SCR
isb
- /*
- * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
- * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
- * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
- */
- ldcopr r0, PMCR
- orr r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
- stcopr r0, PMCR
-
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
@@ -236,15 +227,6 @@ func sp_min_handle_fiq
stcopr r0, SCR
isb
- /*
- * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
- * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
- * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
- */
- ldcopr r0, PMCR
- orr r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
- stcopr r0, PMCR
-
push {r2, r3}
bl sp_min_fiq
pop {r0, r3}
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
index 5db8854..a0abf88 100644
--- a/include/common/aarch32/el3_common_macros.S
+++ b/include/common/aarch32/el3_common_macros.S
@@ -108,7 +108,12 @@
*/
ldr r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
stcopr r0, SDCR
+ ldr r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
+ PMCR_LP_BIT)
+#else
+ ldr r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
+ stcopr r0, PMCR
.endm
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index f9ed56e..a0135b3 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -124,6 +124,7 @@
#define SDCR_SPD_LEGACY U(0x0)
#define SDCR_SPD_DISABLE U(0x2)
#define SDCR_SPD_ENABLE U(0x3)
+#define SDCR_SPME_BIT (U(1) << 17)
#define SDCR_RESET_VAL U(0x0)
#if !ERROR_DEPRECATED
@@ -208,6 +209,8 @@
#define VTTBR_BADDR_SHIFT U(0)
/* HDCR definitions */
+#define HDCR_HLP_BIT (U(1) << 26)
+#define HDCR_HPME_BIT (U(1) << 7)
#define HDCR_RESET_VAL U(0x0)
/* HSTR definitions */
@@ -386,8 +389,10 @@
#define PMCR_N_SHIFT U(11)
#define PMCR_N_MASK U(0x1f)
#define PMCR_N_BITS (PMCR_N_MASK << PMCR_N_SHIFT)
+#define PMCR_LP_BIT (U(1) << 7)
#define PMCR_LC_BIT (U(1) << 6)
#define PMCR_DP_BIT (U(1) << 5)
+#define PMCR_RESET_VAL U(0x0)
/*******************************************************************************
* Definitions of register offsets, fields and macros for CPU system
diff --git a/include/lib/aarch32/smccc_macros.S b/include/lib/aarch32/smccc_macros.S
index fdb65e8..09bc3fd 100644
--- a/include/lib/aarch32/smccc_macros.S
+++ b/include/lib/aarch32/smccc_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -58,7 +58,6 @@
stm r0!, {r2}
stcopr r4, SCR
- isb
#else
/* Save the banked registers including the current SPSR and LR */
mrs r4, sp_usr
@@ -85,10 +84,34 @@
/* lr_mon is already saved by caller */
ldcopr r4, SCR
+
+#if ARM_ARCH_MAJOR > 7
+ /*
+ * Check if earlier initialization of SDCR.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented,
+ * cycle counting is not disabled and PMCR should be
+ * saved in Non-secure context.
+ */
+ ldcopr r5, SDCR
+ tst r5, #SDCR_SCCD_BIT
+ bne 1f
+#endif
+ /* Secure Cycle Counter is not disabled */
#endif
- str r4, [sp, #SMC_CTX_SCR]
- ldcopr r4, PMCR
- str r4, [sp, #SMC_CTX_PMCR]
+ ldcopr r5, PMCR
+
+ /* Check caller's security state */
+ tst r4, #SCR_NS_BIT
+ beq 2f
+
+ /* Save PMCR if called from Non-secure state */
+ str r5, [sp, #SMC_CTX_PMCR]
+
+ /* Disable cycle counter when event counting is prohibited */
+2: orr r5, r5, #PMCR_DP_BIT
+ stcopr r5, PMCR
+ isb
+1: str r4, [sp, #SMC_CTX_SCR]
.endm
/*
@@ -113,12 +136,31 @@
stcopr r1, SCR
isb
+ /*
+ * Restore PMCR when returning to Non-secure state
+ */
+ tst r1, #SCR_NS_BIT
+ beq 2f
+
+ /*
+ * Back to Non-secure state
+ */
+#if ARM_ARCH_MAJOR > 7
+ /*
+ * Check if earlier initialization of SDCR.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented and
+ * PMCR should be restored from Non-secure context.
+ */
+ ldcopr r1, SDCR
+ tst r1, #SDCR_SCCD_BIT
+ bne 2f
+#endif
/*
* Restore the PMCR register.
*/
ldr r1, [r0, #SMC_CTX_PMCR]
stcopr r1, PMCR
-
+2:
/* Restore the banked registers including the current SPSR */
add r1, r0, #SMC_CTX_SP_USR
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 11ef6e5..98796d2 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -278,10 +278,28 @@ void cm_prepare_el3_exit(uint32_t security_state)
*
* HDCR.HPMN: Set to value of PMCR.N which is the
* architecturally-defined reset value.
+ *
+ * HDCR.HLP: Set to one so that event counter
+ * overflow, that is recorded in PMOVSCLR[0-30],
+ * occurs on the increment that changes
+ * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
+ * implemented. This bit is RES0 in versions of the
+ * architecture earlier than ARMv8.5, setting it to 1
+ * doesn't have any effect on them.
+ * This bit is Reserved, UNK/SBZP in ARMv7.
+ *
+ * HDCR.HPME: Set to zero to disable EL2 Event
+ * counters.
*/
- write_hdcr(HDCR_RESET_VAL |
- ((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT));
-
+#if (ARM_ARCH_MAJOR > 7)
+ write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
+ ((read_pmcr() & PMCR_N_BITS) >>
+ PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
+#else
+ write_hdcr((HDCR_RESET_VAL |
+ ((read_pmcr() & PMCR_N_BITS) >>
+ PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
+#endif
/*
* Set HSTR to its architectural reset value so that
* access to system registers in the cproc=1111
--
2.23.0
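
For reference, the control flow that the smccc_macros.S changes above implement can be summarized in C. This is a minimal sketch under stated assumptions: the read_*/write_* accessors and the saved_pmcr variable are hypothetical stand-ins for the ldcopr/stcopr coprocessor accesses and the SMC_CTX_PMCR context slot; they are not part of the patch.

#define SCR_NS_BIT     (1U << 0)
#define PMCR_DP_BIT    (1U << 5)
#define SDCR_SCCD_BIT  (1U << 23)

extern unsigned int read_scr(void);    /* hypothetical accessor */
extern unsigned int read_sdcr(void);   /* hypothetical accessor */
extern unsigned int read_pmcr(void);   /* hypothetical accessor */
extern void write_pmcr(unsigned int);  /* hypothetical accessor */

static unsigned int saved_pmcr;        /* stands in for SMC_CTX_PMCR */

void smc_entry_save_pmcr(void)
{
#if ARM_ARCH_MAJOR > 7
	/* If SDCR.SCCD is set, ARMv8.5-PMU already disables Secure
	 * cycle counting and PMCR does not need to be touched. */
	if ((read_sdcr() & SDCR_SCCD_BIT) != 0U)
		return;
#endif
	unsigned int pmcr = read_pmcr();

	/* Save PMCR only when the SMC arrived from the Non-secure world. */
	if ((read_scr() & SCR_NS_BIT) != 0U)
		saved_pmcr = pmcr;

	/* PMCR.DP = 1: prohibit cycle counting while in the Secure world. */
	write_pmcr(pmcr | PMCR_DP_BIT);
}

On the exit path the patch applies the same two checks in reverse: PMCR is restored from the saved slot only when returning to Non-secure state, and only when SDCR.SCCD could not do the job.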

AArch64-Disable-Secure-Cycle-Counter.patch (deleted)

@@ -1,511 +0,0 @@
From 8603760403dc686361a6693bffc195b72092a77d Mon Sep 17 00:00:00 2001
From: Alexei Fedorov <Alexei.Fedorov@arm.com>
Date: Tue, 13 Aug 2019 15:17:53 +0100
Subject: AArch64: Disable Secure Cycle Counter
This patch fixes an issue where secure world timing information
could be leaked because the Secure Cycle Counter is not disabled.
For ARMv8.5 the counter is disabled by setting the MDCR_EL3.SCCD
bit on CPU cold/warm boot.
For earlier architectures the PMCR_EL0 register is saved/restored
on secure world entry/exit from/to Non-secure state, and cycle
counting is disabled by setting the PMCR_EL0.DP bit.
The 'include/aarch64/arch.h' header file was tidied up and new
ARMv8.5-PMU related definitions were added.
Change-Id: I6f56db6bc77504634a352388990ad925a69ebbfa
Signed-off-by: Alexei Fedorov <Alexei.Fedorov@arm.com>
---
bl1/aarch64/bl1_exceptions.S | 10 +++-
bl31/aarch64/ea_delegate.S | 14 +++++
bl31/aarch64/runtime_exceptions.S | 22 +++++++
include/common/aarch64/el3_common_macros.S | 34 ++++++++++-
include/lib/aarch64/arch.h | 29 +++++----
include/lib/el3_runtime/aarch64/context.h | 19 +++---
lib/el3_runtime/aarch64/context.S | 65 ++++++++++++++++++--
lib/el3_runtime/aarch64/context_mgmt.c | 70 ++++++++++++----------
8 files changed, 200 insertions(+), 63 deletions(-)
diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S
index cf8a6a7..66db591 100644
--- a/bl1/aarch64/bl1_exceptions.S
+++ b/bl1/aarch64/bl1_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -223,6 +223,14 @@ smc_handler:
*/
bl save_gp_registers
+ /* -----------------------------------------------------
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ * -----------------------------------------------------
+ */
+ bl save_pmcr_disable_pmu
+
/* -----------------------------------------------------
* Populate the parameters for the SMC handler. We
* already have x0-x4 in place. x5 will point to a
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
index 9d7c5e8..bf49c1b 100644
--- a/bl31/aarch64/ea_delegate.S
+++ b/bl31/aarch64/ea_delegate.S
@@ -67,6 +67,13 @@ func enter_lower_el_sync_ea
/* Save GP registers */
bl save_gp_registers
+ /*
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ */
+ bl save_pmcr_disable_pmu
+
/* Setup exception class and syndrome arguments for platform handler */
mov x0, #ERROR_EA_SYNC
@@ -98,6 +105,13 @@ func enter_lower_el_async_ea
/* Save GP registers */
bl save_gp_registers
+ /*
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ */
+ bl save_pmcr_disable_pmu
+
/* Setup exception class and syndrome arguments for platform handler */
mov x0, #ERROR_EA_ASYNC
mrs x1, esr_el3
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 77bd63e..7978373 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -66,6 +66,14 @@
/* Save GP registers and restore them afterwards */
bl save_gp_registers
+
+ /*
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ */
+ bl save_pmcr_disable_pmu
+
bl handle_lower_el_ea_esb
bl restore_gp_registers
@@ -120,6 +128,13 @@
*/
.macro handle_interrupt_exception label
bl save_gp_registers
+ /*
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ */
+ bl save_pmcr_disable_pmu
+
/* Save the EL3 system registers needed to return from this exception */
mrs x0, spsr_el3
mrs x1, elr_el3
@@ -359,6 +374,13 @@ smc_handler64:
*/
bl save_gp_registers
+ /*
+ * If Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented, save PMCR_EL0 and
+ * disable all event counters and cycle counter.
+ */
+ bl save_pmcr_disable_pmu
+
mov x5, xzr
mov x6, sp
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index 03b977e..0aea8e4 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -98,10 +98,40 @@
* accesses to all Performance Monitors registers do not trap to EL3.
* ---------------------------------------------------------------------
*/
- mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
- & ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
+ mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE) | MDCR_SCCD_BIT) & \
+ ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
msr mdcr_el3, x0
+ /* ---------------------------------------------------------------------
+ * Initialise PMCR_EL0 setting all fields rather than relying
+ * on hw. Some fields are architecturally UNKNOWN on reset.
+ *
+ * PMCR_EL0.LP: Set to one so that event counter overflow, that
+ * is recorded in PMOVSCLR_EL0[0-30], occurs on the increment
+ * that changes PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU
+ * is implemented. This bit is RES0 in versions of the architecture
+ * earlier than ARMv8.5, setting it to 1 doesn't have any effect
+ * on them.
+ *
+ * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+ * is recorded in PMOVSCLR_EL0[31], occurs on the increment
+ * that changes PMCCNTR_EL0[63] from 1 to 0.
+ *
+ * PMCR_EL0.DP: Set to one so that the cycle counter,
+ * PMCCNTR_EL0 does not count when event counting is prohibited.
+ *
+ * PMCR_EL0.X: Set to zero to disable export of events.
+ *
+ * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+ * counts on every clock cycle.
+ * ---------------------------------------------------------------------
+ */
+ mov_imm x0, ((PMCR_EL0_RESET_VAL | PMCR_EL0_LP_BIT | \
+ PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT) & \
+ ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT))
+
+ msr pmcr_el0, x0
+
/* ---------------------------------------------------------------------
* Enable External Aborts and SError Interrupts now that the exception
* vectors have been setup.
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index fed5944..ad02272 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -111,30 +111,27 @@
#define ID_AA64PFR0_EL2_SHIFT U(8)
#define ID_AA64PFR0_EL3_SHIFT U(12)
#define ID_AA64PFR0_AMU_SHIFT U(44)
-#define ID_AA64PFR0_AMU_LENGTH U(4)
#define ID_AA64PFR0_AMU_MASK ULL(0xf)
#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
#define ID_AA64PFR0_SVE_SHIFT U(32)
#define ID_AA64PFR0_SVE_MASK ULL(0xf)
-#define ID_AA64PFR0_SVE_LENGTH U(4)
#define ID_AA64PFR0_MPAM_SHIFT U(40)
#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
#define ID_AA64PFR0_CSV2_SHIFT U(56)
#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
#define ID_AA64PFR0_CSV2_LENGTH U(4)
-/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
-#define ID_AA64DFR0_PMS_SHIFT U(32)
-#define ID_AA64DFR0_PMS_LENGTH U(4)
-#define ID_AA64DFR0_PMS_MASK ULL(0xf)
-
+/* Exception level handling */
#define EL_IMPL_NONE ULL(0)
#define EL_IMPL_A64ONLY ULL(1)
#define EL_IMPL_A64_A32 ULL(2)
-#define ID_AA64PFR0_GIC_SHIFT U(24)
-#define ID_AA64PFR0_GIC_WIDTH U(4)
-#define ID_AA64PFR0_GIC_MASK ((ULL(1) << ID_AA64PFR0_GIC_WIDTH) - ULL(1))
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
/* ID_AA64MMFR0_EL1 definitions */
#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
@@ -233,11 +230,13 @@
#define SCR_RESET_VAL SCR_RES1_BITS
/* MDCR_EL3 definitions */
+#define MDCR_SCCD_BIT (ULL(1) << 23)
+#define MDCR_SPME_BIT (ULL(1) << 17)
+#define MDCR_SDD_BIT (ULL(1) << 16)
#define MDCR_SPD32(x) ((x) << 14)
#define MDCR_SPD32_LEGACY U(0x0)
#define MDCR_SPD32_DISABLE U(0x2)
#define MDCR_SPD32_ENABLE U(0x3)
-#define MDCR_SDD_BIT (U(1) << 16)
#define MDCR_NSPB(x) ((x) << 12)
#define MDCR_NSPB_EL1 U(0x3)
#define MDCR_TDOSA_BIT (U(1) << 10)
@@ -250,6 +249,10 @@
#endif
/* MDCR_EL2 definitions */
+#define MDCR_EL2_HLP (U(1) << 26)
+#define MDCR_EL2_HCCD (U(1) << 23)
+#define MDCR_EL2_TTRF (U(1) << 19)
+#define MDCR_EL2_HPMD (U(1) << 17)
#define MDCR_EL2_TPMS (U(1) << 14)
#define MDCR_EL2_E2PB(x) ((x) << 12)
#define MDCR_EL2_E2PB_EL1 U(0x3)
@@ -581,10 +584,14 @@
#define PMCR_EL0_N_SHIFT U(11)
#define PMCR_EL0_N_MASK U(0x1f)
#define PMCR_EL0_N_BITS (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LP_BIT (U(1) << 7)
#define PMCR_EL0_LC_BIT (U(1) << 6)
#define PMCR_EL0_DP_BIT (U(1) << 5)
#define PMCR_EL0_X_BIT (U(1) << 4)
#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
+#define PMCR_EL0_E_BIT (U(1) << 0)
/*******************************************************************************
* Definitions for system register interface to SVE
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 196b94e..f50de6c 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -59,7 +59,7 @@
#define CTX_RUNTIME_SP U(0x10)
#define CTX_SPSR_EL3 U(0x18)
#define CTX_ELR_EL3 U(0x20)
-#define CTX_UNUSED U(0x28)
+#define CTX_PMCR_EL0 U(0x28)
#define CTX_EL3STATE_END U(0x30)
/*******************************************************************************
@@ -91,22 +91,21 @@
#define CTX_AFSR1_EL1 U(0x98)
#define CTX_CONTEXTIDR_EL1 U(0xa0)
#define CTX_VBAR_EL1 U(0xa8)
-#define CTX_PMCR_EL0 U(0xb0)
/*
* If the platform is AArch64-only, there is no need to save and restore these
* AArch32 registers.
*/
#if CTX_INCLUDE_AARCH32_REGS
-#define CTX_SPSR_ABT U(0xc0) /* Align to the next 16 byte boundary */
-#define CTX_SPSR_UND U(0xc8)
-#define CTX_SPSR_IRQ U(0xd0)
-#define CTX_SPSR_FIQ U(0xd8)
-#define CTX_DACR32_EL2 U(0xe0)
-#define CTX_IFSR32_EL2 U(0xe8)
-#define CTX_AARCH32_END U(0xf0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_ABT U(0xb0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_UND U(0xb8)
+#define CTX_SPSR_IRQ U(0xc0)
+#define CTX_SPSR_FIQ U(0xc8)
+#define CTX_DACR32_EL2 U(0xd0)
+#define CTX_IFSR32_EL2 U(0xd8)
+#define CTX_AARCH32_END U(0xe0) /* Align to the next 16 byte boundary */
#else
-#define CTX_AARCH32_END U(0xc0) /* Align to the next 16 byte boundary */
+#define CTX_AARCH32_END U(0xb0) /* Align to the next 16 byte boundary */
#endif /* CTX_INCLUDE_AARCH32_REGS */
/*
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 1f8e23d..fbc8d7d 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -17,8 +17,44 @@
.global save_gp_registers
.global restore_gp_registers
.global restore_gp_registers_eret
+ .global save_pmcr_disable_pmu
.global el3_exit
+/* -----------------------------------------------------
+ * If ARMv8.5-PMU is implemented, cycle counting is
+ * disabled by setting MDCR_EL3.SCCD to 1.
+ * -----------------------------------------------------
+ */
+func save_pmcr_disable_pmu
+ /* -----------------------------------------------------
+ * Check if earlier initialization of MDCR_EL3.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented and
+ * PMCR_EL0 should be saved in non-secure context.
+ * -----------------------------------------------------
+ */
+ mrs x9, mdcr_el3
+ tst x9, #MDCR_SCCD_BIT
+ bne 1f
+
+ /* Secure Cycle Counter is not disabled */
+ mrs x9, pmcr_el0
+
+ /* Check caller's security state */
+ mrs x10, scr_el3
+ tst x10, #SCR_NS_BIT
+ beq 2f
+
+ /* Save PMCR_EL0 if called from Non-secure state */
+ str x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
+
+ /* Disable cycle counter when event counting is prohibited */
+2: orr x9, x9, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x9
+
+ isb
+1: ret
+endfunc save_pmcr_disable_pmu
+
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
@@ -73,9 +109,6 @@ func el1_sysregs_context_save
mrs x9, vbar_el1
stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
- mrs x10, pmcr_el0
- str x10, [x0, #CTX_PMCR_EL0]
-
/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
mrs x11, spsr_abt
@@ -162,9 +195,6 @@ func el1_sysregs_context_restore
msr contextidr_el1, x17
msr vbar_el1, x9
- ldr x10, [x0, #CTX_PMCR_EL0]
- msr pmcr_el0, x10
-
/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
ldp x11, x12, [x0, #CTX_SPSR_ABT]
@@ -406,6 +436,29 @@ func el3_exit
msr spsr_el3, x16
msr elr_el3, x17
+ /* -----------------------------------------------------
+ * Restore PMCR_EL0 when returning to Non-secure state
+ * if Secure Cycle Counter is not disabled in MDCR_EL3
+ * when ARMv8.5-PMU is implemented
+ * -----------------------------------------------------
+ */
+ tst x18, #SCR_NS_BIT
+ beq 2f
+
+ /* -----------------------------------------------------
+ * Back to Non-secure state.
+ * Check if earlier initialization of MDCR_EL3.SCCD to 1
+ * failed, meaning that ARMv8.5-PMU is not implemented and
+ * PMCR_EL0 should be restored from non-secure context.
+ * -----------------------------------------------------
+ */
+ mrs x17, mdcr_el3
+ tst x17, #MDCR_SCCD_BIT
+ bne 2f
+ ldr x17, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
+ msr pmcr_el0, x17
+2:
+
#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
/* Restore mitigation state as it was on entry to EL3 */
ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index ee5fe4f..3bb3f26 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -62,7 +62,7 @@ void cm_init(void)
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
- uint32_t scr_el3, pmcr_el0;
+ uint32_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx, actlr_elx;
@@ -188,31 +188,10 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
actlr_elx = read_actlr_el1();
write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
- if (security_state == SECURE) {
- /*
- * Initialise PMCR_EL0 for secure context only, setting all
- * fields rather than relying on hw. Some fields are
- * architecturally UNKNOWN on reset.
- *
- * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
- * is recorded in PMOVSCLR_EL0[31], occurs on the increment
- * that changes PMCCNTR_EL0[63] from 1 to 0.
- *
- * PMCR_EL0.DP: Set to one so that the cycle counter,
- * PMCCNTR_EL0 does not count when event counting is prohibited.
- *
- * PMCR_EL0.X: Set to zero to disable export of events.
- *
- * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
- * counts on every clock cycle.
- */
- pmcr_el0 = ((PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT
- | PMCR_EL0_DP_BIT)
- & ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT));
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_PMCR_EL0, pmcr_el0);
- }
-
- /* Populate EL3 state so that we've the right context before doing ERET */
+ /*
+ * Populate EL3 state so that we've the right context
+ * before doing ERET
+ */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
@@ -387,6 +366,29 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
+ * MDCR_EL2.HLP: Set to one so that event counter
+ * overflow, that is recorded in PMOVSCLR_EL0[0-30],
+ * occurs on the increment that changes
+ * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
+ * implemented. This bit is RES0 in versions of the
+ * architecture earlier than ARMv8.5, setting it to 1
+ * doesn't have any effect on them.
+ *
+ * MDCR_EL2.TTRF: Set to zero so that access to Trace
+ * Filter Control register TRFCR_EL1 at EL1 is not
+ * trapped to EL2. This bit is RES0 in versions of
+ * the architecture earlier than ARMv8.4.
+ *
+ * MDCR_EL2.HPMD: Set to one so that event counting is
+ * prohibited at EL2. This bit is RES0 in versions of
+ * the architecture earlier than ARMv8.1, setting it
+ * to 1 doesn't have any effect on them.
+ *
+ * MDCR_EL2.TPMS: Set to zero so that accesses to
+ * Statistical Profiling control registers from EL1
+ * do not trap to EL2. This bit is RES0 when SPE is
+ * not implemented.
+ *
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@@ -415,13 +417,15 @@ void cm_prepare_el3_exit(uint32_t security_state)
* MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
* architecturally-defined reset value.
*/
- mdcr_el2 = ((MDCR_EL2_RESET_VAL |
- ((read_pmcr_el0() & PMCR_EL0_N_BITS)
- >> PMCR_EL0_N_SHIFT)) &
- ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT
- | MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT
- | MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
- | MDCR_EL2_TPMCR_BIT));
+ mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
+ MDCR_EL2_HPMD) |
+ ((read_pmcr_el0() & PMCR_EL0_N_BITS)
+ >> PMCR_EL0_N_SHIFT)) &
+ ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
+ MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
+ MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
+ MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
+ MDCR_EL2_TPMCR_BIT);
write_mdcr_el2(mdcr_el2);
--
2.23.0
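
The AArch64 flow mirrors the AArch32 one: at boot MDCR_EL3.SCCD and PMCR_EL0 are initialized, and on every EL3 entry/exit the pair below runs. A hedged C sketch of save_pmcr_disable_pmu and the matching restore in el3_exit follows; the accessors and ctx_pmcr_el0 are assumed stand-ins for the mrs/msr instructions and the CTX_PMCR_EL0 slot in the EL3 state context, and only the branch logic is taken from the patch above.

#include <stdint.h>

#define SCR_NS_BIT       (1ULL << 0)
#define PMCR_EL0_DP_BIT  (1ULL << 5)
#define MDCR_SCCD_BIT    (1ULL << 23)

extern uint64_t read_mdcr_el3(void);   /* hypothetical accessor */
extern uint64_t read_scr_el3(void);    /* hypothetical accessor */
extern uint64_t read_pmcr_el0(void);   /* hypothetical accessor */
extern void write_pmcr_el0(uint64_t);  /* hypothetical accessor */

static uint64_t ctx_pmcr_el0;          /* stands in for CTX_PMCR_EL0 */

void save_pmcr_disable_pmu(void)
{
	/* MDCR_EL3.SCCD == 1 means ARMv8.5-PMU already disabled
	 * Secure cycle counting on cold/warm boot. */
	if ((read_mdcr_el3() & MDCR_SCCD_BIT) != 0U)
		return;

	uint64_t pmcr = read_pmcr_el0();

	/* Save PMCR_EL0 only if EL3 was entered from Non-secure state. */
	if ((read_scr_el3() & SCR_NS_BIT) != 0U)
		ctx_pmcr_el0 = pmcr;

	/* PMCR_EL0.DP = 1: PMCCNTR_EL0 stops while counting is prohibited. */
	write_pmcr_el0(pmcr | PMCR_EL0_DP_BIT);
}

void el3_exit_restore_pmcr(uint64_t scr_el3)
{
	/* Restore only when returning to Non-secure state, and only if
	 * MDCR_EL3.SCCD was unavailable and PMCR_EL0 was saved above. */
	if ((scr_el3 & SCR_NS_BIT) != 0U &&
	    (read_mdcr_el3() & MDCR_SCCD_BIT) == 0U)
		write_pmcr_el0(ctx_pmcr_el0);
}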

arm-trusted-firmware.spec

@@ -1,16 +1,12 @@
 %global debug_package %{nil}
 Name:          arm-trusted-firmware
-Version:       1.6
-Release:       2
+Version:       2.3
+Release:       1
 Summary:       ARM Trusted Firmware
 License:       BSD
 URL:           https://github.com/ARM-software/arm-trusted-firmware/wiki
-Source0:       https://github.com/ARM-software/arm-trusted-firmware/archive/v1.6-rc0.tar.gz
-Patch0000:     AArch32-Disable-Secure-Cycle-Counter.patch
-Patch0001:     cleanup-context--handing-library.patch
-Patch0002:     use-helper-function-to-save-registers-in-SMC-handler.patch
-Patch0003:     AArch64-Disable-Secure-Cycle-Counter.patch
+Source0:       https://github.com/ARM-software/arm-trusted-firmware/archive/v%{version}.tar.gz
 ExclusiveArch: aarch64
 BuildRequires: dtc
@@ -25,7 +21,7 @@ ARM Trusted Firmware for various ARMv8-A SoCs.
 %prep
-%autosetup -p1 -n %{name}-%{version}-rc0
+%autosetup -p1 -n %{name}-%{version}
 sed -i 's/arm-none-eabi-/arm-linux-gnu-/' plat/rockchip/rk3399/drivers/m0/Makefile

 %build
@@ -62,6 +58,9 @@ done
 %{_datadir}/%{name}

 %changelog
+* Tue Jan 05 2021 huanghaitao <huanghaitao8@huawei.com> - 2.3-1
+- Update to 2.3 release
+
 * Wed Sep 16 2020 wangyue <wangyue92@huawei.com> - 1.6-2
 - fix CVE-2017-15031

cleanup-context--handing-library.patch (deleted)

@@ -1,182 +0,0 @@
From 65c4c975e76b9fe03ce692ca0686719dc560428e Mon Sep 17 00:00:00 2001
From: wang_yue111 <648774160@qq.com>
Date: Wed, 16 Sep 2020 15:56:17 +0800
Subject: [PATCH] 2
---
include/lib/el3_runtime/aarch64/context.h | 45 +++++++++++------------
lib/el3_runtime/aarch64/context.S | 22 ++++++-----
2 files changed, 34 insertions(+), 33 deletions(-)
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index b990674..196b94e 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -104,25 +104,30 @@
#define CTX_SPSR_FIQ U(0xd8)
#define CTX_DACR32_EL2 U(0xe0)
#define CTX_IFSR32_EL2 U(0xe8)
-#define CTX_TIMER_SYSREGS_OFF U(0xf0) /* Align to the next 16 byte boundary */
+#define CTX_AARCH32_END U(0xf0) /* Align to the next 16 byte boundary */
#else
-#define CTX_TIMER_SYSREGS_OFF U(0xc0) /* Align to the next 16 byte boundary */
-#endif /* __CTX_INCLUDE_AARCH32_REGS__ */
+#define CTX_AARCH32_END U(0xc0) /* Align to the next 16 byte boundary */
+#endif /* CTX_INCLUDE_AARCH32_REGS */
/*
* If the timer registers aren't saved and restored, we don't have to reserve
* space for them in the context
*/
#if NS_TIMER_SWITCH
-#define CTX_CNTP_CTL_EL0 (CTX_TIMER_SYSREGS_OFF + U(0x0))
-#define CTX_CNTP_CVAL_EL0 (CTX_TIMER_SYSREGS_OFF + U(0x8))
-#define CTX_CNTV_CTL_EL0 (CTX_TIMER_SYSREGS_OFF + U(0x10))
-#define CTX_CNTV_CVAL_EL0 (CTX_TIMER_SYSREGS_OFF + U(0x18))
-#define CTX_CNTKCTL_EL1 (CTX_TIMER_SYSREGS_OFF + U(0x20))
-#define CTX_SYSREGS_END (CTX_TIMER_SYSREGS_OFF + U(0x30)) /* Align to the next 16 byte boundary */
+#define CTX_CNTP_CTL_EL0 (CTX_AARCH32_END + U(0x0))
+#define CTX_CNTP_CVAL_EL0 (CTX_AARCH32_END + U(0x8))
+#define CTX_CNTV_CTL_EL0 (CTX_AARCH32_END + U(0x10))
+#define CTX_CNTV_CVAL_EL0 (CTX_AARCH32_END + U(0x18))
+#define CTX_CNTKCTL_EL1 (CTX_AARCH32_END + U(0x20))
+#define CTX_TIMER_SYSREGS_END (CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
#else
-#define CTX_SYSREGS_END CTX_TIMER_SYSREGS_OFF
-#endif /* __NS_TIMER_SWITCH__ */
+#define CTX_TIMER_SYSREGS_END CTX_AARCH32_END
+#endif /* NS_TIMER_SWITCH */
+
+/*
+ * End of system registers.
+ */
+#define CTX_SYSREGS_END CTX_TIMER_SYSREGS_END
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'fp_regs'
@@ -174,6 +179,9 @@
#define CTX_FPREGS_END U(0)
#endif
+/*******************************************************************************
+ * Registers related to CVE-2018-3639
+ ******************************************************************************/
#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
#define CTX_CVE_2018_3639_DISABLE U(0)
#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
@@ -181,7 +189,6 @@
#ifndef __ASSEMBLY__
#include <cassert.h>
-#include <platform_def.h> /* for CACHE_WRITEBACK_GRANULE */
#include <stdint.h>
/*
@@ -198,7 +205,7 @@
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
-#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
+# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
@@ -267,7 +274,7 @@ typedef struct cpu_context {
/* Macros to access members of the 'cpu_context_t' structure */
#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx)
#if CTX_INCLUDE_FPREGS
-#define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
+# define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
#endif
#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx)
#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
@@ -337,14 +344,6 @@ void fpregs_context_save(fp_regs_t *regs);
void fpregs_context_restore(fp_regs_t *regs);
#endif
-
-#undef CTX_SYSREG_ALL
-#if CTX_INCLUDE_FPREGS
-#undef CTX_FPREG_ALL
-#endif
-#undef CTX_GPREG_ALL
-#undef CTX_EL3STATE_ALL
-
#endif /* __ASSEMBLY__ */
#endif /* __CONTEXT_H__ */
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 707e6db..1f8e23d 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -332,9 +332,10 @@ func save_gp_registers
ret
endfunc save_gp_registers
-/*
+/* -----------------------------------------------------
* This function restores all general purpose registers except x30 from the
* CPU context. x30 register must be explicitly restored by the caller.
+ * -----------------------------------------------------
*/
func restore_gp_registers
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
@@ -357,9 +358,10 @@ func restore_gp_registers
ret
endfunc restore_gp_registers
-/*
+/* -----------------------------------------------------
* Restore general purpose registers (including x30), and exit EL3 via ERET to
* a lower exception level.
+ * -----------------------------------------------------
*/
func restore_gp_registers_eret
bl restore_gp_registers
@@ -377,12 +379,12 @@ func restore_gp_registers_eret
eret
endfunc restore_gp_registers_eret
- /* -----------------------------------------------------
- * This routine assumes that the SP_EL3 is pointing to
- * a valid context structure from where the gp regs and
- * other special registers can be retrieved.
- * -----------------------------------------------------
- */
+/* -----------------------------------------------------
+ * This routine assumes that the SP_EL3 is pointing to
+ * a valid context structure from where the gp regs and
+ * other special registers can be retrieved.
+ * -----------------------------------------------------
+ */
func el3_exit
/* -----------------------------------------------------
* Save the current SP_EL0 i.e. the EL3 runtime stack
@@ -410,9 +412,9 @@ func el3_exit
cmp x17, xzr
beq 1f
blr x17
+1:
#endif
-1:
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
endfunc el3_exit
--
2.23.0
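
The offset renames above preserve the invariant that the "Align to the next 16 byte boundary" comments describe. A hedged illustration using TF-A's CASSERT compile-time assert, assuming the context.h definitions above are in scope (the assertion names are arbitrary):

#include <cassert.h>	/* TF-A's CASSERT() compile-time assert */

/* Every region of cpu_context_t must stay 16-byte aligned. */
CASSERT((CTX_AARCH32_END & U(0xf)) == U(0), assert_ctx_aarch32_end_aligned);
CASSERT((CTX_TIMER_SYSREGS_END & U(0xf)) == U(0), assert_ctx_timer_end_aligned);

/* CTX_SYSREGS_END simply closes the chain of system-register offsets. */
CASSERT(CTX_SYSREGS_END == CTX_TIMER_SYSREGS_END, assert_sysregs_end_chain);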

use-helper-function-to-save-registers-in-SMC-handler.patch (deleted)

@@ -1,47 +0,0 @@
From 01fc1c24b9a03c519efae4d006a71c350c0529fa Mon Sep 17 00:00:00 2001
From: Soby Mathew <soby.mathew@arm.com>
Date: Fri, 16 Nov 2018 15:43:34 +0000
Subject: [PATCH] BL31: Use helper function to save registers in SMC handler
Use the helper function `save_gp_registers` to save the register
state to cpu_context on entry to EL3 in the SMC handler. This has
the effect of also saving x0-x3 into the cpu_context, which was not
done previously, but it unifies the register save sequence in BL31.
Change-Id: I5753c942263a5f9178deda3dba896e3220f3dd83
Signed-off-by: Soby Mathew <soby.mathew@arm.com>
---
bl31/aarch64/runtime_exceptions.S | 18 +-----------------
1 file changed, 1 insertion(+), 17 deletions(-)
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 54db6814d5..77bd63ec08 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -356,24 +356,8 @@ smc_handler64:
* We already have x0-x4 in place. x5 will point to a cookie (not used
* now). x6 will point to the context structure (SP_EL3) and x7 will
* contain flags we need to pass to the handler.
- *
- * Save x4-x29 and sp_el0.
*/
- stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
- stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
- stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
- stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
- stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
- stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
- stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
- stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
- mrs x18, sp_el0
- str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+ bl save_gp_registers
mov x5, xzr
mov x6, sp

v1.6-rc0.tar.gz (deleted)

Binary file not shown.

v2.3.tar.gz (new file)

Binary file not shown.