From 50daf5b7c4ec4efcaf49a4128930f872bec7dbc0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 12:55:54 +0000 Subject: [PATCH 001/130] arm64/cpufeature: Fix field sign for DIT hwcap detection Since it was added our hwcap for DIT has specified that DIT is a signed field but this appears to be incorrect, the two values for the enumeration are: 0b0000 NI 0b0001 IMP which look like a normal unsigned enumeration and the in-kernel DIT usage added by 01ab991fc0ee ("arm64: Enable data independent timing (DIT) in the kernel") detects the feature with an unsigned enum. Fix the hwcap to specify the field as unsigned. Fixes: 7206dc93a58f ("arm64: Expose Arm v8.4 features") Reviewed-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v3-1-0d71a7b174a8@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..ee40dca9f28e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2777,7 +2777,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), From 8c6e105558628b63292ae770198f11a1d5535660 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 12:55:55 +0000 Subject: [PATCH 002/130] arm64/sysreg: Fix errors in 32 bit enumeration values The recently converted 32 bit ID registers have errors in MVFR0_EL1.FPSP, MVFR0_EL1.SIMDReg and MVFR1_EL1.SIMDHP where enumeration values which should be 0b0010 are specified as 0b0001. Correct these. 
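To see why field signedness matters for the DIT fix above: ID register fields here are 4 bits wide, and a signed field sign-extends its top bit, so any future DIT value of 0b1000 or above would read back as negative and fail the ">= 1" capability check, while an unsigned field keeps enumerating upwards. A standalone sketch of the two extractions (illustrative only, not kernel code):

    #include <stdint.h>

    /* Signed: move the field's top bit to bit 63, then sign-extend down. */
    static inline int64_t extract_signed(uint64_t reg, int shift)
    {
    	return ((int64_t)(reg << (64 - shift - 4))) >> 60;
    }

    /* Unsigned: plain shift and mask. */
    static inline uint64_t extract_unsigned(uint64_t reg, int shift)
    {
    	return (reg >> shift) & 0xf;
    }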
Fixes: e79c94a2a487 ("arm64/sysreg: Convert MVFR0_EL1 to automatic generation") Fixes: c9b718eda706 ("arm64/sysreg: Convert MVFR1_EL1 to automatic generation") Reviewed-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v3-2-0d71a7b174a8@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 184e58fd5631..e8fb6684d7f3 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -689,17 +689,17 @@ EndEnum Enum 11:8 FPDP 0b0000 NI 0b0001 VFPv2 - 0b0001 VFPv3 + 0b0010 VFPv3 EndEnum Enum 7:4 FPSP 0b0000 NI 0b0001 VFPv2 - 0b0001 VFPv3 + 0b0010 VFPv3 EndEnum Enum 3:0 SIMDReg 0b0000 NI 0b0001 IMP_16x64 - 0b0001 IMP_32x64 + 0b0010 IMP_32x64 EndEnum EndSysreg @@ -718,7 +718,7 @@ EndEnum Enum 23:20 SIMDHP 0b0000 NI 0b0001 SIMDHP - 0b0001 SIMDHP_FLOAT + 0b0010 SIMDHP_FLOAT EndEnum Enum 19:16 SIMDSP 0b0000 NI From e978eaca4beefb789874864c0ed99603531c5425 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:00:39 +0000 Subject: [PATCH 003/130] arm64/cpufeature: Remove 4 bit assumption in ARM64_FEATURE_MASK() The ARM64_FEATURE_MASK(), used extensively by KVM, assumes that all ID register fields are 4 bits wide but this is not the case any more, for example there are several 1 bit fields in ID_AA64SMFR0_EL1. Fortunately we now have generated constants for all the ID mask registers which can be used instead. Rather than create churn from updating existing users update the macro to reference the generated constants and replace the comment with a note advising against adding new users. There are also users of ARM64_FEATURE_FIELD_BITS in the pKVM code which will need to be fixed separately, since no relevant feature is planned to be exposed to protected guests in the immediate future there is no immediate issue with them assuming fields are 4 bits wide. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221222-arm64-arm64-feature-mask-v1-1-c34c1e177f90@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1312fb48f18b..1c2bd78f03d3 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -809,8 +809,8 @@ #define ARM64_FEATURE_FIELD_BITS 4 -/* Create a mask for the feature bits of the specified feature. */ -#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT)) +/* Defined for compatibility only, do not add new users. */ +#define ARM64_FEATURE_MASK(x) (x##_MASK) #ifdef __ASSEMBLY__ From c3cdd54c6138871b04dc2306fbfe30ece947ac48 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:01:08 +0000 Subject: [PATCH 004/130] arm64/ptrace: Use system_supports_tpidr2() to check for TPIDR2 support We have a separate system_supports_tpidr2() to check for TPIDR2 support but were using system_supports_sme() in tls_set(). While these are currently identical let's use the specific check instead so we don't have any surprises in future. 
Reported-by: Will Deacon Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-tpidr2-ptrace-feat-v2-1-3760c895a574@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 2686ab157601..33d985bb2707 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -683,7 +683,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, unsigned long tls[2]; tls[0] = target->thread.uw.tp_value; - if (system_supports_sme()) + if (system_supports_tpidr2()) tls[1] = target->thread.tpidr2_el0; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); @@ -691,7 +691,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, return ret; target->thread.uw.tp_value = tls[0]; - if (system_supports_sme()) + if (system_supports_tpidr2()) target->thread.tpidr2_el0 = tls[1]; return ret; From fcd3d2c082b2a19da2326b2b38ba5a05536dcd32 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:04:35 +0000 Subject: [PATCH 005/130] arm64/sme: Don't use streaming mode to probe the maximum SME VL During development the architecture added the RDSVL instruction which means we do not need to enter streaming mode to enumerate the SME VLs, use it when we probe the maximum supported VL. Other users were already updated. No functional change. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221223-arm64-sme-probe-max-v1-1-cbde68f67ad0@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/fpsimd.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index dcc81e7200d4..62c67664cdaa 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1322,7 +1322,6 @@ u64 read_smcr_features(void) unsigned int vq_max; sme_kernel_enable(NULL); - sme_smstart_sm(); /* * Set the maximum possible VL. @@ -1332,11 +1331,9 @@ u64 read_smcr_features(void) smcr = read_sysreg_s(SYS_SMCR_EL1); smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */ - vq_max = sve_vq_from_vl(sve_get_vl()); + vq_max = sve_vq_from_vl(sme_get_vl()); smcr |= vq_max - 1; /* set LEN field to maximum effective value */ - sme_smstop_sm(); - return smcr; } From 97ec597b26df774a257e3f8e97353fd1b4471615 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:06:35 +0000 Subject: [PATCH 006/130] kselftest/arm64: Fix syscall-abi for systems without 128 bit SME SME does not mandate any specific VL so we may not have 128 bit SME but the algorithm used for enumerating VLs assumes that we will. Add the required check to ensure that the algorithm terminates. 
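The shape of the fixed enumeration loop, as a simplified sketch (error handling and the test body omitted; the actual change is in the diff below):

    int vq, vl;

    for (vq = SVE_VQ_MAX; vq > 0; --vq) {
    	vl = prctl(PR_SME_SET_VL, vq * 16) & PR_SME_VL_LEN_MASK;

    	/*
    	 * The kernel clamps the request to a supported VL. If the
    	 * result maps to a bigger vq than we asked for then we have
    	 * gone past the lowest supported VL, so stop.
    	 */
    	if (sve_vq_from_vl(vl) > vq)
    		break;

    	if (vq != sve_vq_from_vl(vl))
    		vq = sve_vq_from_vl(vl);

    	/* ... test at this VL ... */
    }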
Fixes: 43e3f85523e4 ("kselftest/arm64: Add SME support to syscall ABI test") Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221223-arm64-syscall-abi-sme-only-v1-1-4fabfbd62087@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/syscall-abi.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index dd7ebe536d05..ffe719b50c21 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -390,6 +390,10 @@ static void test_one_syscall(struct syscall_cfg *cfg) sme_vl &= PR_SME_VL_LEN_MASK; + /* Found lowest VL */ + if (sve_vq_from_vl(sme_vl) > sme_vq) + break; + if (sme_vq != sve_vq_from_vl(sme_vl)) sme_vq = sve_vq_from_vl(sme_vl); @@ -461,6 +465,10 @@ int sme_count_vls(void) vl &= PR_SME_VL_LEN_MASK; + /* Found lowest VL */ + if (sve_vq_from_vl(vl) > vq) + break; + if (vq != sve_vq_from_vl(vl)) vq = sve_vq_from_vl(vl); From fae491e52cc2b48097bd93fceb687ccbacd263c4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:06:36 +0000 Subject: [PATCH 007/130] kselftest/arm64: Only enumerate VLs once in syscall-abi Currently syscall-abi not only enumerates the SVE VLs twice while working out how many tests are planned, it also repeats the enumeration process while doing the actual tests. Record the VLs when we enumerate and use that list when we are performing the tests, removing some duplicated logic. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221223-arm64-syscall-abi-sme-only-v1-2-4fabfbd62087@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/abi/syscall-abi.c | 95 ++++++++----------- 1 file changed, 41 insertions(+), 54 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index ffe719b50c21..45fdcbe3e909 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -24,6 +24,11 @@ static int default_sme_vl; +static int sve_vl_count; +static unsigned int sve_vls[SVE_VQ_MAX]; +static int sme_vl_count; +static unsigned int sme_vls[SVE_VQ_MAX]; + extern void do_syscall(int sve_vl, int sme_vl); static void fill_random(void *buf, size_t size) @@ -355,72 +360,55 @@ static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl, static void test_one_syscall(struct syscall_cfg *cfg) { - int sve_vq, sve_vl; - int sme_vq, sme_vl; + int sve, sme; + int ret; /* FPSIMD only case */ ksft_test_result(do_test(cfg, 0, default_sme_vl, 0), "%s FPSIMD\n", cfg->name); - if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) - return; - - for (sve_vq = SVE_VQ_MAX; sve_vq > 0; --sve_vq) { - sve_vl = prctl(PR_SVE_SET_VL, sve_vq * 16); - if (sve_vl == -1) + for (sve = 0; sve < sve_vl_count; sve++) { + ret = prctl(PR_SVE_SET_VL, sve_vls[sve]); + if (ret == -1) ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n", strerror(errno), errno); - sve_vl &= PR_SVE_VL_LEN_MASK; + ksft_test_result(do_test(cfg, sve_vls[sve], default_sme_vl, 0), + "%s SVE VL %d\n", cfg->name, sve_vls[sve]); - if (sve_vq != sve_vq_from_vl(sve_vl)) - sve_vq = sve_vq_from_vl(sve_vl); - - ksft_test_result(do_test(cfg, sve_vl, default_sme_vl, 0), - "%s SVE VL %d\n", cfg->name, sve_vl); - - if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) - continue; - - for (sme_vq = SVE_VQ_MAX; sme_vq > 0; --sme_vq) { - sme_vl = prctl(PR_SME_SET_VL, sme_vq * 16); - if (sme_vl == -1) + for (sme = 0; sme < 
sme_vl_count; sme++) { + ret = prctl(PR_SME_SET_VL, sme_vls[sme]); + if (ret == -1) ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", strerror(errno), errno); - sme_vl &= PR_SME_VL_LEN_MASK; - - /* Found lowest VL */ - if (sve_vq_from_vl(sme_vl) > sme_vq) - break; - - if (sme_vq != sve_vq_from_vl(sme_vl)) - sme_vq = sve_vq_from_vl(sme_vl); - - ksft_test_result(do_test(cfg, sve_vl, sme_vl, + ksft_test_result(do_test(cfg, sve_vls[sve], + sme_vls[sme], SVCR_ZA_MASK | SVCR_SM_MASK), "%s SVE VL %d/SME VL %d SM+ZA\n", - cfg->name, sve_vl, sme_vl); - ksft_test_result(do_test(cfg, sve_vl, sme_vl, - SVCR_SM_MASK), + cfg->name, sve_vls[sve], + sme_vls[sme]); + ksft_test_result(do_test(cfg, sve_vls[sve], + sme_vls[sme], SVCR_SM_MASK), "%s SVE VL %d/SME VL %d SM\n", - cfg->name, sve_vl, sme_vl); - ksft_test_result(do_test(cfg, sve_vl, sme_vl, - SVCR_ZA_MASK), + cfg->name, sve_vls[sve], + sme_vls[sme]); + ksft_test_result(do_test(cfg, sve_vls[sve], + sme_vls[sme], SVCR_ZA_MASK), "%s SVE VL %d/SME VL %d ZA\n", - cfg->name, sve_vl, sme_vl); + cfg->name, sve_vls[sve], + sme_vls[sme]); } } } -int sve_count_vls(void) +void sve_count_vls(void) { unsigned int vq; - int vl_count = 0; int vl; if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) - return 0; + return; /* * Enumerate up to SVE_VQ_MAX vector lengths @@ -436,23 +424,17 @@ int sve_count_vls(void) if (vq != sve_vq_from_vl(vl)) vq = sve_vq_from_vl(vl); - vl_count++; + sve_vls[sve_vl_count++] = vl; } - - return vl_count; } -int sme_count_vls(void) +void sme_count_vls(void) { unsigned int vq; - int vl_count = 0; int vl; if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) - return 0; - - /* Ensure we configure a SME VL, used to flag if SVCR is set */ - default_sme_vl = 16; + return; /* * Enumerate up to SVE_VQ_MAX vector lengths @@ -472,10 +454,11 @@ int sme_count_vls(void) if (vq != sve_vq_from_vl(vl)) vq = sve_vq_from_vl(vl); - vl_count++; + sme_vls[sme_vl_count++] = vl; } - return vl_count; + /* Ensure we configure a SME VL, used to flag if SVCR is set */ + default_sme_vl = sme_vls[0]; } int main(void) @@ -486,8 +469,12 @@ int main(void) srandom(getpid()); ksft_print_header(); - tests += sve_count_vls(); - tests += (sve_count_vls() * sme_count_vls()) * 3; + + sve_count_vls(); + sme_count_vls(); + + tests += sve_vl_count; + tests += (sve_vl_count * sme_vl_count) * 3; ksft_set_plan(ARRAY_SIZE(syscalls) * tests); if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) From 024e4a155874dcd3c3473fb26ff4f491c746b4d3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:06:37 +0000 Subject: [PATCH 008/130] kselftest/arm64: Verify SME only ABI in syscall-abi Currently syscall-abi only covers SME in the case where the system supports SVE however it is architecturally valid to support SME without SVE. Update the program to cover this case, this requires adjustments in the code to check for SVCR.SM being set when deciding if we're handling the FPSIMD or SVE registers and the addition of new test cases for the SME only case. Note that in the SME only case we should not save the SVE registers after a syscall since even if we were in streaming mode and therefore set them the syscall should have exited streaming mode, we check that we have done so by looking at SVCR. 
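For reference, SVCR is readable from EL0, so the check can be made directly after the syscall returns. A minimal sketch, using the encoded register name in case the assembler does not know SVCR:

    #include <stdint.h>
    #include <stdlib.h>

    #define SVCR_SM_MASK	(1UL << 0)	/* SVCR.SM */

    static inline uint64_t read_svcr(void)
    {
    	uint64_t val;

    	/* SVCR is S3_3_C4_C2_2, readable at EL0 when SME is present */
    	asm volatile("mrs %0, S3_3_C4_C2_2" : "=r" (val));
    	return val;
    }

    /* ...after the syscall returns: */
    if (read_svcr() & SVCR_SM_MASK)
    	abort();	/* still in streaming mode: ABI violation */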
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221223-arm64-syscall-abi-sme-only-v1-3-4fabfbd62087@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/abi/syscall-abi-asm.S | 14 +++++--- .../testing/selftests/arm64/abi/syscall-abi.c | 34 ++++++++++++++++++- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S index acd5e9f3bc0b..cdfafc939a9e 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S +++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S @@ -92,8 +92,11 @@ do_syscall: str x29, [x2], #8 // FP str x30, [x2], #8 // LR - // Load FPRs if we're not doing SVE + // Load FPRs if we're not doing neither SVE nor streaming SVE cbnz x0, 1f + ldr x2, =svcr_in + tbnz x2, #SVCR_SM_SHIFT, 1f + ldr x2, =fpr_in ldp q0, q1, [x2] ldp q2, q3, [x2, #16 * 2] @@ -111,10 +114,11 @@ do_syscall: ldp q26, q27, [x2, #16 * 26] ldp q28, q29, [x2, #16 * 28] ldp q30, q31, [x2, #16 * 30] + + b 2f 1: // Load the SVE registers if we're doing SVE/SME - cbz x0, 1f ldr x2, =z_in ldr z0, [x2, #0, MUL VL] @@ -155,9 +159,9 @@ do_syscall: ldr x2, =ffr_in ldr p0, [x2] ldr x2, [x2, #0] - cbz x2, 2f + cbz x2, 1f wrffr p0.b -2: +1: ldr x2, =p_in ldr p0, [x2, #0, MUL VL] @@ -176,7 +180,7 @@ do_syscall: ldr p13, [x2, #13, MUL VL] ldr p14, [x2, #14, MUL VL] ldr p15, [x2, #15, MUL VL] -1: +2: // Do the syscall svc #0 diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 45fdcbe3e909..7c9b6e947040 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -88,6 +88,7 @@ static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t s #define NUM_FPR 32 uint64_t fpr_in[NUM_FPR * 2]; uint64_t fpr_out[NUM_FPR * 2]; +uint64_t fpr_zero[NUM_FPR * 2]; static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr) @@ -102,7 +103,7 @@ static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, int errors = 0; int i; - if (!sve_vl) { + if (!sve_vl && !(svcr & SVCR_SM_MASK)) { for (i = 0; i < ARRAY_SIZE(fpr_in); i++) { if (fpr_in[i] != fpr_out[i]) { ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n", @@ -114,6 +115,18 @@ static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, } } + /* + * In streaming mode the whole register set should be cleared + * by the transition out of streaming mode. 
+ */ + if (svcr & SVCR_SM_MASK) { + if (memcmp(fpr_zero, fpr_out, sizeof(fpr_out)) != 0) { + ksft_print_msg("%s FPSIMD registers non-zero exiting SM\n", + cfg->name); + errors++; + } + } + return errors; } @@ -400,6 +413,24 @@ static void test_one_syscall(struct syscall_cfg *cfg) sme_vls[sme]); } } + + for (sme = 0; sme < sme_vl_count; sme++) { + ret = prctl(PR_SME_SET_VL, sme_vls[sme]); + if (ret == -1) + ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + ksft_test_result(do_test(cfg, 0, sme_vls[sme], + SVCR_ZA_MASK | SVCR_SM_MASK), + "%s SME VL %d SM+ZA\n", + cfg->name, sme_vls[sme]); + ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_SM_MASK), + "%s SME VL %d SM\n", + cfg->name, sme_vls[sme]); + ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_ZA_MASK), + "%s SME VL %d ZA\n", + cfg->name, sme_vls[sme]); + } } void sve_count_vls(void) @@ -474,6 +505,7 @@ int main(void) sme_count_vls(); tests += sve_vl_count; + tests += sme_vl_count * 3; tests += (sve_vl_count * sme_vl_count) * 3; ksft_set_plan(ARRAY_SIZE(syscalls) * tests); From 10f326fbb4584f3b9fbf1102c1a71a9ecac0e97f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:06:38 +0000 Subject: [PATCH 009/130] kselftest/arm64: Only enumerate power of two VLs in syscall-abi As documented in issue C215 in the known issues list for DDI0487I.a [1] Arm will be making a retroactive change to SVE to remove the possibility of selecting non power of two vector lengths. This has no impact on existing physical implementations but most virtual implementations have implemented the full range of permissible vector lengths. Since virtual implementations are noticeably slow in general and the larger vector lengths amplify the issue there's a useful improvement in runtime from only covering the vector lengths that will exist in practical systems, adjust our enumeration accordingly. We have other tests that aim to cover the enumeration interfaces. For symmetry we apply the same change to the eumeration for SME vector lengths, though the power of two restriction was already present for SME so there is no impact on the set of vector lengths tested. 
[1] https://developer.arm.com/documentation/102105/ia-00/ Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221223-arm64-syscall-abi-sme-only-v1-4-4fabfbd62087@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/syscall-abi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 7c9b6e947040..8afcbf6861fd 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -444,7 +444,7 @@ void sve_count_vls(void) /* * Enumerate up to SVE_VQ_MAX vector lengths */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { + for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) { vl = prctl(PR_SVE_SET_VL, vq * 16); if (vl == -1) ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n", @@ -470,7 +470,7 @@ void sme_count_vls(void) /* * Enumerate up to SVE_VQ_MAX vector lengths */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { + for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) { vl = prctl(PR_SME_SET_VL, vq * 16); if (vl == -1) ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n", From 67f49869106f78882a8a09b736d4884be85aba18 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 13:07:45 +0000 Subject: [PATCH 010/130] kselftest/arm64: Skip non-power of 2 SVE vector lengths in fp-stress As documented in issue C215 in the known issues list for DDI0487I.a [1] Arm will be making a retroactive change to SVE to remove the possibility of selecting non power of two vector lengths. This has no impact on existing physical implementations but most virtual implementations have implemented the full range of permissible vector lengths. Given how demanding fp-stress is for these implementations update to only attempt to enumerate the power of two vector lengths, reducing the load created on existing virtual implementations and only exercising the functionality that will be seen in physical implementations. [1] https://developer.arm.com/documentation/102105/ia-00/ Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221220-arm64-fp-stress-pow2-v1-1-d0ce756b57af@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/fp-stress.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c index f8b2f41aac36..2b95f9451b1b 100644 --- a/tools/testing/selftests/arm64/fp/fp-stress.c +++ b/tools/testing/selftests/arm64/fp/fp-stress.c @@ -377,7 +377,7 @@ static void probe_vls(int vls[], int *vl_count, int set_vl) *vl_count = 0; - for (vq = SVE_VQ_MAX; vq > 0; --vq) { + for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) { vl = prctl(set_vl, vq * 16); if (vl == -1) ksft_exit_fail_msg("SET_VL failed: %s (%d)\n", @@ -385,6 +385,9 @@ static void probe_vls(int vls[], int *vl_count, int set_vl) vl &= PR_SVE_VL_LEN_MASK; + if (*vl_count && (vl == vls[*vl_count - 1])) + break; + vq = sve_vq_from_vl(vl); vls[*vl_count] = vl; From 541826afb2c7426ada6a414c3611352c86f95ad5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 17:05:03 +0000 Subject: [PATCH 011/130] arm64/sysreg: Add definition for ICC_NMIAR1_EL1 FEAT_NMI adds a new interrupt status register for NMIs, ICC_NMIAR1_EL1. Add the definition for this register as per IHI0069H. 
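As an illustration of how the new register might be consumed (a hypothetical fragment; ICC_NMIAR1_EL1_INTID_MASK is the mask name the sysreg generator is expected to derive from the Field line below):

    #include <linux/bitfield.h>

    u64 nmiar = read_sysreg_s(SYS_ICC_NMIAR1_EL1);
    u32 intid = FIELD_GET(ICC_NMIAR1_EL1_INTID_MASK, nmiar);	/* bits 23:0 */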
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-isr-el1-v2-1-89f7073a1ca9@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index e8fb6684d7f3..12b127342f49 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1842,3 +1842,8 @@ Field 23:16 LD Res0 15:8 Field 7:0 LR EndSysreg + +Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 +Res0 63:24 +Field 23:0 INTID +EndSysreg From df5f1775aab2ab819caac73f1ccbb65929848e4e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 17:05:04 +0000 Subject: [PATCH 012/130] arm64/sysreg: Add definition of ISR_EL1 Add a definition of ISR_EL1 as per DDI0487I.a. This register was not previously defined in sysreg.h, no functional changes. Acked-by: Marc Zyngier Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-isr-el1-v2-2-89f7073a1ca9@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 12b127342f49..e87c378fb941 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1843,6 +1843,16 @@ Res0 15:8 Field 7:0 LR EndSysreg +Sysreg ISR_EL1 3 0 12 1 0 +Res0 63:11 +Field 10 IS +Field 9 FS +Field 8 A +Field 7 I +Field 6 F +Res0 5:0 +EndSysreg + Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 Res0 63:24 Field 23:0 INTID From 1abf363d085cf6133ef44900334ddd0f61dc3276 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 17:05:05 +0000 Subject: [PATCH 013/130] KVM: arm64: Use symbolic definition for ISR_EL1.A Now that we are generating ISR_EL1 we have acquired a constant for ISR_EL1.A, use it rather than the magic number we had been using in the KVM entry code. 
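For reference, the symbolic definition comes straight from the "Field 8 A" line added in the previous patch; assuming the generator's usual conventions it emits constants along these lines (a sketch, not copied from the generated header):

    #define ISR_EL1_A	GENMASK(8, 8)
    #define ISR_EL1_A_MASK	GENMASK(8, 8)
    #define ISR_EL1_A_SHIFT	8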
Suggested-by: Marc Zyngier
Acked-by: Marc Zyngier
Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221208-arm64-isr-el1-v2-3-89f7073a1ca9@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/kvm/hyp/entry.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 435346ea1504..f3aa7738b477 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -171,7 +171,7 @@ alternative_else
 	dsb	sy		// Synchronize against in-flight ld/st
 	isb			// Prevent an early read of side-effect free ISR
 	mrs	x2, isr_el1
-	tbnz	x2, #8, 2f	// ISR_EL1.A
+	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
 	ret
 	nop
2:

From 5a4c2a314083b07751c3151baf5e6ed7cc3aba36 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Wed, 4 Jan 2023 21:00:00 +0800
Subject: [PATCH 014/130] arm64: make ARCH_FORCE_MAX_ORDER selectable

The other architectures with ARCH_FORCE_MAX_ORDER make it selectable,
but ARM64 does not. Make it selectable on ARM64 as well: this is useful
for users that need to allocate more than 4MB of physically contiguous
memory with a 4K page size (and correspondingly more with a 16K page
size). The maximum value of MAX_ORDER is calculated below, see
include/linux/mmzone.h:

  MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS

so the maximum value of MAX_ORDER = SECTION_SIZE_BITS + 1 - PAGE_SHIFT:

    | SECTION_SIZE_BITS |  PAGE_SHIFT  |  max MAX_ORDER  | default MAX_ORDER  |
----+-------------------+--------------+-----------------+--------------------+
 4K |        27         |      12      |       16        |         11         |
16K |        27         |      14      |       14        |         12         |
64K |        29         |      16      |       14        |         14         |
----+-------------------+--------------+-----------------+--------------------+

Signed-off-by: Kefeng Wang
Link: https://lore.kernel.org/r/20230104130000.69806-1-wangkefeng.wang@huawei.com
[catalin.marinas@arm.com: add the calculations as comment to arch/arm64/Kconfig]
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 03934808b2ed..8cfd8dcf7e75 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1456,10 +1456,23 @@ config XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
+# include/linux/mmzone.h requires the following to be true:
+#
+#   MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS
+#
+# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS + 1 - PAGE_SHIFT:
+#
+#     | SECTION_SIZE_BITS |  PAGE_SHIFT  |  max MAX_ORDER  | default MAX_ORDER  |
+# ----+-------------------+--------------+-----------------+--------------------+
+#  4K |        27         |      12      |       16        |         11         |
+# 16K |        27         |      14      |       14        |         12         |
+# 64K |        29         |      16      |       14        |         14         |
 config ARCH_FORCE_MAX_ORDER
-	int
+	int "Maximum zone order" if ARM64_4K_PAGES || ARM64_16K_PAGES
 	default "14" if ARM64_64K_PAGES
+	range 12 14 if ARM64_16K_PAGES
 	default "12" if ARM64_16K_PAGES
+	range 11 16 if ARM64_4K_PAGES
 	default "11"
 	help
 	  The kernel memory allocator divides physically contiguous memory

From b2482807fbd48a299512d161e38262fd7d973aa0 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 10 Jan 2023 19:34:36 +0000
Subject: [PATCH 015/130] arm64/sme: Optimise SME exit on syscall entry

Our ABI says that we exit streaming mode on syscall entry. Currently we
check if we are in streaming mode before doing this, but since we have
an SMSTOP SM instruction which clears SVCR.SM in a single atomic
operation we can save ourselves the read of the system register and the
check of the flag and just unconditionally do the SMSTOP SM.
If we are not in streaming mode this results in a no-op change to SVCR;
if we are in streaming mode we will exit as desired. No functional
change.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20230110-arm64-sme-syscall-smstop-v1-1-ac94235fd810@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/syscall.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index a5de47e3df2b..da84cf855c44 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -173,12 +173,8 @@ static inline void fp_user_discard(void)
 	 * register state to track, if this changes the KVM code will
 	 * need updating.
 	 */
-	if (system_supports_sme() && test_thread_flag(TIF_SME)) {
-		u64 svcr = read_sysreg_s(SYS_SVCR);
-
-		if (svcr & SVCR_SM_MASK)
-			sme_smstop_sm();
-	}
+	if (system_supports_sme())
+		sme_smstop_sm();
 
 	if (!system_supports_sve())
 		return;

From 30792e7c18b659599d9b67922d60d76eee1a0e5d Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 10 Jan 2023 20:49:58 +0000
Subject: [PATCH 016/130] kselftest/arm64: Fix test numbering when skipping tests

Currently when skipping tests in the BTI testsuite we assign the same
number to every test, since we forget to increment the current test
number as we skip. This causes warnings about not running the expected
number of tests and may confuse result parsers. Fix this by adding the
missing increment.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20230110-arm64-bti-selftest-skip-v1-1-143ecdc84567@kernel.org
Signed-off-by: Catalin Marinas
---
 tools/testing/selftests/arm64/bti/test.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
index 67b77ab83c20..4b6dda987c58 100644
--- a/tools/testing/selftests/arm64/bti/test.c
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -112,7 +112,7 @@ static void __do_test(void (*trampoline)(void (*)(void)),
 	if (skip_all) {
 		test_skipped++;
 		putstr("ok ");
-		putnum(test_num);
+		putnum(test_num++);
 		putstr(" ");
 		puttestname(name, trampoline_name);
 		putstr(" # SKIP\n");

From 1c3b614548b50227c5950258831b130d832703b5 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 10 Jan 2023 20:49:59 +0000
Subject: [PATCH 017/130] kselftest/arm64: Run BTI selftests on systems without BTI

The BTI selftests are built both with and without BTI support,
validating that BTI signals are generated as expected for each flavour
of binary. Both versions of the binary currently skip all of their
tests when the system does not support BTI, but this is excessive since
we do have a defined ABI for how the programs should function in this
case (especially for the non-BTI binary). Update the test program to
run all the tests unconditionally, adding a runtime adjustment of the
expected results on systems that don't support BTI where we currently
handle the build time case. The tests all use HINT space instructions:
BTI itself is a HINT, as are the PAC instructions that function as
landing pads, so nothing in the tests depends on support for BTI in the
kernel or hardware.
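The HINT-space property is what makes this safe: on hardware without BTI these encodings execute as NOPs rather than trapping. Illustrative examples of the instructions involved:

    asm volatile("bti c");		/* HINT #34, a BTI landing pad */
    asm volatile("paciasp");	/* HINT #25, also a valid landing pad */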
Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20230110-arm64-bti-selftest-skip-v1-2-143ecdc84567@kernel.org
Signed-off-by: Catalin Marinas
---
 tools/testing/selftests/arm64/bti/test.c | 25 ++++++++++--------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
index 4b6dda987c58..2cd8dcee5aec 100644
--- a/tools/testing/selftests/arm64/bti/test.c
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -6,6 +6,7 @@
 
 #include "system.h"
 
+#include
 #include
 #include
 #include
@@ -101,7 +102,8 @@ static void handler(int n, siginfo_t *si __always_unused,
 	uc->uc_mcontext.pstate &= ~PSR_BTYPE_MASK;
 }
 
-static int skip_all;
+/* Does the system have BTI? */
+static bool have_bti;
 
 static void __do_test(void (*trampoline)(void (*)(void)),
 		      void (*fn)(void),
@@ -109,19 +111,11 @@ static void __do_test(void (*trampoline)(void (*)(void)),
 		      const char *name,
 		      int expect_sigill)
 {
-	if (skip_all) {
-		test_skipped++;
-		putstr("ok ");
-		putnum(test_num++);
-		putstr(" ");
-		puttestname(name, trampoline_name);
-		putstr(" # SKIP\n");
-
-		return;
-	}
-
-	/* Branch Target exceptions should only happen in BTI binaries: */
-	if (!BTI)
+	/*
+	 * Branch Target exceptions should only happen for BTI
+	 * binaries running on a system with BTI:
+	 */
+	if (!BTI || !have_bti)
 		expect_sigill = 0;
 
 	sigill_expected = expect_sigill;
@@ -199,9 +193,10 @@ void start(int *argcp)
 		putstr("# HWCAP2_BTI present\n");
 		if (!(hwcap & HWCAP_PACA))
 			putstr("# Bad hardware? Expect problems.\n");
+		have_bti = true;
 	} else {
 		putstr("# HWCAP2_BTI not present\n");
-		skip_all = 1;
+		have_bti = false;
 	}
 
 	putstr("# Test binary");

From 7f95da9d2dc4c20bb374c281ceb8fa40b6208f4b Mon Sep 17 00:00:00 2001
From: Junhao He
Date: Thu, 19 Jan 2023 18:03:05 +0800
Subject: [PATCH 018/130] drivers/perf: hisi: Advertise the PERF_PMU_CAP_NO_EXCLUDE capability

The initialization of pmu::capabilities was missed when the
initialization code for hisi_pmu->pmu was extracted into a function.
The HiSilicon uncore PMU counters do not support context exclusion, so
we have to advertise the PERF_PMU_CAP_NO_EXCLUDE capability. This
ensures that perf will prevent us from handling events where any
exclusion flags are set.

Signed-off-by: Junhao He
Link: https://lore.kernel.org/r/20230119100307.3660-2-hejunhao3@huawei.com
Signed-off-by: Will Deacon
---
 drivers/perf/hisilicon/hisi_uncore_pmu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index fbc8a93d5eac..2a466477920b 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -546,6 +546,7 @@ void hisi_pmu_init(struct pmu *pmu, const char *name,
 	pmu->stop = hisi_uncore_pmu_stop;
 	pmu->read = hisi_uncore_pmu_read;
 	pmu->attr_groups = attr_groups;
+	pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
 }
 EXPORT_SYMBOL_GPL(hisi_pmu_init);

From 053b5579dacfc5763dda0c073ee14147421d32d7 Mon Sep 17 00:00:00 2001
From: Junhao He
Date: Thu, 19 Jan 2023 18:03:06 +0800
Subject: [PATCH 019/130] drivers/perf: hisi: Simplify the parameters of hisi_pmu_init()

Use "hisi_pmu" to simplify the parameter list for the hisi_pmu_init()
function.
Signed-off-by: Junhao He Link: https://lore.kernel.org/r/20230119100307.3660-3-hejunhao3@huawei.com Signed-off-by: Will Deacon --- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 2 +- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 2 +- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 2 +- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 2 +- drivers/perf/hisilicon/hisi_uncore_pmu.c | 8 +++++--- drivers/perf/hisilicon/hisi_uncore_pmu.h | 4 ++-- drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 2 +- 7 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 50d0c0a2f1fe..8c3ffcbfd4c0 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -516,7 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, ddrc_pmu->index_id); - hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE); + hisi_pmu_init(ddrc_pmu, name, THIS_MODULE); ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 13017b3412a5..806698b9eabf 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -519,7 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", hha_pmu->sccl_id, hha_pmu->index_id); - hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE); + hisi_pmu_init(hha_pmu, name, THIS_MODULE); ret = perf_pmu_register(&hha_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 2995f3630d49..5b2c35f1658a 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -557,7 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) */ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", l3c_pmu->sccl_id, l3c_pmu->ccl_id); - hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE); + hisi_pmu_init(l3c_pmu, name, THIS_MODULE); ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); if (ret) { diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index 47d3cc9b6eec..afe3419f3f6d 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -412,7 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) return ret; } - hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE); + hisi_pmu_init(pa_pmu, name, THIS_MODULE); ret = perf_pmu_register(&pa_pmu->pmu, name, -1); if (ret) { dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 2a466477920b..f1b0f5e1a28f 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -531,9 +531,11 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) } EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); -void hisi_pmu_init(struct pmu *pmu, const char *name, - const struct attribute_group **attr_groups, struct module *module) +void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name, + struct module *module) 
{ + struct pmu *pmu = &hisi_pmu->pmu; + pmu->name = name; pmu->module = module; pmu->task_ctx_nr = perf_invalid_context; @@ -545,7 +547,7 @@ void hisi_pmu_init(struct pmu *pmu, const char *name, pmu->start = hisi_uncore_pmu_start; pmu->stop = hisi_uncore_pmu_stop; pmu->read = hisi_uncore_pmu_read; - pmu->attr_groups = attr_groups; + pmu->attr_groups = hisi_pmu->pmu_events.attr_groups; pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE; } EXPORT_SYMBOL_GPL(hisi_pmu_init); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index b59de33cd059..f8e3cc6903d7 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -121,6 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev, int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, struct platform_device *pdev); -void hisi_pmu_init(struct pmu *pmu, const char *name, - const struct attribute_group **attr_groups, struct module *module); +void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name, + struct module *module); #endif /* __HISI_UNCORE_PMU_H__ */ diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index b9c79f17230c..1e354433776a 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -445,7 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) return ret; } - hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE); + hisi_pmu_init(sllc_pmu, name, THIS_MODULE); ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); if (ret) { From e126f6f42f89baee09e088ab6bc48f83ac3a0eae Mon Sep 17 00:00:00 2001 From: Junhao He Date: Thu, 19 Jan 2023 18:03:07 +0800 Subject: [PATCH 020/130] drivers/perf: hisi: Extract initialization of "cpa_pmu->pmu" Use hisi_pmu_init() function to simplify initialization of "cpa_pmu->pmu". Signed-off-by: Junhao He Link: https://lore.kernel.org/r/20230119100307.3660-4-hejunhao3@huawei.com Signed-off-by: Will Deacon --- drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c index a9bb73f76be4..4c67d57217a7 100644 --- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c @@ -316,21 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev) if (!name) return -ENOMEM; - cpa_pmu->pmu = (struct pmu) { - .name = name, - .module = THIS_MODULE, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = cpa_pmu->pmu_events.attr_groups, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, - }; + hisi_pmu_init(cpa_pmu, name, THIS_MODULE); /* Power Management should be disabled before using CPA PMU. 
*/ hisi_cpa_pmu_disable_pm(cpa_pmu); From bb21ef19a3d8f586a99310116d40622fb5b79942 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Dec 2022 19:46:13 +0000 Subject: [PATCH 021/130] perf/arm-cmn: Reset DTM_PMU_CONFIG at probe Although we treat the DTM counters as free-running such that we're not too concerned about the initial DTM state, it's possible for a previous user to have left DTM counters enabled and paired with DTC counters. Thus if the first events are scheduled using some, but not all, DTMs, the as-yet-unused ones could end up adding spurious increments to the event counts at the DTC. Make sure we sync our initial DTM_PMU_CONFIG state to all the DTMs at probe time to avoid that possibility. Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/ba5f38b3dc733cd06bfb5e659b697e76d18c2183.1670269572.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index b80a9b74662b..e220714954b0 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1865,6 +1865,7 @@ static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, i dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx); dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; + writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); for (i = 0; i < 4; i++) { dtm->wp_event[i] = -1; writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); From e85930f06f0e938bfeb6e081526da86a784cb907 Mon Sep 17 00:00:00 2001 From: Gowthami Thiagarajan Date: Fri, 9 Dec 2022 11:06:07 +0530 Subject: [PATCH 022/130] perf/marvell: Add ACPI support to DDR uncore driver Add support for ACPI based device registration so that the driver can be also enabled through ACPI table. Signed-off-by: Gowthami Thiagarajan Link: https://lore.kernel.org/r/20221209053607.3929964-1-gthiagarajan@marvell.com Signed-off-by: Will Deacon --- drivers/perf/marvell_cn10k_ddr_pmu.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c index 665b382a0ee3..b94a5f6cc22b 100644 --- a/drivers/perf/marvell_cn10k_ddr_pmu.c +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c @@ -12,6 +12,7 @@ #include #include #include +#include /* Performance Counters Operating Mode Control Registers */ #define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020 @@ -717,10 +718,19 @@ static const struct of_device_id cn10k_ddr_pmu_of_match[] = { MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match); #endif +#ifdef CONFIG_ACPI +static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = { + {"MRVL000A", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match); +#endif + static struct platform_driver cn10k_ddr_pmu_driver = { .driver = { .name = "cn10k-ddr-pmu", .of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match), + .acpi_match_table = ACPI_PTR(cn10k_ddr_pmu_acpi_match), .suppress_bind_attrs = true, }, .probe = cn10k_ddr_perf_probe, From 093cf1f62fe8504d3cbd721c8753dbda931dd387 Mon Sep 17 00:00:00 2001 From: Gowthami Thiagarajan Date: Fri, 9 Dec 2022 11:07:15 +0530 Subject: [PATCH 023/130] perf/marvell: Add ACPI support to TAD uncore driver Add support for ACPI based device registration so that the driver can be also enabled through ACPI table. While at that change the DT specific API's to device_* API's so that both DT based and ACPI based probing works. 
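The key enabler is the unified property API, which reads from either the DT or the ACPI _DSD data depending on how the device was enumerated. A representative fragment (sketch only, using a property from the patch below):

    u32 tad_cnt;
    int ret;

    /* DT-only helper, tied to a struct device_node: */
    ret = of_property_read_u32(pdev->dev.of_node, "marvell,tad-cnt", &tad_cnt);

    /* Firmware-agnostic helper, works for both DT and ACPI: */
    ret = device_property_read_u32(&pdev->dev, "marvell,tad-cnt", &tad_cnt);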
Signed-off-by: Gowthami Thiagarajan Link: https://lore.kernel.org/r/20221209053715.3930071-1-gthiagarajan@marvell.com Signed-off-by: Will Deacon --- drivers/perf/marvell_cn10k_tad_pmu.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index a1166afb3702..3972197e2210 100644 --- a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -13,6 +13,7 @@ #include #include #include +#include #define TAD_PFC_OFFSET 0x800 #define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3)) @@ -254,7 +255,7 @@ static const struct attribute_group *tad_pmu_attr_groups[] = { static int tad_pmu_probe(struct platform_device *pdev) { - struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; struct tad_region *regions; struct tad_pmu *tad_pmu; struct resource *res; @@ -276,21 +277,21 @@ static int tad_pmu_probe(struct platform_device *pdev) return -ENODEV; } - ret = of_property_read_u32(node, "marvell,tad-page-size", - &tad_page_size); + ret = device_property_read_u32(dev, "marvell,tad-page-size", + &tad_page_size); if (ret) { dev_err(&pdev->dev, "Can't find tad-page-size property\n"); return ret; } - ret = of_property_read_u32(node, "marvell,tad-pmu-page-size", - &tad_pmu_page_size); + ret = device_property_read_u32(dev, "marvell,tad-pmu-page-size", + &tad_pmu_page_size); if (ret) { dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n"); return ret; } - ret = of_property_read_u32(node, "marvell,tad-cnt", &tad_cnt); + ret = device_property_read_u32(dev, "marvell,tad-cnt", &tad_cnt); if (ret) { dev_err(&pdev->dev, "Can't find tad-cnt property\n"); return ret; @@ -369,10 +370,19 @@ static const struct of_device_id tad_pmu_of_match[] = { }; #endif +#ifdef CONFIG_ACPI +static const struct acpi_device_id tad_pmu_acpi_match[] = { + {"MRVL000B", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match); +#endif + static struct platform_driver tad_pmu_driver = { .driver = { .name = "cn10k_tad_pmu", .of_match_table = of_match_ptr(tad_pmu_of_match), + .acpi_match_table = ACPI_PTR(tad_pmu_acpi_match), .suppress_bind_attrs = true, }, .probe = tad_pmu_probe, From e080477a050cce0471d3a84347f350ad7514a18b Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:17 -0600 Subject: [PATCH 024/130] perf: arm_spe: Use feature numbering for PMSEVFR_EL1 defines Similar to commit 121a8fc088f1 ("arm64/sysreg: Use feature numbering for PMU and SPE revisions") use feature numbering instead of architecture versions for the PMSEVFR_EL1 Res0 defines. 
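For reference, the rename maps SPE architecture versions onto the ID_AA64DFR0_EL1.PMSVer feature numbering as follows (summary; the IMP/V1P1 naming matches the enumeration used in the driver below):

    /*
     * PMSVer 0b0001 (IMP)  : FEAT_SPE (Armv8.2)     -> PMSEVFR_EL1_RES0_IMP
     * PMSVer 0b0010 (V1P1) : FEAT_SPEv1p1 (Armv8.3) -> PMSEVFR_EL1_RES0_V1P1
     */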
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-1-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 6 +++--- drivers/perf/arm_spe_pmu.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1312fb48f18b..c4ce16333750 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -273,11 +273,11 @@ #define SYS_PMSFCR_EL1_ST_SHIFT 18 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) -#define SYS_PMSEVFR_EL1_RES0_8_2 \ +#define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) -#define SYS_PMSEVFR_EL1_RES0_8_3 \ - (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P1 \ + (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) #define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 00e3a637f7b6..65cf93dcc8ee 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -677,11 +677,11 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver) { switch (pmsver) { case ID_AA64DFR0_EL1_PMSVer_IMP: - return SYS_PMSEVFR_EL1_RES0_8_2; + return PMSEVFR_EL1_RES0_IMP; case ID_AA64DFR0_EL1_PMSVer_V1P1: /* Return the highest version we support in default */ default: - return SYS_PMSEVFR_EL1_RES0_8_3; + return PMSEVFR_EL1_RES0_V1P1; } } From c759ec850df89f7235b08e468abb3190b6998d4e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:18 -0600 Subject: [PATCH 025/130] arm64: Drop SYS_ from SPE register defines We currently have a non-standard SYS_ prefix in the constants generated for the SPE register bitfields. Drop this in preparation for automatic register definition generation. The SPE mask defines were unshifted, and the SPE register field enumerations were shifted. The autogenerated defines are the opposite, so make the necessary adjustments. No functional changes. 
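The practical difference between the two conventions, in sketch form: an unshifted mask is applied after shifting the register down (or is shifted up to mask in place), while a shifted mask pairs directly with FIELD_GET():

    u64 pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
    u64 ec;

    /* Old style: unshifted mask plus explicit shift. */
    ec = (pmbsr >> SYS_PMBSR_EL1_EC_SHIFT) & SYS_PMBSR_EL1_EC_MASK;

    /* New style: shifted mask; FIELD_GET() extracts in one step. */
    ec = FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr);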
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-2-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/el2_setup.h | 6 +- arch/arm64/include/asm/sysreg.h | 112 ++++++++++++++--------------- arch/arm64/kvm/debug.c | 2 +- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 2 +- drivers/perf/arm_spe_pmu.c | 85 +++++++++++----------- 5 files changed, 103 insertions(+), 104 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 668569adf4d3..f9da43e53cdb 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -53,10 +53,10 @@ cbz x0, .Lskip_spe_\@ // Skip if SPE not present mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, - and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT) + and x0, x0, #(1 << PMBIDR_EL1_P_SHIFT) cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical - mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \ - 1 << SYS_PMSCR_EL2_PA_SHIFT) + mov x0, #(1 << PMSCR_EL2_PCT_SHIFT | \ + 1 << PMSCR_EL2_PA_SHIFT) msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter .Lskip_spe_el2_\@: mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c4ce16333750..dbb0e8e22cf4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -218,59 +218,59 @@ /*** Statistical Profiling Extension ***/ /* ID registers */ #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define SYS_PMSIDR_EL1_FE_SHIFT 0 -#define SYS_PMSIDR_EL1_FT_SHIFT 1 -#define SYS_PMSIDR_EL1_FL_SHIFT 2 -#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 -#define SYS_PMSIDR_EL1_LDS_SHIFT 4 -#define SYS_PMSIDR_EL1_ERND_SHIFT 5 -#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL -#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL -#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL +#define PMSIDR_EL1_FE_SHIFT 0 +#define PMSIDR_EL1_FT_SHIFT 1 +#define PMSIDR_EL1_FL_SHIFT 2 +#define PMSIDR_EL1_ARCHINST_SHIFT 3 +#define PMSIDR_EL1_LDS_SHIFT 4 +#define PMSIDR_EL1_ERND_SHIFT 5 +#define PMSIDR_EL1_INTERVAL_SHIFT 8 +#define PMSIDR_EL1_INTERVAL_MASK GENMASK_ULL(11, 8) +#define PMSIDR_EL1_MAXSIZE_SHIFT 12 +#define PMSIDR_EL1_MAXSIZE_MASK GENMASK_ULL(15, 12) +#define PMSIDR_EL1_COUNTSIZE_SHIFT 16 +#define PMSIDR_EL1_COUNTSIZE_MASK GENMASK_ULL(19, 16) #define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 -#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU -#define SYS_PMBIDR_EL1_P_SHIFT 4 -#define SYS_PMBIDR_EL1_F_SHIFT 5 +#define PMBIDR_EL1_ALIGN_SHIFT 0 +#define PMBIDR_EL1_ALIGN_MASK 0xfU +#define PMBIDR_EL1_P_SHIFT 4 +#define PMBIDR_EL1_F_SHIFT 5 /* Sampling controls */ #define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 -#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 -#define SYS_PMSCR_EL1_CX_SHIFT 3 -#define SYS_PMSCR_EL1_PA_SHIFT 4 -#define SYS_PMSCR_EL1_TS_SHIFT 5 -#define SYS_PMSCR_EL1_PCT_SHIFT 6 +#define PMSCR_EL1_E0SPE_SHIFT 0 +#define PMSCR_EL1_E1SPE_SHIFT 1 +#define PMSCR_EL1_CX_SHIFT 3 +#define PMSCR_EL1_PA_SHIFT 4 +#define PMSCR_EL1_TS_SHIFT 5 +#define PMSCR_EL1_PCT_SHIFT 6 #define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 -#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 -#define SYS_PMSCR_EL2_CX_SHIFT 3 -#define SYS_PMSCR_EL2_PA_SHIFT 4 -#define SYS_PMSCR_EL2_TS_SHIFT 5 -#define SYS_PMSCR_EL2_PCT_SHIFT 6 
+#define PMSCR_EL2_E0HSPE_SHIFT 0 +#define PMSCR_EL2_E2SPE_SHIFT 1 +#define PMSCR_EL2_CX_SHIFT 3 +#define PMSCR_EL2_PA_SHIFT 4 +#define PMSCR_EL2_TS_SHIFT 5 +#define PMSCR_EL2_PCT_SHIFT 6 #define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) #define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define SYS_PMSIRR_EL1_RND_SHIFT 0 -#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL +#define PMSIRR_EL1_RND_SHIFT 0 +#define PMSIRR_EL1_INTERVAL_SHIFT 8 +#define PMSIRR_EL1_INTERVAL_MASK GENMASK_ULL(31, 8) /* Filtering controls */ #define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) #define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define SYS_PMSFCR_EL1_FE_SHIFT 0 -#define SYS_PMSFCR_EL1_FT_SHIFT 1 -#define SYS_PMSFCR_EL1_FL_SHIFT 2 -#define SYS_PMSFCR_EL1_B_SHIFT 16 -#define SYS_PMSFCR_EL1_LD_SHIFT 17 -#define SYS_PMSFCR_EL1_ST_SHIFT 18 +#define PMSFCR_EL1_FE_SHIFT 0 +#define PMSFCR_EL1_FT_SHIFT 1 +#define PMSFCR_EL1_FL_SHIFT 2 +#define PMSFCR_EL1_B_SHIFT 16 +#define PMSFCR_EL1_LD_SHIFT 17 +#define PMSFCR_EL1_ST_SHIFT 18 #define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) #define PMSEVFR_EL1_RES0_IMP \ @@ -280,37 +280,37 @@ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) #define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 +#define PMSLATFR_EL1_MINLAT_SHIFT 0 /* Buffer controls */ #define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define SYS_PMBLIMITR_EL1_E_SHIFT 0 -#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 -#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL -#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) +#define PMBLIMITR_EL1_E_SHIFT 0 +#define PMBLIMITR_EL1_FM_SHIFT 1 +#define PMBLIMITR_EL1_FM_MASK GENMASK_ULL(2, 1) +#define PMBLIMITR_EL1_FM_STOP_IRQ 0 #define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) /* Buffer error reporting */ #define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define SYS_PMBSR_EL1_COLL_SHIFT 16 -#define SYS_PMBSR_EL1_S_SHIFT 17 -#define SYS_PMBSR_EL1_EA_SHIFT 18 -#define SYS_PMBSR_EL1_DL_SHIFT 19 -#define SYS_PMBSR_EL1_EC_SHIFT 26 -#define SYS_PMBSR_EL1_EC_MASK 0x3fUL +#define PMBSR_EL1_COLL_SHIFT 16 +#define PMBSR_EL1_S_SHIFT 17 +#define PMBSR_EL1_EA_SHIFT 18 +#define PMBSR_EL1_DL_SHIFT 19 +#define PMBSR_EL1_EC_SHIFT 26 +#define PMBSR_EL1_EC_MASK GENMASK_ULL(31, 26) -#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) +#define PMBSR_EL1_EC_BUF 0x0UL +#define PMBSR_EL1_EC_FAULT_S1 0x24UL +#define PMBSR_EL1_EC_FAULT_S2 0x25UL -#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL +#define PMBSR_EL1_FAULT_FSC_SHIFT 0 +#define PMBSR_EL1_FAULT_FSC_MASK 0x3fUL -#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 -#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_BUF_BSC_SHIFT 0 +#define PMBSR_EL1_BUF_BSC_MASK 0x3fUL -#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) +#define PMBSR_EL1_BUF_BSC_FULL 0x1UL /*** End of Statistical Profiling Extension ***/ diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index fccf9ec01813..55f80fb93925 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -328,7 +328,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) * we may need to check if the host state needs to be saved. 
*/ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && - !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) + !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT))) vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); /* Check if we have TRBE implemented and available at the host */ diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index e17455773b98..2673bde62fad 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -27,7 +27,7 @@ static void __debug_save_spe(u64 *pmscr_el1) * Check if the host is actually using it ? */ reg = read_sysreg_s(SYS_PMBLIMITR_EL1); - if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) + if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT))) return; /* Yes; save the control register and disable data generation */ diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 65cf93dcc8ee..814ed18346b6 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -12,6 +12,7 @@ #define DRVNAME PMUNAME "_pmu" #define pr_fmt(fmt) DRVNAME ": " fmt +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/bug.h> #include <linux/capability.h> @@ -282,18 +283,18 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << PMSCR_EL1_TS_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << PMSCR_EL1_PA_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << PMSCR_EL1_PCT_SHIFT; if (!attr->exclude_user) - reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT); + reg |= BIT(PMSCR_EL1_E0SPE_SHIFT); if (!attr->exclude_kernel) - reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); + reg |= BIT(PMSCR_EL1_E1SPE_SHIFT); if (get_spe_event_has_cx(event)) - reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); + reg |= BIT(PMSCR_EL1_CX_SHIFT); return reg; } @@ -302,8 +303,7 @@ static void arm_spe_event_sanitise_period(struct perf_event *event) { struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); u64 period = event->hw.sample_period; - u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK - << SYS_PMSIRR_EL1_INTERVAL_SHIFT; + u64 max_period = PMSIRR_EL1_INTERVAL_MASK; if (period < spe_pmu->min_period) period = spe_pmu->min_period; @@ -322,7 +322,7 @@ static u64 arm_spe_event_to_pmsirr(struct perf_event *event) arm_spe_event_sanitise_period(event); - reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, jitter) << PMSIRR_EL1_RND_SHIFT; reg |= event->hw.sample_period; return reg; @@ -333,18 +333,18 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, load_filter) << PMSFCR_EL1_LD_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, store_filter) << PMSFCR_EL1_ST_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << PMSFCR_EL1_B_SHIFT; if (reg) - reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT); + reg |= BIT(PMSFCR_EL1_FT_SHIFT); if (ATTR_CFG_GET_FLD(attr, event_filter)) - reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT); + reg |= BIT(PMSFCR_EL1_FE_SHIFT); if (ATTR_CFG_GET_FLD(attr, min_latency)) - reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT); + reg |=
BIT(PMSFCR_EL1_FL_SHIFT); return reg; } @@ -359,7 +359,7 @@ static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; return ATTR_CFG_GET_FLD(attr, min_latency) - << SYS_PMSLATFR_EL1_MINLAT_SHIFT; + << PMSLATFR_EL1_MINLAT_SHIFT; } static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len) @@ -511,7 +511,7 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle, limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) : arm_spe_pmu_next_off(handle); if (limit) - limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT); + limit |= BIT(PMBLIMITR_EL1_E_SHIFT); limit += (u64)buf->base; base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); @@ -570,28 +570,28 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) /* Service required? */ pmbsr = read_sysreg_s(SYS_PMBSR_EL1); - if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT))) + if (!(pmbsr & BIT(PMBSR_EL1_S_SHIFT))) return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; /* * If we've lost data, disable profiling and also set the PARTIAL * flag to indicate that the last record is corrupted. */ - if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT)) + if (pmbsr & BIT(PMBSR_EL1_DL_SHIFT)) perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | PERF_AUX_FLAG_PARTIAL); /* Report collisions to userspace so that it can up the period */ - if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT)) + if (pmbsr & BIT(PMBSR_EL1_COLL_SHIFT)) perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); /* We only expect buffer management events */ - switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) { - case SYS_PMBSR_EL1_EC_BUF: + switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) { + case PMBSR_EL1_EC_BUF: /* Handled below */ break; - case SYS_PMBSR_EL1_EC_FAULT_S1: - case SYS_PMBSR_EL1_EC_FAULT_S2: + case PMBSR_EL1_EC_FAULT_S1: + case PMBSR_EL1_EC_FAULT_S2: err_str = "Unexpected buffer fault"; goto out_err; default: @@ -600,9 +600,8 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) } /* Buffer management event */ - switch (pmbsr & - (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) { - case SYS_PMBSR_EL1_BUF_BSC_FULL: + switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) { + case PMBSR_EL1_BUF_BSC_FULL: ret = SPE_PMU_BUF_FAULT_ACT_OK; goto out_stop; default: @@ -717,23 +716,23 @@ static int arm_spe_pmu_event_init(struct perf_event *event) return -EINVAL; reg = arm_spe_event_to_pmsfcr(event); - if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FE_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) return -EOPNOTSUPP; - if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FT_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) return -EOPNOTSUPP; - if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) && + if ((reg & BIT(PMSFCR_EL1_FL_SHIFT)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && - (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | - BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) + (reg & (BIT(PMSCR_EL1_PA_SHIFT) | + BIT(PMSCR_EL1_PCT_SHIFT)))) return -EACCES; return 0; @@ -971,14 +970,14 @@ static void __arm_spe_pmu_dev_probe(void *info) /* Read PMBIDR first to determine whether or not we have access */ reg = read_sysreg_s(SYS_PMBIDR_EL1); - if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) { + if (reg & BIT(PMBIDR_EL1_P_SHIFT)) { dev_err(dev, "profiling buffer owned by higher exception level\n"); return; } /* Minimum alignment. 
If it's out-of-range, then fail the probe */ - fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK; + fld = (reg & PMBIDR_EL1_ALIGN_MASK) >> PMBIDR_EL1_ALIGN_SHIFT; spe_pmu->align = 1 << fld; if (spe_pmu->align > SZ_2K) { dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n", @@ -988,26 +987,26 @@ static void __arm_spe_pmu_dev_probe(void *info) /* It's now safe to read PMSIDR and figure out what we've got */ reg = read_sysreg_s(SYS_PMSIDR_EL1); - if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FE_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; - if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FT_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; - if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT)) + if (reg & BIT(PMSIDR_EL1_FL_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; - if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT)) + if (reg & BIT(PMSIDR_EL1_ARCHINST_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; - if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT)) + if (reg & BIT(PMSIDR_EL1_LDS_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_LDS; - if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT)) + if (reg & BIT(PMSIDR_EL1_ERND_SHIFT)) spe_pmu->features |= SPE_PMU_FEAT_ERND; /* This field has a spaced out encoding, so just use a look-up */ - fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK; + fld = (reg & PMSIDR_EL1_INTERVAL_MASK) >> PMSIDR_EL1_INTERVAL_SHIFT; switch (fld) { case 0: spe_pmu->min_period = 256; @@ -1039,7 +1038,7 @@ static void __arm_spe_pmu_dev_probe(void *info) } /* Maximum record size. If it's out-of-range, then fail the probe */ - fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK; + fld = (reg & PMSIDR_EL1_MAXSIZE_MASK) >> PMSIDR_EL1_MAXSIZE_SHIFT; spe_pmu->max_record_sz = 1 << fld; if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n", @@ -1047,7 +1046,7 @@ static void __arm_spe_pmu_dev_probe(void *info) return; } - fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK; + fld = (reg & PMSIDR_EL1_COUNTSIZE_MASK) >> PMSIDR_EL1_COUNTSIZE_SHIFT; switch (fld) { default: dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", From 956936041a56eaebc012cf62a00fafd86958ffd5 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:19 -0600 Subject: [PATCH 026/130] arm64/sysreg: Convert SPE registers to automatic generation Convert all the SPE register defines to automatic generation. No functional changes. New registers and fields for SPEv1.2 are added with the conversion. Some of the PMBSR MSS field defines are kept as the automatic generation has no way to create multiple names for the same register bits. The meaning of the MSS field depends on other bits. 
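For illustration, this is roughly how a consumer ends up interpreting the shared MSS bits depending on the event class; a sketch only, not part of this patch, with pmbsr_mss() an invented helper name (FIELD_GET() comes from <linux/bitfield.h>):

  /* Decode MSS: its meaning depends on PMBSR_EL1.EC */
  static u64 pmbsr_mss(u64 pmbsr)
  {
  	switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) {
  	case PMBSR_EL1_EC_BUF:
  		/* buffer management event: MSS is the buffer status code */
  		return FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr);
  	case PMBSR_EL1_EC_FAULT_S1:
  	case PMBSR_EL1_EC_FAULT_S2:
  		/* fault: the same bits are the fault status code */
  		return FIELD_GET(PMBSR_EL1_FAULT_FSC_MASK, pmbsr);
  	default:
  		return 0;
  	}
  }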
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Mark Brown Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-3-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 89 +------------------- arch/arm64/tools/sysreg | 139 ++++++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+), 85 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index dbb0e8e22cf4..db269eda7c1c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -216,99 +216,18 @@ #define SYS_PAR_EL1_FST GENMASK(6, 1) /*** Statistical Profiling Extension ***/ -/* ID registers */ -#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define PMSIDR_EL1_FE_SHIFT 0 -#define PMSIDR_EL1_FT_SHIFT 1 -#define PMSIDR_EL1_FL_SHIFT 2 -#define PMSIDR_EL1_ARCHINST_SHIFT 3 -#define PMSIDR_EL1_LDS_SHIFT 4 -#define PMSIDR_EL1_ERND_SHIFT 5 -#define PMSIDR_EL1_INTERVAL_SHIFT 8 -#define PMSIDR_EL1_INTERVAL_MASK GENMASK_ULL(11, 8) -#define PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define PMSIDR_EL1_MAXSIZE_MASK GENMASK_ULL(15, 12) -#define PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define PMSIDR_EL1_COUNTSIZE_MASK GENMASK_ULL(19, 16) - -#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define PMBIDR_EL1_ALIGN_SHIFT 0 -#define PMBIDR_EL1_ALIGN_MASK 0xfU -#define PMBIDR_EL1_P_SHIFT 4 -#define PMBIDR_EL1_F_SHIFT 5 - -/* Sampling controls */ -#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define PMSCR_EL1_E0SPE_SHIFT 0 -#define PMSCR_EL1_E1SPE_SHIFT 1 -#define PMSCR_EL1_CX_SHIFT 3 -#define PMSCR_EL1_PA_SHIFT 4 -#define PMSCR_EL1_TS_SHIFT 5 -#define PMSCR_EL1_PCT_SHIFT 6 - -#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define PMSCR_EL2_E0HSPE_SHIFT 0 -#define PMSCR_EL2_E2SPE_SHIFT 1 -#define PMSCR_EL2_CX_SHIFT 3 -#define PMSCR_EL2_PA_SHIFT 4 -#define PMSCR_EL2_TS_SHIFT 5 -#define PMSCR_EL2_PCT_SHIFT 6 - -#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) - -#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define PMSIRR_EL1_RND_SHIFT 0 -#define PMSIRR_EL1_INTERVAL_SHIFT 8 -#define PMSIRR_EL1_INTERVAL_MASK GENMASK_ULL(31, 8) - -/* Filtering controls */ -#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) - -#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define PMSFCR_EL1_FE_SHIFT 0 -#define PMSFCR_EL1_FT_SHIFT 1 -#define PMSFCR_EL1_FL_SHIFT 2 -#define PMSFCR_EL1_B_SHIFT 16 -#define PMSFCR_EL1_LD_SHIFT 17 -#define PMSFCR_EL1_ST_SHIFT 18 - -#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) #define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) #define PMSEVFR_EL1_RES0_V1P1 \ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) -#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define PMSLATFR_EL1_MINLAT_SHIFT 0 - -/* Buffer controls */ -#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define PMBLIMITR_EL1_E_SHIFT 0 -#define PMBLIMITR_EL1_FM_SHIFT 1 -#define PMBLIMITR_EL1_FM_MASK GENMASK_ULL(2, 1) -#define PMBLIMITR_EL1_FM_STOP_IRQ 0 - -#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) - /* Buffer error reporting */ -#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define PMBSR_EL1_COLL_SHIFT 16 -#define PMBSR_EL1_S_SHIFT 17 -#define PMBSR_EL1_EA_SHIFT 18 -#define PMBSR_EL1_DL_SHIFT 19 -#define PMBSR_EL1_EC_SHIFT 26 -#define PMBSR_EL1_EC_MASK GENMASK_ULL(31, 26) +#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK -#define PMBSR_EL1_EC_BUF 0x0UL -#define PMBSR_EL1_EC_FAULT_S1 
0x24UL -#define PMBSR_EL1_EC_FAULT_S2 0x25UL - -#define PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define PMBSR_EL1_FAULT_FSC_MASK 0x3fUL - -#define PMBSR_EL1_BUF_BSC_SHIFT 0 -#define PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK #define PMBSR_EL1_BUF_BSC_FULL 0x1UL diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 184e58fd5631..c323833cf235 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1618,6 +1618,130 @@ Sysreg FAR_EL1 3 0 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL1 3 0 9 9 0 +Res0 63:8 +Field 7:6 PCT +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E1SPE +Field 0 E0SPE +EndSysreg + +Sysreg PMSNEVFR_EL1 3 0 9 9 1 +Field 63:0 E +EndSysreg + +Sysreg PMSICR_EL1 3 0 9 9 2 +Field 63:56 ECOUNT +Res0 55:32 +Field 31:0 COUNT +EndSysreg + +Sysreg PMSIRR_EL1 3 0 9 9 3 +Res0 63:32 +Field 31:8 INTERVAL +Res0 7:1 +Field 0 RND +EndSysreg + +Sysreg PMSFCR_EL1 3 0 9 9 4 +Res0 63:19 +Field 18 ST +Field 17 LD +Field 16 B +Res0 15:4 +Field 3 FnE +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMSEVFR_EL1 3 0 9 9 5 +Field 63:0 E +EndSysreg + +Sysreg PMSLATFR_EL1 3 0 9 9 6 +Res0 63:16 +Field 15:0 MINLAT +EndSysreg + +Sysreg PMSIDR_EL1 3 0 9 9 7 +Res0 63:25 +Field 24 PBT +Field 23:20 FORMAT +Enum 19:16 COUNTSIZE + 0b0010 12_BIT_SAT + 0b0011 16_BIT_SAT +EndEnum +Field 15:12 MAXSIZE +Enum 11:8 INTERVAL + 0b0000 256 + 0b0010 512 + 0b0011 768 + 0b0100 1024 + 0b0101 1536 + 0b0110 2048 + 0b0111 3072 + 0b1000 4096 +EndEnum +Res0 7 +Field 6 FnE +Field 5 ERND +Field 4 LDS +Field 3 ARCHINST +Field 2 FL +Field 1 FT +Field 0 FE +EndSysreg + +Sysreg PMBLIMITR_EL1 3 0 9 10 0 +Field 63:12 LIMIT +Res0 11:6 +Field 5 PMFZ +Res0 4:3 +Enum 2:1 FM + 0b00 FILL + 0b10 DISCARD +EndEnum +Field 0 E +EndSysreg + +Sysreg PMBPTR_EL1 3 0 9 10 1 +Field 63:0 PTR +EndSysreg + +Sysreg PMBSR_EL1 3 0 9 10 3 +Res0 63:32 +Enum 31:26 EC + 0b000000 BUF + 0b100100 FAULT_S1 + 0b100101 FAULT_S2 + 0b011110 FAULT_GPC + 0b011111 IMP_DEF +EndEnum +Res0 25:20 +Field 19 DL +Field 18 EA +Field 17 S +Field 16 COLL +Field 15:0 MSS +EndSysreg + +Sysreg PMBIDR_EL1 3 0 9 10 7 +Res0 63:12 +Enum 11:8 EA + 0b0000 NotDescribed + 0b0001 Ignored + 0b0010 SError +EndEnum +Res0 7:6 +Field 5 F +Field 4 P +Field 3:0 ALIGN +EndSysreg + SysregFields CONTEXTIDR_ELx Res0 63:32 Field 31:0 PROCID @@ -1772,6 +1896,21 @@ Sysreg FAR_EL2 3 4 6 0 0 Field 63:0 ADDR EndSysreg +Sysreg PMSCR_EL2 3 4 9 9 0 +Res0 63:8 +Enum 7:6 PCT + 0b00 VIRT + 0b01 PHYS + 0b11 GUEST +EndEnum +Field 5 TS +Field 4 PA +Field 3 CX +Res0 2 +Field 1 E2SPE +Field 0 E0HSPE +EndSysreg + Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg From 2d347ac23362f6cfc5e04a4b998f51e1e7e909a8 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:20 -0600 Subject: [PATCH 027/130] perf: arm_spe: Drop BIT() and use FIELD_GET/PREP accessors Now that generated sysregs are in place, update the register field accesses. The use of BIT() is no longer needed with the new defines. Use FIELD_GET and FIELD_PREP instead of open coding masking and shifting. No functional change. 
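To make the pattern concrete, for a multi-bit field such as PMSCR_EL1.PCT at bits 7:6 the two forms below are equivalent; this is an illustrative sketch rather than a hunk from the patch, with val holding an unshifted field value:

  /* before: position the value by hand against a _SHIFT define */
  reg |= val << PMSCR_EL1_PCT_SHIFT;

  /* after: <linux/bitfield.h> derives the shift from the mask */
  reg |= FIELD_PREP(PMSCR_EL1_PCT, val);
  val = FIELD_GET(PMSCR_EL1_PCT, reg);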
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-4-327f860daf28@kernel.org Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 70 ++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 814ed18346b6..9b4bd72087ea 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -283,18 +283,18 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << PMSCR_EL1_TS_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << PMSCR_EL1_PA_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << PMSCR_EL1_PCT_SHIFT; + reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable)); + reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable)); + reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable)); if (!attr->exclude_user) - reg |= BIT(PMSCR_EL1_E0SPE_SHIFT); + reg |= PMSCR_EL1_E0SPE; if (!attr->exclude_kernel) - reg |= BIT(PMSCR_EL1_E1SPE_SHIFT); + reg |= PMSCR_EL1_E1SPE; if (get_spe_event_has_cx(event)) - reg |= BIT(PMSCR_EL1_CX_SHIFT); + reg |= PMSCR_EL1_CX; return reg; } @@ -322,7 +322,7 @@ static u64 arm_spe_event_to_pmsirr(struct perf_event *event) arm_spe_event_sanitise_period(event); - reg |= ATTR_CFG_GET_FLD(attr, jitter) << PMSIRR_EL1_RND_SHIFT; + reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter)); reg |= event->hw.sample_period; return reg; @@ -333,18 +333,18 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) struct perf_event_attr *attr = &event->attr; u64 reg = 0; - reg |= ATTR_CFG_GET_FLD(attr, load_filter) << PMSFCR_EL1_LD_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, store_filter) << PMSFCR_EL1_ST_SHIFT; - reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << PMSFCR_EL1_B_SHIFT; + reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter)); + reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter)); + reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter)); if (reg) - reg |= BIT(PMSFCR_EL1_FT_SHIFT); + reg |= PMSFCR_EL1_FT; if (ATTR_CFG_GET_FLD(attr, event_filter)) - reg |= BIT(PMSFCR_EL1_FE_SHIFT); + reg |= PMSFCR_EL1_FE; if (ATTR_CFG_GET_FLD(attr, min_latency)) - reg |= BIT(PMSFCR_EL1_FL_SHIFT); + reg |= PMSFCR_EL1_FL; return reg; } @@ -358,8 +358,7 @@ static u64 arm_spe_event_to_pmsevfr(struct perf_event *event) static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - return ATTR_CFG_GET_FLD(attr, min_latency) - << PMSLATFR_EL1_MINLAT_SHIFT; + return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency)); } static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len) @@ -511,7 +510,7 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle, limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) : arm_spe_pmu_next_off(handle); if (limit) - limit |= BIT(PMBLIMITR_EL1_E_SHIFT); + limit |= PMBLIMITR_EL1_E; limit += (u64)buf->base; base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); @@ -570,23 +569,23 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) /* Service required? 
*/ pmbsr = read_sysreg_s(SYS_PMBSR_EL1); - if (!(pmbsr & BIT(PMBSR_EL1_S_SHIFT))) + if (!FIELD_GET(PMBSR_EL1_S, pmbsr)) return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; /* * If we've lost data, disable profiling and also set the PARTIAL * flag to indicate that the last record is corrupted. */ - if (pmbsr & BIT(PMBSR_EL1_DL_SHIFT)) + if (FIELD_GET(PMBSR_EL1_DL, pmbsr)) perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | PERF_AUX_FLAG_PARTIAL); /* Report collisions to userspace so that it can up the period */ - if (pmbsr & BIT(PMBSR_EL1_COLL_SHIFT)) + if (FIELD_GET(PMBSR_EL1_COLL, pmbsr)) perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); /* We only expect buffer management events */ - switch (FIELD_GET(PMBSR_EL1_EC_MASK, pmbsr)) { + switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) { case PMBSR_EL1_EC_BUF: /* Handled below */ break; @@ -716,23 +715,22 @@ static int arm_spe_pmu_event_init(struct perf_event *event) return -EINVAL; reg = arm_spe_event_to_pmsfcr(event); - if ((reg & BIT(PMSFCR_EL1_FE_SHIFT)) && + if ((FIELD_GET(PMSFCR_EL1_FE, reg)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) return -EOPNOTSUPP; - if ((reg & BIT(PMSFCR_EL1_FT_SHIFT)) && + if ((FIELD_GET(PMSFCR_EL1_FT, reg)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) return -EOPNOTSUPP; - if ((reg & BIT(PMSFCR_EL1_FL_SHIFT)) && + if ((FIELD_GET(PMSFCR_EL1_FL, reg)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && - (reg & (BIT(PMSCR_EL1_PA_SHIFT) | - BIT(PMSCR_EL1_PCT_SHIFT)))) + (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))) return -EACCES; return 0; @@ -970,14 +968,14 @@ static void __arm_spe_pmu_dev_probe(void *info) /* Read PMBIDR first to determine whether or not we have access */ reg = read_sysreg_s(SYS_PMBIDR_EL1); - if (reg & BIT(PMBIDR_EL1_P_SHIFT)) { + if (FIELD_GET(PMBIDR_EL1_P, reg)) { dev_err(dev, "profiling buffer owned by higher exception level\n"); return; } /* Minimum alignment. If it's out-of-range, then fail the probe */ - fld = (reg & PMBIDR_EL1_ALIGN_MASK) >> PMBIDR_EL1_ALIGN_SHIFT; + fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg); spe_pmu->align = 1 << fld; if (spe_pmu->align > SZ_2K) { dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n", @@ -987,26 +985,26 @@ static void __arm_spe_pmu_dev_probe(void *info) /* It's now safe to read PMSIDR and figure out what we've got */ reg = read_sysreg_s(SYS_PMSIDR_EL1); - if (reg & BIT(PMSIDR_EL1_FE_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_FE, reg)) spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; - if (reg & BIT(PMSIDR_EL1_FT_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_FT, reg)) spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; - if (reg & BIT(PMSIDR_EL1_FL_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_FL, reg)) spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; - if (reg & BIT(PMSIDR_EL1_ARCHINST_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg)) spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; - if (reg & BIT(PMSIDR_EL1_LDS_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_LDS, reg)) spe_pmu->features |= SPE_PMU_FEAT_LDS; - if (reg & BIT(PMSIDR_EL1_ERND_SHIFT)) + if (FIELD_GET(PMSIDR_EL1_ERND, reg)) spe_pmu->features |= SPE_PMU_FEAT_ERND; /* This field has a spaced out encoding, so just use a look-up */ - fld = (reg & PMSIDR_EL1_INTERVAL_MASK) >> PMSIDR_EL1_INTERVAL_SHIFT; + fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg); switch (fld) { case 0: spe_pmu->min_period = 256; @@ -1038,7 +1036,7 @@ static void __arm_spe_pmu_dev_probe(void *info) } /* Maximum record size. 
If it's out-of-range, then fail the probe */ - fld = (reg & PMSIDR_EL1_MAXSIZE_MASK) >> PMSIDR_EL1_MAXSIZE_SHIFT; + fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg); spe_pmu->max_record_sz = 1 << fld; if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n", @@ -1046,7 +1044,7 @@ static void __arm_spe_pmu_dev_probe(void *info) return; } - fld = (reg & PMSIDR_EL1_COUNTSIZE_MASK) >> PMSIDR_EL1_COUNTSIZE_SHIFT; + fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg); switch (fld) { default: dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", From 05e4c88e2b5c9f77409577702d6d516682e1ce14 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:21 -0600 Subject: [PATCH 028/130] perf: arm_spe: Use new PMSIDR_EL1 register enums Now that the SPE register definitions include enums for some PMSIDR_EL1 fields, use them in the driver in place of magic values. Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-5-327f860daf28@kernel.org Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 9b4bd72087ea..af6d3867c3e7 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1006,32 +1006,32 @@ static void __arm_spe_pmu_dev_probe(void *info) /* This field has a spaced out encoding, so just use a look-up */ fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg); switch (fld) { - case 0: + case PMSIDR_EL1_INTERVAL_256: spe_pmu->min_period = 256; break; - case 2: + case PMSIDR_EL1_INTERVAL_512: spe_pmu->min_period = 512; break; - case 3: + case PMSIDR_EL1_INTERVAL_768: spe_pmu->min_period = 768; break; - case 4: + case PMSIDR_EL1_INTERVAL_1024: spe_pmu->min_period = 1024; break; - case 5: + case PMSIDR_EL1_INTERVAL_1536: spe_pmu->min_period = 1536; break; - case 6: + case PMSIDR_EL1_INTERVAL_2048: spe_pmu->min_period = 2048; break; - case 7: + case PMSIDR_EL1_INTERVAL_3072: spe_pmu->min_period = 3072; break; default: dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n", fld); fallthrough; - case 8: + case PMSIDR_EL1_INTERVAL_4096: spe_pmu->min_period = 4096; } @@ -1050,10 +1050,10 @@ static void __arm_spe_pmu_dev_probe(void *info) dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", fld); fallthrough; - case 2: + case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT: spe_pmu->counter_sz = 12; break; - case 3: + case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT: spe_pmu->counter_sz = 16; } From 4998897b1e96d624bf094e5785b27023e17ba570 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:22 -0600 Subject: [PATCH 029/130] perf: arm_spe: Support new SPEv1.2/v8.7 'not taken' event Arm SPEv1.2 (Armv8.7/v9.2) adds a new event, 'not taken', in bit 6 of the PMSEVFR_EL1 register. Update arm_spe_pmsevfr_res0() to support the additional event. 
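The RES0 masks are cumulative: each newer SPE version allocates some previously reserved bits, so its mask is the previous version's mask with the newly defined bits cleared. As a hedged sketch of how they are used (pmsevfr_valid() is an invented name; arm_spe_pmsevfr_res0() is the real helper updated here):

  /* a user supplied PMSEVFR_EL1 value must set no RES0 bits */
  static bool pmsevfr_valid(u64 pmsevfr, u16 pmsver)
  {
  	return !(pmsevfr & arm_spe_pmsevfr_res0(pmsver));
  }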
Tested-by: James Clark Signed-off-by: Rob Herring Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-6-327f860daf28@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 2 ++ drivers/perf/arm_spe_pmu.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index db269eda7c1c..fc8787727792 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -221,6 +221,8 @@ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) #define PMSEVFR_EL1_RES0_V1P1 \ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P2 \ + (PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6)) /* Buffer error reporting */ #define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index af6d3867c3e7..82f67e941bc4 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -677,9 +677,11 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver) case ID_AA64DFR0_EL1_PMSVer_IMP: return PMSEVFR_EL1_RES0_IMP; case ID_AA64DFR0_EL1_PMSVer_V1P1: + return PMSEVFR_EL1_RES0_V1P1; + case ID_AA64DFR0_EL1_PMSVer_V1P2: /* Return the highest version we support in default */ default: - return PMSEVFR_EL1_RES0_V1P1; + return PMSEVFR_EL1_RES0_V1P2; } } From ce514000da4f4b5f850f3339f805471e5c5c1caf Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:36 +0000 Subject: [PATCH 030/130] arm64/sme: Rename za_state to sme_state In preparation for adding support for storage for ZT0 to the thread_struct rename za_state to sme_state. Since ZT0 is accessible when PSTATE.ZA is set just like ZA itself we will extend the allocation done for ZA to cover it, avoiding the need to further expand task_struct for non-SME tasks. No functional changes. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-1-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 15 +++++++------ arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/fpsimd.c | 34 +++++++++++++++--------------- arch/arm64/kernel/process.c | 13 ++++++------ arch/arm64/kernel/ptrace.c | 6 +++--- arch/arm64/kernel/signal.c | 8 +++---- arch/arm64/kvm/fpsimd.c | 2 +- 7 files changed, 42 insertions(+), 38 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index e6fa1e2982c8..2d3fa80cd95d 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void); struct cpu_fp_state { struct user_fpsimd_state *st; void *sve_state; - void *za_state; + void *sme_state; u64 *svcr; unsigned int sve_vl; unsigned int sme_vl; @@ -355,14 +355,17 @@ extern int sme_get_current_vl(void); /* * Return how many bytes of memory are required to store the full SME - * specific state (currently just ZA) for task, given task's currently - * configured vector length. + * specific state for task, given task's currently configured vector + * length. 
*/ -static inline size_t za_state_size(struct task_struct const *task) +static inline size_t sme_state_size(struct task_struct const *task) { unsigned int vl = task_get_sme_vl(task); + size_t size; - return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + + return size; } #else @@ -382,7 +385,7 @@ static inline int sme_max_virtualisable_vl(void) { return 0; } static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; } static inline int sme_get_current_vl(void) { return -EINVAL; } -static inline size_t za_state_size(struct task_struct const *task) +static inline size_t sme_state_size(struct task_struct const *task) { return 0; } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index d51b32a69309..3918f2a67970 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -161,7 +161,7 @@ struct thread_struct { enum fp_type fp_type; /* registers FPSIMD or SVE? */ unsigned int fpsimd_cpu; void *sve_state; /* SVE registers, if any */ - void *za_state; /* ZA register, if any */ + void *sme_state; /* ZA and ZT state, if any */ unsigned int vl[ARM64_VEC_MAX]; /* vector length */ unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */ unsigned long fault_address; /* fault info */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index dcc81e7200d4..9e168a9eb615 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, /* * TIF_SME controls whether a task can use SME without trapping while * in userspace, when TIF_SME is set then we must have storage - * alocated in sve_state and za_state to store the contents of both ZA + * alocated in sve_state and sme_state to store the contents of both ZA * and the SVE registers for both streaming and non-streaming modes. * * If both SVCR.ZA and SVCR.SM are disabled then at any point we @@ -429,7 +429,7 @@ static void task_fpsimd_load(void) write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(¤t->thread)) - za_load_state(current->thread.za_state); + za_load_state(current->thread.sme_state); if (thread_sm_enabled(¤t->thread)) restore_ffr = system_supports_fa64(); @@ -490,7 +490,7 @@ static void fpsimd_save(void) *svcr = read_sysreg_s(SYS_SVCR); if (*svcr & SVCR_ZA_MASK) - za_save_state(last->za_state); + za_save_state(last->sme_state); /* If we are in streaming mode override regular SVE. */ if (*svcr & SVCR_SM_MASK) { @@ -1257,30 +1257,30 @@ void fpsimd_release_task(struct task_struct *dead_task) #ifdef CONFIG_ARM64_SME /* - * Ensure that task->thread.za_state is allocated and sufficiently large. + * Ensure that task->thread.sme_state is allocated and sufficiently large. * * This function should be used only in preparation for replacing - * task->thread.za_state with new data. The memory is always zeroed + * task->thread.sme_state with new data. The memory is always zeroed * here to prevent stale data from showing through: this is done in * the interest of testability and predictability, the architecture * guarantees that when ZA is enabled it will be zeroed. */ void sme_alloc(struct task_struct *task) { - if (task->thread.za_state) { - memset(task->thread.za_state, 0, za_state_size(task)); + if (task->thread.sme_state) { + memset(task->thread.sme_state, 0, sme_state_size(task)); return; } /* This could potentially be up to 64K. 
*/ - task->thread.za_state = - kzalloc(za_state_size(task), GFP_KERNEL); + task->thread.sme_state = + kzalloc(sme_state_size(task), GFP_KERNEL); } static void sme_free(struct task_struct *task) { - kfree(task->thread.za_state); - task->thread.za_state = NULL; + kfree(task->thread.sme_state); + task->thread.sme_state = NULL; } void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) @@ -1488,7 +1488,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) sve_alloc(current, false); sme_alloc(current); - if (!current->thread.sve_state || !current->thread.za_state) { + if (!current->thread.sve_state || !current->thread.sme_state) { force_sig(SIGKILL); return; } @@ -1609,7 +1609,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type) void fpsimd_flush_thread(void) { void *sve_state = NULL; - void *za_state = NULL; + void *sme_state = NULL; if (!system_supports_fpsimd()) return; @@ -1634,8 +1634,8 @@ void fpsimd_flush_thread(void) clear_thread_flag(TIF_SME); /* Defer kfree() while in atomic context */ - za_state = current->thread.za_state; - current->thread.za_state = NULL; + sme_state = current->thread.sme_state; + current->thread.sme_state = NULL; fpsimd_flush_thread_vl(ARM64_VEC_SME); current->thread.svcr = 0; @@ -1645,7 +1645,7 @@ void fpsimd_flush_thread(void) put_cpu_fpsimd_context(); kfree(sve_state); - kfree(za_state); + kfree(sme_state); } /* @@ -1711,7 +1711,7 @@ static void fpsimd_bind_task_to_cpu(void) WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; last->sve_state = current->thread.sve_state; - last->za_state = current->thread.za_state; + last->sme_state = current->thread.sme_state; last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 269ac1c25ae2..4ce0c4313ec6 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -311,23 +311,24 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * This may be shortly freed if we exec() or if CLONE_SETTLS * but it's simpler to do it here. To avoid confusing the rest * of the code ensure that we have a sve_state allocated - * whenever za_state is allocated. + * whenever sme_state is allocated. 
*/ if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), GFP_KERNEL); if (!dst->thread.sve_state) return -ENOMEM; - dst->thread.za_state = kmemdup(src->thread.za_state, - za_state_size(src), - GFP_KERNEL); - if (!dst->thread.za_state) { + + dst->thread.sme_state = kmemdup(src->thread.sme_state, + sme_state_size(src), + GFP_KERNEL); + if (!dst->thread.sme_state) { kfree(dst->thread.sve_state); dst->thread.sve_state = NULL; return -ENOMEM; } } else { - dst->thread.za_state = NULL; + dst->thread.sme_state = NULL; clear_tsk_thread_flag(dst, TIF_SME); } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 2686ab157601..67256e7772e4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1045,7 +1045,7 @@ static int za_get(struct task_struct *target, if (thread_za_enabled(&target->thread)) { start = end; end = ZA_PT_SIZE(vq); - membuf_write(&to, target->thread.za_state, end - start); + membuf_write(&to, target->thread.sme_state, end - start); } /* Zero any trailing padding */ @@ -1099,7 +1099,7 @@ static int za_set(struct task_struct *target, /* Allocate/reinit ZA storage */ sme_alloc(target); - if (!target->thread.za_state) { + if (!target->thread.sme_state) { ret = -ENOMEM; goto out; } @@ -1124,7 +1124,7 @@ static int za_set(struct task_struct *target, start = ZA_PT_ZA_OFFSET; end = ZA_PT_SIZE(vq); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.za_state, + target->thread.sme_state, start, end); if (ret) goto out; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index e0d09bf5b01b..27768809dd3e 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -389,7 +389,7 @@ static int preserve_za_context(struct za_context __user *ctx) * fpsimd_signal_preserve_current_state(). */ err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, - current->thread.za_state, + current->thread.sme_state, ZA_SIG_REGS_SIZE(vq)); } @@ -420,7 +420,7 @@ static int restore_za_context(struct user_ctxs *user) /* * Careful: we are about __copy_from_user() directly into - * thread.za_state with preemption enabled, so protection is + * thread.sme_state with preemption enabled, so protection is * needed to prevent a racing context switch from writing stale * registers back over the new data. 
*/ @@ -429,13 +429,13 @@ static int restore_za_context(struct user_ctxs *user) /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ sme_alloc(current); - if (!current->thread.za_state) { + if (!current->thread.sme_state) { current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); return -ENOMEM; } - err = __copy_from_user(current->thread.za_state, + err = __copy_from_user(current->thread.sme_state, (char __user const *)user->za + ZA_SIG_REGS_OFFSET, ZA_SIG_REGS_SIZE(vq)); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 02dd7e9ebd39..235775d0c825 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -143,7 +143,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fp_state.st = &vcpu->arch.ctxt.fp_regs; fp_state.sve_state = vcpu->arch.sve_state; fp_state.sve_vl = vcpu->arch.sve_max_vl; - fp_state.za_state = NULL; + fp_state.sme_state = NULL; fp_state.svcr = &vcpu->arch.svcr; fp_state.fp_type = &vcpu->arch.fp_type; From 6dabf1fac6b48d256b1d78b81c9356a88df26d2f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:37 +0000 Subject: [PATCH 031/130] arm64: Document boot requirements for SME 2 SME 2 introduces the new ZT0 register; we require that access to this register is not trapped when we identify that the feature is supported. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-2-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/booting.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst index 96fe10ec6c24..f8d0a7288c73 100644 --- a/Documentation/arm64/booting.rst +++ b/Documentation/arm64/booting.rst @@ -369,6 +369,16 @@ Before jumping into the kernel, the following conditions must be met: - HCR_EL2.ATA (bit 56) must be initialised to 0b1. + For CPUs with the Scalable Matrix Extension version 2 (FEAT_SME2): + + - If EL3 is present: + + - SMCR_EL3.EZT0 (bit 30) must be initialised to 0b1. + + - If the kernel is entered at EL1 and EL2 is present: + + - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1. + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. Where the values documented From 0f3bbe0edf788f1d43400a68a84eb37c9f13dfa5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:38 +0000 Subject: [PATCH 032/130] arm64/sysreg: Update system registers for SME 2 and 2.1 FEAT_SME2 and FEAT_SME2P1 introduce several new SME features which can be enumerated via ID_AA64SMFR0_EL1, and a new register ZT0, access to which is controlled via SMCR_ELn; add the relevant register descriptions.
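With these descriptions in place, enumeration can use the generated constants rather than magic numbers; a minimal sketch, assuming the existing SYS_FIELD_GET() helper and the generated ID_AA64SMFR0_EL1_SMEver_* names:

  u64 smfr0 = read_sysreg_s(SYS_ID_AA64SMFR0_EL1);

  /* SMEver is an unsigned enumeration, higher values are supersets */
  if (SYS_FIELD_GET(ID_AA64SMFR0_EL1, SMEver, smfr0) >=
      ID_AA64SMFR0_EL1_SMEver_SME2)
  	pr_info("SME2 implemented\n");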
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-3-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 184e58fd5631..13c87d8a42f1 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -894,6 +894,7 @@ EndEnum Enum 27:24 SME 0b0000 NI 0b0001 IMP + 0b0010 SME2 EndEnum Res0 23:20 Enum 19:16 MPAM_frac @@ -975,7 +976,9 @@ Enum 63 FA64 EndEnum Res0 62:60 Enum 59:56 SMEver - 0b0000 IMP + 0b0000 SME + 0b0001 SME2 + 0b0010 SME2p1 EndEnum Enum 55:52 I16I64 0b0000 NI @@ -986,7 +989,19 @@ Enum 48 F64F64 0b0 NI 0b1 IMP EndEnum -Res0 47:40 +Enum 47:44 I16I32 + 0b0000 NI + 0b0101 IMP +EndEnum +Enum 43 B16B16 + 0b0 NI + 0b1 IMP +EndEnum +Enum 42 F16F16 + 0b0 NI + 0b1 IMP +EndEnum +Res0 41:40 Enum 39:36 I8I32 0b0000 NI 0b1111 IMP @@ -999,7 +1014,10 @@ Enum 34 B16F32 0b0 NI 0b1 IMP EndEnum -Res0 33 +Enum 33 BI32I32 + 0b0 NI + 0b1 IMP +EndEnum Enum 32 F32F32 0b0 NI 0b1 IMP @@ -1599,7 +1617,8 @@ EndSysreg SysregFields SMCR_ELx Res0 63:32 Field 31 FA64 -Res0 30:9 +Field 30 EZT0 +Res0 29:9 Raz 8:4 Field 3:0 LEN EndSysregFields From 4edc11744e8cfe9f6d38fe3f656e949c6918bdd2 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:39 +0000 Subject: [PATCH 033/130] arm64/sme: Document SME 2 and SME 2.1 ABI As well as a number of simple features which only add new instructions and require corresponding hwcaps, SME2 introduces a new register, ZT0, for which we must define an ABI. Fortunately this is a fixed size of 512 bits and therefore much more straightforward than the base SME state; the only wrinkle is that it is only accessible when ZA is accessible. While there is only a single register, the architecture is written with a view to extensibility, including a number in the name, so follow this in the ABI. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-4-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/sme.rst | 52 ++++++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/Documentation/arm64/sme.rst b/Documentation/arm64/sme.rst index 16d2db4c2e2e..68d1efb7d171 100644 --- a/Documentation/arm64/sme.rst +++ b/Documentation/arm64/sme.rst @@ -18,14 +18,19 @@ model features for SME is included in Appendix A. 1. General ----------- -* PSTATE.SM, PSTATE.ZA, the streaming mode vector length, the ZA - register state and TPIDR2_EL0 are tracked per thread. +* PSTATE.SM, PSTATE.ZA, the streaming mode vector length, the ZA and (when + present) ZTn register state and TPIDR2_EL0 are tracked per thread. * The presence of SME is reported to userspace via HWCAP2_SME in the aux vector AT_HWCAP2 entry. Presence of this flag implies the presence of the SME instructions and registers, and the Linux-specific system interfaces described in this document. SME is reported in /proc/cpuinfo as "sme". +* The presence of SME2 is reported to userspace via HWCAP2_SME2 in the + aux vector AT_HWCAP2 entry. Presence of this flag implies the presence of + the SME2 instructions and ZT0, and the Linux-specific system interfaces + described in this document. SME2 is reported in /proc/cpuinfo as "sme2". + * Support for the execution of SME instructions in userspace can also be detected by reading the CPU ID register ID_AA64PFR1_EL1 using an MRS instruction, and checking that the value of the SME field is nonzero.
[3] @@ -44,6 +49,7 @@ model features for SME is included in Appendix A. HWCAP2_SME_B16F32 HWCAP2_SME_F32F32 HWCAP2_SME_FA64 + HWCAP2_SME2 This list may be extended over time as the SME architecture evolves. @@ -52,8 +58,8 @@ model features for SME is included in Appendix A. cpu-feature-registers.txt for details. * Debuggers should restrict themselves to interacting with the target via the - NT_ARM_SVE, NT_ARM_SSVE and NT_ARM_ZA regsets. The recommended way - of detecting support for these regsets is to connect to a target process + NT_ARM_SVE, NT_ARM_SSVE, NT_ARM_ZA and NT_ARM_ZT regsets. The recommended + way of detecting support for these regsets is to connect to a target process first and then attempt a ptrace(PTRACE_GETREGSET, pid, NT_ARM_<regset>, &iov). @@ -89,13 +95,13 @@ be zeroed. ------------------------- * On syscall PSTATE.ZA is preserved, if PSTATE.ZA==1 then the contents of the - ZA matrix are preserved. + ZA matrix and ZTn (if present) are preserved. * On syscall PSTATE.SM will be cleared and the SVE registers will be handled as per the standard SVE ABI. -* Neither the SVE registers nor ZA are used to pass arguments to or receive - results from any syscall. +* None of the SVE registers, ZA or ZTn are used to pass arguments to + or receive results from any syscall. * On process creation (eg, clone()) the newly created process will have PSTATE.SM cleared. @@ -134,6 +140,14 @@ be zeroed. __reserved[] referencing this space. za_context is then written in the extra space. Refer to [1] for further details about this mechanism. +* If ZTn is supported and PSTATE.ZA==1 then a signal frame record for ZTn will + be generated. + +* The signal record for ZTn has magic ZT_MAGIC (0x5a544e01) and consists of a + standard signal frame header followed by a struct zt_context specifying + the number of ZTn registers supported by the system, then zt_context.nregs + blocks of 64 bytes of data per register. + 5. Signal return ----------------- @@ -151,6 +165,9 @@ When returning from a signal handler: the signal frame does not match the current vector length, the signal return attempt is treated as illegal, resulting in a forced SIGSEGV. +* If ZTn is not supported or PSTATE.ZA==0 then it is illegal to have a + signal frame record for ZTn, resulting in a forced SIGSEGV. + 6. prctl extensions -------------------- @@ -214,8 +231,8 @@ prctl(PR_SME_SET_VL, unsigned long arg) vector length that will be applied at the next execve() by the calling thread. - * Changing the vector length causes all of ZA, P0..P15, FFR and all bits of - Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become + * Changing the vector length causes all of ZA, ZTn, P0..P15, FFR and all + bits of Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become unspecified, including both streaming and non-streaming SVE state. Calling PR_SME_SET_VL with vl equal to the thread's current vector length, or calling PR_SME_SET_VL with the PR_SVE_SET_VL_ONEXEC flag, @@ -317,6 +334,15 @@ The regset data starts with struct user_za_header, containing: * The effect of writing a partial, incomplete payload is unspecified. +* A new regset NT_ARM_ZT is defined for access to ZTn state via + PTRACE_GETREGSET and PTRACE_SETREGSET. + +* The NT_ARM_ZT regset consists of a single 512 bit register. + +* When PSTATE.ZA==0 reads of NT_ARM_ZT will report all bits of ZTn as 0. + +* Writes to NT_ARM_ZT will set PSTATE.ZA to 1. + 8.
ELF coredump extensions --------------------------- @@ -331,6 +357,11 @@ The regset data starts with struct user_za_header, containing: been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread when the coredump was generated. +* A NT_ARM_ZT note will be added to each coredump for each thread of the + dumped process. The contents will be equivalent to the data that would have + been read if a PTRACE_GETREGSET of NT_ARM_ZT were executed for each thread + when the coredump was generated. + * The NT_ARM_TLS note will be extended to two registers, the second register will contain TPIDR2_EL0 on systems that support SME and will be read as zero with writes ignored otherwise. @@ -406,6 +437,9 @@ In A64 state, SME adds the following: For best system performance it is strongly encouraged for software to enable ZA only when it is actively being used. +* A new ZT0 register is introduced when SME2 is present. This is a 512 bit + register which is accessible when PSTATE.ZA is set, as ZA itself is. + * Two new 1 bit fields in PSTATE which may be controlled via the SMSTART and SMSTOP instructions or by access to the SVCR system register: From 8ef55603b8eaf9362464b3197eb139baa4bd18b9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:40 +0000 Subject: [PATCH 034/130] arm64/esr: Document ISS for ZT0 being disabled SME2 defines a new ISS code for use when trapping accesses to ZT0; add a definition for it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-5-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 15b34fbfca66..5f3271e9d5df 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -341,6 +341,7 @@ #define ESR_ELx_SME_ISS_ILL 1 #define ESR_ELx_SME_ISS_SM_DISABLED 2 #define ESR_ELx_SME_ISS_ZA_DISABLED 3 +#define ESR_ELx_SME_ISS_ZT_DISABLED 4 #ifndef __ASSEMBLY__ #include <asm/types.h> From 2cdeecdb95134cc0abc76626a59c94073cda8a47 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:41 +0000 Subject: [PATCH 035/130] arm64/sme: Manually encode ZT0 load and store instructions In order to avoid unrealistic toolchain requirements we manually encode the instructions for loading and storing ZT0. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-6-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimdmacros.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 5e0910cf4832..cd03819a3b68 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -220,6 +220,28 @@ | ((\offset) & 7) .endm +/* + * LDR (ZT0) + * + * LDR ZT0, nx + */ +.macro _ldr_zt nx + _check_general_reg \nx + .inst 0xe11f8000 \ + | (\nx << 5) +.endm + +/* + * STR (ZT0) + * + * STR ZT0, nx + */ +.macro _str_zt nx + _check_general_reg \nx + .inst 0xe13f8000 \ + | (\nx << 5) +.endm + /* * Zero the entire ZA array * ZERO ZA From f122576f35336820259a79847e408b9f807eba15 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:42 +0000 Subject: [PATCH 036/130] arm64/sme: Enable host kernel to access ZT0 The new register ZT0 introduced by SME2 comes with a new trap; disable it for the host kernel so that we can implement support for it.
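This patch handles the EL2 view in the early boot code; for reference, the EL1 side added by a later patch in this series amounts to setting the new SMCR_EL1.EZT0 bit:

  /* allow EL1/EL0 access to ZT0, done by sme2_kernel_enable() later */
  write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
  	       SYS_SMCR_EL1);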
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-7-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/hyp-stub.S | 6 ++++++ arch/arm64/kernel/idreg-override.c | 1 + 2 files changed, 7 insertions(+) diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 2ee18c860f2a..d31d1acb170d 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -132,6 +132,12 @@ SYM_CODE_START_LOCAL(__finalise_el2) orr x0, x0, SMCR_ELx_FA64_MASK .Lskip_sme_fa64: + // ZT0 available? + __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0 +.Linit_sme_zt0: + orr x0, x0, SMCR_ELx_EZT0_MASK +.Lskip_sme_zt0: + orr x0, x0, #SMCR_ELx_LEN_MASK // Enable full SME vector msr_s SYS_SMCR_EL2, x0 // length for EL1. diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 95133765ed29..d833d78a7f31 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -131,6 +131,7 @@ static const struct ftr_set_desc smfr0 __initconst = { .name = "id_aa64smfr0", .override = &id_aa64smfr0_override, .fields = { + FIELD("smever", ID_AA64SMFR0_EL1_SMEver_SHIFT, NULL), /* FA64 is a one bit field... :-/ */ { "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, }, {} From d4913eee152d3ffaf9a1e70073bc15e773625685 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:43 +0000 Subject: [PATCH 037/130] arm64/sme: Add basic enumeration for SME2 Add basic feature detection for SME2, detecting that the feature is present and disabling traps for ZT0. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-8-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 ++++++ arch/arm64/include/asm/fpsimd.h | 1 + arch/arm64/kernel/cpufeature.c | 14 ++++++++++++++ arch/arm64/kernel/fpsimd.c | 11 +++++++++++ arch/arm64/tools/cpucaps | 1 + 5 files changed, 33 insertions(+) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..fc2c739f48f1 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void) cpus_have_const_cap(ARM64_SME); } +static __always_inline bool system_supports_sme2(void) +{ + return IS_ENABLED(CONFIG_ARM64_SME) && + cpus_have_const_cap(ARM64_SME2); +} + static __always_inline bool system_supports_fa64(void) { return IS_ENABLED(CONFIG_ARM64_SME) && diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2d3fa80cd95d..2a66e3b94553 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -118,6 +118,7 @@ extern void za_load_state(void const *state); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused); +extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused); extern u64 read_zcr_features(void); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..fd90905bc2e6 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -282,6 +282,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { 
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), @@ -2649,6 +2651,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, .cpu_enable = fa64_kernel_enable, }, + { + .desc = "SME2", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_SME2, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, + .field_width = ID_AA64PFR1_EL1_SME_WIDTH, + .min_field_value = ID_AA64PFR1_EL1_SME_SME2, + .matches = has_cpuid_feature, + .cpu_enable = sme2_kernel_enable, + }, #endif /* CONFIG_ARM64_SME */ { .desc = "WFx with timeout", diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 9e168a9eb615..717ae4aaa021 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1298,6 +1298,17 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) isb(); } +/* + * This must be called after sme_kernel_enable(), we rely on the + * feature table being sorted to ensure this. + */ +void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) +{ + /* Allow use of ZT0 */ + write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK, + SYS_SMCR_EL1); +} + /* * This must be called after sme_kernel_enable(), we rely on the * feature table being sorted to ensure this. diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index a86ee376920a..1222b0ed63a2 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -50,6 +50,7 @@ MTE MTE_ASYMM SME SME_FA64 +SME2 SPECTRE_V2 SPECTRE_V3A SPECTRE_V4 From d6138b4adc70729404efeb0133bc9f6e9ad78d03 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:44 +0000 Subject: [PATCH 038/130] arm64/sme: Provide storage for ZT0 When the system supports SME2 there is an additional register, ZT0, which we must store when the task is using SME. Since ZT0, just like ZA, is accessible only when PSTATE.ZA is set, we allocate storage for it along with ZA: the allocation for the memory region where we store ZA is enlarged and the data for ZT0 is stored immediately after the data for ZA.
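A sketch of the resulting layout, using the thread_zt_state() helper added by this patch:

  /* one allocation: the ZA image first, the ZT0 data directly after */
  void *za = task->thread.sme_state;
  void *zt = thread_zt_state(&task->thread);
  /* zt == za + ZA_SIG_REGS_SIZE(sve_vq_from_vl(task_get_sme_vl(task))) */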
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-9-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 13 +++++++++++++ arch/arm64/kernel/process.c | 10 +++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2a66e3b94553..8df769c20677 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -105,6 +105,13 @@ static inline void *sve_pffr(struct thread_struct *thread) return (char *)thread->sve_state + sve_ffr_offset(vl); } +static inline void *thread_zt_state(struct thread_struct *thread) +{ + /* The ZT register state is stored immediately after the ZA state */ + unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread)); + return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq); +} + extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr); extern void sve_load_state(void const *state, u32 const *pfpsr, int restore_ffr); @@ -354,6 +361,9 @@ extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); +/* Will move with signal support */ +#define ZT_SIG_REG_SIZE 512 + /* * Return how many bytes of memory are required to store the full SME * specific state for task, given task's currently configured vector @@ -366,6 +376,9 @@ static inline size_t sme_state_size(struct task_struct const *task) size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl)); + if (system_supports_sme2()) + size += ZT_SIG_REG_SIZE; + return size; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 4ce0c4313ec6..71d59b5abede 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -307,11 +307,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * In the unlikely event that we create a new thread with ZA - * enabled we should retain the ZA state so duplicate it here. - * This may be shortly freed if we exec() or if CLONE_SETTLS - * but it's simpler to do it here. To avoid confusing the rest - * of the code ensure that we have a sve_state allocated - * whenever sme_state is allocated. + * enabled we should retain the ZA and ZT state so duplicate + * it here. This may be shortly freed if we exec() or if + * CLONE_SETTLS but it's simpler to do it here. To avoid + * confusing the rest of the code ensure that we have a + * sve_state allocated whenever sme_state is allocated. */ if (thread_za_enabled(&src->thread)) { dst->thread.sve_state = kzalloc(sve_state_size(src), From 95fcec713259d440626526ed96ff5d3bac6179ea Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:45 +0000 Subject: [PATCH 039/130] arm64/sme: Implement context switching for ZT0 When the system supports SME2 the ZT0 register must be context switched as part of the floating point state. This register is stored immediately after ZA in memory and is only accessible when PSTATE.ZA is set so we handle it in the same functions we use to save and restore ZA. 
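In C terms the contract implemented by the assembly below is roughly the following (a sketch only, with illustrative parameters standing in for the register state; the buffer layout is the one established in the previous patch):

  #include <string.h>

  #define ZT0_BYTES	64	/* ZT0 is a single 512-bit register */

  /* Model of sme_save_state(): ZA image first, ZT0 directly after it */
  static void sme_save_state_model(void *state, int zt,
  				   const void *za, size_t za_bytes,
  				   const void *zt0)
  {
  	memcpy(state, za, za_bytes);
  	if (zt)
  		memcpy((char *)state + za_bytes, zt0, ZT0_BYTES);
  }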
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-10-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 4 ++-- arch/arm64/kernel/entry-fpsimd.S | 30 ++++++++++++++++++++---------- arch/arm64/kernel/fpsimd.c | 6 ++++-- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 8df769c20677..0ce4465644fc 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -119,8 +119,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); extern void sve_set_vq(unsigned long vq_minus_1); extern void sme_set_vq(unsigned long vq_minus_1); -extern void za_save_state(void *state); -extern void za_load_state(void const *state); +extern void sme_save_state(void *state, int zt); +extern void sme_load_state(void const *state, int zt); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 229436f33df5..6325db1a2179 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -100,25 +100,35 @@ SYM_FUNC_START(sme_set_vq) SYM_FUNC_END(sme_set_vq) /* - * Save the SME state + * Save the ZA and ZT state * * x0 - pointer to buffer for state + * x1 - number of ZT registers to save */ -SYM_FUNC_START(za_save_state) - _sme_rdsvl 1, 1 // x1 = VL/8 - sme_save_za 0, x1, 12 +SYM_FUNC_START(sme_save_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_save_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _str_zt 0 +1: ret -SYM_FUNC_END(za_save_state) +SYM_FUNC_END(sme_save_state) /* - * Load the SME state + * Load the ZA and ZT state * * x0 - pointer to buffer for state + * x1 - number of ZT registers to load */ -SYM_FUNC_START(za_load_state) - _sme_rdsvl 1, 1 // x1 = VL/8 - sme_load_za 0, x1, 12 +SYM_FUNC_START(sme_load_state) + _sme_rdsvl 2, 1 // x2 = VL/8 + sme_load_za 0, x2, 12 // Leaves x0 pointing to the end of ZA + + cbz x1, 1f + _ldr_zt 0 +1: ret -SYM_FUNC_END(za_load_state) +SYM_FUNC_END(sme_load_state) #endif /* CONFIG_ARM64_SME */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 717ae4aaa021..cec8b43e7888 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -429,7 +429,8 @@ static void task_fpsimd_load(void) write_sysreg_s(current->thread.svcr, SYS_SVCR); if (thread_za_enabled(&current->thread)) - za_load_state(current->thread.sme_state); + sme_load_state(current->thread.sme_state, + system_supports_sme2()); if (thread_sm_enabled(&current->thread)) restore_ffr = system_supports_fa64(); @@ -490,7 +491,8 @@ static void fpsimd_save(void) *svcr = read_sysreg_s(SYS_SVCR); if (*svcr & SVCR_ZA_MASK) - za_save_state(last->sme_state); + sme_save_state(last->sme_state, + system_supports_sme2()); /* If we are in streaming mode override regular SVE. */ if (*svcr & SVCR_SM_MASK) { From ee072cf708048c0d718a88159ae7985dd96d316f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:46 +0000 Subject: [PATCH 040/130] arm64/sme: Implement signal handling for ZT Add a new signal context type for ZT which is present in the signal frame when ZA is enabled and ZT is supported by the system. In order to account for the possible addition of further ZT registers in the future we make the number of registers variable in the ABI, though currently the only possible number is 1.
We could just use a bare list head for the context since the number of registers can be inferred from the size of the context but for usability and future extensibility we define a header with the number of registers and some reserved fields in it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-11-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/fpsimd.h | 3 - arch/arm64/include/uapi/asm/sigcontext.h | 19 ++++ arch/arm64/kernel/signal.c | 105 +++++++++++++++++++++++ 3 files changed, 124 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 0ce4465644fc..67f2fb781f59 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -361,9 +361,6 @@ extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); -/* Will move with signal support */ -#define ZT_SIG_REG_SIZE 512 - /* * Return how many bytes of memory are required to store the full SME * specific state for task, given task's currently configured vector diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 9525041e4a14..46e9072985a5 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -152,6 +152,14 @@ struct za_context { __u16 __reserved[3]; }; +#define ZT_MAGIC 0x5a544e01 + +struct zt_context { + struct _aarch64_ctx head; + __u16 nregs; + __u16 __reserved[3]; +}; + #endif /* !__ASSEMBLY__ */ #include @@ -304,4 +312,15 @@ struct za_context { #define ZA_SIG_CONTEXT_SIZE(vq) \ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) +#define ZT_SIG_REG_SIZE 512 + +#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8) + +#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context) + +#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) + +#define ZT_SIG_CONTEXT_SIZE(n) \ + (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) + #endif /* _UAPI__ASM_SIGCONTEXT_H */ diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 27768809dd3e..1c5e557a3617 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -57,6 +57,7 @@ struct rt_sigframe_user_layout { unsigned long esr_offset; unsigned long sve_offset; unsigned long za_offset; + unsigned long zt_offset; unsigned long extra_offset; unsigned long end_offset; }; @@ -221,6 +222,7 @@ struct user_ctxs { struct fpsimd_context __user *fpsimd; struct sve_context __user *sve; struct za_context __user *za; + struct zt_context __user *zt; }; #ifdef CONFIG_ARM64_SVE @@ -447,11 +449,81 @@ static int restore_za_context(struct user_ctxs *user) return 0; } + +static int preserve_zt_context(struct zt_context __user *ctx) +{ + int err = 0; + u16 reserved[ARRAY_SIZE(ctx->__reserved)]; + + if (WARN_ON(!thread_za_enabled(&current->thread))) + return -EINVAL; + + memset(reserved, 0, sizeof(reserved)); + + __put_user_error(ZT_MAGIC, &ctx->head.magic, err); + __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16), + &ctx->head.size, err); + __put_user_error(1, &ctx->nregs, err); + BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); + err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); + + /* + * This assumes that the ZT state has already been saved to + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). + */ + err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET, + thread_zt_state(&current->thread), + ZT_SIG_REGS_SIZE(1)); + + return err ?
-EFAULT : 0; +} + +static int restore_zt_context(struct user_ctxs *user) +{ + int err; + struct zt_context zt; + + /* ZA must be restored first for this check to be valid */ + if (!thread_za_enabled(&current->thread)) + return -EINVAL; + + if (__copy_from_user(&zt, user->zt, sizeof(zt))) + return -EFAULT; + + if (zt.nregs != 1) + return -EINVAL; + + if (zt.head.size != ZT_SIG_CONTEXT_SIZE(zt.nregs)) + return -EINVAL; + + /* + * Careful: we are about to __copy_from_user() directly into + * the task's ZT state with preemption enabled, so protection is + * needed to prevent a racing context switch from writing stale + * registers back over the new data. + */ + + fpsimd_flush_task_state(current); + /* From now, fpsimd_thread_switch() won't touch ZT in thread state */ + + err = __copy_from_user(thread_zt_state(&current->thread), + (char __user const *)user->zt + + ZT_SIG_REGS_OFFSET, + ZT_SIG_REGS_SIZE(1)); + if (err) + return -EFAULT; + + return 0; +} + #else /* ! CONFIG_ARM64_SME */ /* Turn any non-optimised out attempts to use these into a link error: */ extern int preserve_za_context(void __user *ctx); extern int restore_za_context(struct user_ctxs *user); +extern int preserve_zt_context(void __user *ctx); +extern int restore_zt_context(struct user_ctxs *user); #endif /* ! CONFIG_ARM64_SME */ @@ -469,6 +541,7 @@ static int parse_user_sigframe(struct user_ctxs *user, user->fpsimd = NULL; user->sve = NULL; user->za = NULL; + user->zt = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) goto invalid; @@ -547,6 +620,19 @@ static int parse_user_sigframe(struct user_ctxs *user, user->za = (struct za_context __user *)head; break; + case ZT_MAGIC: + if (!system_supports_sme2()) + goto invalid; + + if (user->zt) + goto invalid; + + if (size < sizeof(*user->zt)) + goto invalid; + + user->zt = (struct zt_context __user *)head; + break; + case EXTRA_MAGIC: if (have_extra_context) goto invalid; @@ -669,6 +755,9 @@ static int restore_sigframe(struct pt_regs *regs, if (err == 0 && system_supports_sme() && user.za) err = restore_za_context(&user); + if (err == 0 && system_supports_sme2() && user.zt) + err = restore_zt_context(&user); + return err; } @@ -769,6 +858,15 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, return err; } + if (system_supports_sme2()) { + if (add_all || thread_za_enabled(&current->thread)) { + err = sigframe_alloc(user, &user->zt_offset, + ZT_SIG_CONTEXT_SIZE(1)); + if (err) + return err; + } + } + return sigframe_alloc_end(user); } @@ -824,6 +922,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_za_context(za_ctx); } + /* ZT state if present */ + if (system_supports_sme2() && err == 0 && user->zt_offset) { + struct zt_context __user *zt_ctx = + apply_user_offset(user, user->zt_offset); + err |= preserve_zt_context(zt_ctx); + } + if (err == 0 && user->extra_offset) { char __user *sfp = (char __user *)user->sigframe; char __user *userp = From f90b529bcbe57c66da2b6a0d3c7c81e8567af63d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:47 +0000 Subject: [PATCH 041/130] arm64/sme: Implement ZT0 ptrace support Implement support for a new note type NT_ARM_ZT providing access to ZT0 when implemented. Since ZT0 is a register with constant size, this is much simpler than for other SME state. As ZT0 is only accessible when PSTATE.ZA is set, writes to ZT0 cause PSTATE.ZA to be set; the main alternative would be to return -EBUSY in this case, but this seemed more constructive.
Practical users are also going to be working with ZA anyway and have some understanding of the state. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-12-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 54 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/elf.h | 1 + 2 files changed, 55 insertions(+) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 67256e7772e4..38be7ca202af 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1138,6 +1138,51 @@ static int za_set(struct task_struct *target, return ret; } +static int zt_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + if (!system_supports_sme2()) + return -EINVAL; + + /* + * If PSTATE.ZA is not set then ZT will be zeroed when it is + * enabled so report the current register value as zero. + */ + if (thread_za_enabled(&target->thread)) + membuf_write(&to, thread_zt_state(&target->thread), + ZT_SIG_REG_BYTES); + else + membuf_zero(&to, ZT_SIG_REG_BYTES); + + return 0; +} + +static int zt_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!system_supports_sme2()) + return -EINVAL; + + if (!thread_za_enabled(&target->thread)) { + sme_alloc(target); + if (!target->thread.sme_state) + return -ENOMEM; + } + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + thread_zt_state(&target->thread), + 0, ZT_SIG_REG_BYTES); + if (ret == 0) + target->thread.svcr |= SVCR_ZA_MASK; + + return ret; +} + #endif /* CONFIG_ARM64_SME */ #ifdef CONFIG_ARM64_PTR_AUTH @@ -1360,6 +1405,7 @@ enum aarch64_regset { #ifdef CONFIG_ARM64_SVE REGSET_SSVE, REGSET_ZA, + REGSET_ZT, #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, @@ -1467,6 +1513,14 @@ static const struct user_regset aarch64_regsets[] = { .regset_get = za_get, .set = za_set, }, + [REGSET_ZT] = { /* SME ZT */ + .core_note_type = NT_ARM_ZT, + .n = 1, + .size = ZT_SIG_REG_BYTES, + .align = sizeof(u64), + .regset_get = zt_get, + .set = zt_set, + }, #endif #ifdef CONFIG_ARM64_PTR_AUTH [REGSET_PAC_MASK] = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 4c6a8fa5e7ed..68de6f4c4eee 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -434,6 +434,7 @@ typedef struct elf64_shdr { #define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ #define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */ #define NT_ARM_ZA 0x40c /* ARM SME ZA registers */ +#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ From 7d5d8601e4577c918fdb5fa922c634a73557ac0f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:48 +0000 Subject: [PATCH 042/130] arm64/sme: Add hwcaps for SME 2 and 2.1 features In order to allow userspace to discover the presence of the new SME features add hwcaps for them. 
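Userspace can then detect the new features in the usual way, for example (an illustrative snippet, not part of the patch):

  #include <stdio.h>
  #include <sys/auxv.h>
  #include <asm/hwcap.h>

  int main(void)
  {
  	/* AT_HWCAP2 carries the HWCAP2_* bits defined below */
  	if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
  		printf("SME2 supported, ZT0 is available\n");

  	return 0;
  }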
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-13-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/elf_hwcaps.rst | 18 ++++++++++++++++++ arch/arm64/include/asm/hwcap.h | 6 ++++++ arch/arm64/include/uapi/asm/hwcap.h | 6 ++++++ arch/arm64/kernel/cpufeature.c | 14 ++++++++++++++ arch/arm64/kernel/cpuinfo.c | 6 ++++++ 5 files changed, 50 insertions(+) diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst index 6fed84f935df..8a9d4bf7daf4 100644 --- a/Documentation/arm64/elf_hwcaps.rst +++ b/Documentation/arm64/elf_hwcaps.rst @@ -284,6 +284,24 @@ HWCAP2_RPRFM HWCAP2_SVE2P1 Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010. +HWCAP2_SME2 + Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001. + +HWCAP2_SME2P1 + Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0010. + +HWCAP2_SMEI16I32 + Functionality implied by ID_AA64SMFR0_EL1.I16I32 == 0b0101 + +HWCAP2_SMEBI32I32 + Functionality implied by ID_AA64SMFR0_EL1.BI32I32 == 0b1 + +HWCAP2_SMEB16B16 + Functionality implied by ID_AA64SMFR0_EL1.B16B16 == 0b1 + +HWCAP2_SMEF16F16 + Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1 + 4. Unused AT_HWCAP bits ----------------------- diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 06dd12c514e6..475c803ecf42 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -123,6 +123,12 @@ #define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC) #define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM) #define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1) +#define KERNEL_HWCAP_SME2 __khwcap2_feature(SME2) +#define KERNEL_HWCAP_SME2P1 __khwcap2_feature(SME2P1) +#define KERNEL_HWCAP_SME_I16I32 __khwcap2_feature(SME_I16I32) +#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32) +#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16) +#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index b713d30544f1..69a4fb749c65 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -96,5 +96,11 @@ #define HWCAP2_CSSC (1UL << 34) #define HWCAP2_RPRFM (1UL << 35) #define HWCAP2_SVE2P1 (1UL << 36) +#define HWCAP2_SME2 (1UL << 37) +#define HWCAP2_SME2P1 (1UL << 38) +#define HWCAP2_SME_I16I32 (1UL << 39) +#define HWCAP2_SME_BI32I32 (1UL << 40) +#define HWCAP2_SME_B16B16 (1UL << 41) +#define HWCAP2_SME_F16F16 (1UL << 42) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index fd90905bc2e6..5bd959bd9a1f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -288,12 +288,20 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, 
ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), + FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), ARM64_FTR_END, @@ -2841,11 +2849,17 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { #ifdef CONFIG_ARM64_SME HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16B16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_BI32I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), #endif /* CONFIG_ARM64_SME */ {}, diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 379695262b77..85e54417d141 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -119,6 +119,12 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_CSSC] = "cssc", [KERNEL_HWCAP_RPRFM] = "rprfm", [KERNEL_HWCAP_SVE2P1] = "sve2p1", + [KERNEL_HWCAP_SME2] = "sme2", + [KERNEL_HWCAP_SME2P1] = "sme2p1", + [KERNEL_HWCAP_SME_I16I32] = "smei16i32", + [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32", + [KERNEL_HWCAP_SME_B16B16] = "smeb16b16", + [KERNEL_HWCAP_SME_F16F16] = "smef16f16", }; #ifdef CONFIG_COMPAT From 1c07425e902cd3137961c3d45b4271bf8a9b8eb9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:49 +0000 Subject: [PATCH 043/130] kselftest/arm64: Add a stress test program for ZT0 Following the pattern for the
other register sets add a stress test program for ZT0 which continually loads and verifies patterns in the register in an effort to discover context switching problems. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-14-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 3 + tools/testing/selftests/arm64/fp/sme-inst.h | 20 ++ tools/testing/selftests/arm64/fp/zt-test.S | 317 ++++++++++++++++++++ 4 files changed, 341 insertions(+) create mode 100644 tools/testing/selftests/arm64/fp/zt-test.S diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index df79d29664a1..41bde4c97d47 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -12,3 +12,4 @@ vlset za-fork za-ptrace za-test +zt-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index 36db61358ed5..aff3026d3dff 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -14,6 +14,7 @@ TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ sve-test \ ssve-test \ za-test \ + zt-test \ vlset TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress @@ -41,5 +42,7 @@ $(OUTPUT)/za-fork: za-fork.c $(OUTPUT)/za-fork-asm.o $(OUTPUT)/za-ptrace: za-ptrace.c $(OUTPUT)/za-test: za-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ +$(OUTPUT)/zt-test: zt-test.S $(OUTPUT)/asm-utils.o + $(CC) -nostdlib $^ -o $@ include ../../lib.mk diff --git a/tools/testing/selftests/arm64/fp/sme-inst.h b/tools/testing/selftests/arm64/fp/sme-inst.h index 7191e53ca1c0..9292bba5400b 100644 --- a/tools/testing/selftests/arm64/fp/sme-inst.h +++ b/tools/testing/selftests/arm64/fp/sme-inst.h @@ -48,4 +48,24 @@ | ((\offset) & 7) .endm +/* + * LDR (ZT0) + * + * LDR ZT0, nx + */ +.macro _ldr_zt nx + .inst 0xe11f8000 \ + | (((\nx) & 0x1f) << 5) +.endm + +/* + * STR (ZT0) + * + * STR ZT0, nx + */ +.macro _str_zt nx + .inst 0xe13f8000 \ + | (((\nx) & 0x1f) << 5) +.endm + #endif diff --git a/tools/testing/selftests/arm64/fp/zt-test.S b/tools/testing/selftests/arm64/fp/zt-test.S new file mode 100644 index 000000000000..7ec90976cf5e --- /dev/null +++ b/tools/testing/selftests/arm64/fp/zt-test.S @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021-2 ARM Limited. +// Original author: Mark Brown +// +// Scalable Matrix Extension ZT context switch test +// Repeatedly writes unique test patterns into ZT0 +// and reads them back to verify integrity. + +#include +#include "assembler.h" +#include "asm-offsets.h" +#include "sme-inst.h" + +.arch_extension sve + +#define ZT_SZ 512 +#define ZT_B (ZT_SZ / 8) + +// Declare some storage space to shadow ZT register contents and a +// scratch buffer. 
+.pushsection .text +.data +.align 4 +ztref: + .space ZT_B +scratch: + .space ZT_B +.popsection + + +// Generate a test pattern for storage in ZT +// x0: pid +// x1: generation + +// These values are used to construct a 32-bit pattern that is repeated in the +// scratch buffer as many times as will fit: +// bits 31:24 generation number (increments once per test_loop) +// bits 23: 8 pid +// bits 7: 0 32-bit lane index + +function pattern + mov w3, wzr + bfi w3, w0, #8, #16 // PID + bfi w3, w1, #24, #8 // Generation + + ldr x0, =scratch + mov w1, #ZT_B / 4 + +0: str w3, [x0], #4 + add w3, w3, #1 // Lane + subs w1, w1, #1 + b.ne 0b + + ret +endfunction + +// Set up test pattern in a ZT horizontal vector +// x0: pid +// x1: generation +function setup_zt + mov x4, x30 + + bl pattern // Get pattern in scratch buffer + ldr x0, =ztref + ldr x1, =scratch + mov x2, #ZT_B + bl memcpy + + ldr x0, =ztref + _ldr_zt 0 // load zt0 from pointer x0 + + ret x4 +endfunction + +// Trivial memory compare: compare x2 bytes starting at address x0 with +// bytes starting at address x1. +// Returns only if all bytes match; otherwise, the program is aborted. +// Clobbers x0-x5. +function memcmp + cbz x2, 2f + + stp x0, x1, [sp, #-0x20]! + str x2, [sp, #0x10] + + mov x5, #0 +0: ldrb w3, [x0, x5] + ldrb w4, [x1, x5] + add x5, x5, #1 + cmp w3, w4 + b.ne 1f + subs x2, x2, #1 + b.ne 0b + +1: ldr x2, [sp, #0x10] + ldp x0, x1, [sp], #0x20 + b.ne barf + +2: ret +endfunction + +// Verify that a ZT vector matches its shadow in memory, else abort +// Clobbers x0-x3 +function check_zt + mov x3, x30 + + ldr x0, =scratch // Poison scratch + mov x1, #ZT_B + bl memfill_ae + + ldr x0, =scratch + _str_zt 0 + + ldr x0, =ztref + ldr x1, =scratch + mov x2, #ZT_B + mov x30, x3 + b memcmp +endfunction + +// Any SME register modified here can cause corruption in the main +// thread -- but *only* the locations modified here. +function irritator_handler + // Increment the irritation signal count (x23): + ldr x0, [x2, #ucontext_regs + 8 * 23] + add x0, x0, #1 + str x0, [x2, #ucontext_regs + 8 * 23] + + // Corrupt some random ZT data +#if 0 + adr x0, .text + (irritator_handler - .text) / 16 * 16 + movi v0.8b, #1 + movi v9.16b, #2 + movi v31.8b, #3 +#endif + + ret +endfunction + +function tickle_handler + // Increment the signal count (x23): + ldr x0, [x2, #ucontext_regs + 8 * 23] + add x0, x0, #1 + str x0, [x2, #ucontext_regs + 8 * 23] + + ret +endfunction + +function terminate_handler + mov w21, w0 + mov x20, x2 + + puts "Terminated by signal " + mov w0, w21 + bl putdec + puts ", no error, iterations=" + ldr x0, [x20, #ucontext_regs + 8 * 22] + bl putdec + puts ", signals=" + ldr x0, [x20, #ucontext_regs + 8 * 23] + bl putdecn + + mov x0, #0 + mov x8, #__NR_exit + svc #0 +endfunction + +// w0: signal number +// x1: sa_action +// w2: sa_flags +// Clobbers x0-x6,x8 +function setsignal + str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]! 
+ + mov w4, w0 + mov x5, x1 + mov w6, w2 + + add x0, sp, #16 + mov x1, #sa_sz + bl memclr + + mov w0, w4 + add x1, sp, #16 + str w6, [x1, #sa_flags] + str x5, [x1, #sa_handler] + mov x2, #0 + mov x3, #sa_mask_sz + mov x8, #__NR_rt_sigaction + svc #0 + + cbz w0, 1f + + puts "sigaction failure\n" + b .Labort + +1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16) + ret +endfunction + +// Main program entry point +.globl _start +function _start +_start: + mov x23, #0 // signal count + + mov w0, #SIGINT + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGTERM + adr x1, terminate_handler + mov w2, #SA_SIGINFO + bl setsignal + + mov w0, #SIGUSR1 + adr x1, irritator_handler + mov w2, #SA_SIGINFO + orr w2, w2, #SA_NODEFER + bl setsignal + + mov w0, #SIGUSR2 + adr x1, tickle_handler + mov w2, #SA_SIGINFO + orr w2, w2, #SA_NODEFER + bl setsignal + + smstart_za + + // Obtain our PID, to ensure test pattern uniqueness between processes + mov x8, #__NR_getpid + svc #0 + mov x20, x0 + + puts "PID:\t" + mov x0, x20 + bl putdecn + + mov x22, #0 // generation number, increments per iteration +.Ltest_loop: + mov x0, x20 + mov x1, x22 + bl setup_zt + + mov x8, #__NR_sched_yield // Encourage preemption + svc #0 + + mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0 + and x1, x0, #3 + cmp x1, #2 + b.ne svcr_barf + + bl check_zt + + add x22, x22, #1 // Everything still working + b .Ltest_loop + +.Labort: + mov x0, #0 + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +endfunction + +function barf +// fpsimd.c activity log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// end hack + smstop + mov x10, x0 // expected data + mov x11, x1 // actual data + mov x12, x2 // data size + + puts "Mismatch: PID=" + mov x0, x20 + bl putdec + puts ", iteration=" + mov x0, x22 + bl putdec + puts "\tExpected [" + mov x0, x10 + mov x1, x12 + bl dumphex + puts "]\n\tGot [" + mov x0, x11 + mov x1, x12 + bl dumphex + puts "]\n" + + mov x8, #__NR_getpid + svc #0 +// fpsimd.c activity log dump hack +// ldr w0, =0xdeadc0de +// mov w8, #__NR_exit +// svc #0 +// ^ end of hack + mov x1, #SIGABRT + mov x8, #__NR_kill + svc #0 +// mov x8, #__NR_exit +// mov x1, #1 +// svc #0 +endfunction + +function svcr_barf + mov x10, x0 + + puts "Bad SVCR: " + mov x0, x10 + bl putdecn + + mov x8, #__NR_exit + mov x1, #1 + svc #0 +endfunction From f63a9f15b2d4a8c9588a9260d49bc3890118fece Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:50 +0000 Subject: [PATCH 044/130] kselftest/arm64: Cover ZT in the FP stress test Hook up the newly added zt-test program in the FPSIMD stress tests, starting a copy per CPU when SME2 is supported.
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-15-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/fp-stress.c | 29 ++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c index f8b2f41aac36..520385fcfede 100644 --- a/tools/testing/selftests/arm64/fp/fp-stress.c +++ b/tools/testing/selftests/arm64/fp/fp-stress.c @@ -370,6 +370,19 @@ static void start_za(struct child_data *child, int vl, int cpu) ksft_print_msg("Started %s\n", child->name); } +static void start_zt(struct child_data *child, int cpu) +{ + int ret; + + ret = asprintf(&child->name, "ZT-%d", cpu); + if (ret == -1) + ksft_exit_fail_msg("asprintf() failed\n"); + + child_start(child, "./zt-test"); + + ksft_print_msg("Started %s\n", child->name); +} + static void probe_vls(int vls[], int *vl_count, int set_vl) { unsigned int vq; @@ -426,6 +439,7 @@ int main(int argc, char **argv) bool all_children_started = false; int seen_children; int sve_vls[MAX_VLS], sme_vls[MAX_VLS]; + bool have_sme2; struct sigaction sa; while ((c = getopt_long(argc, argv, "t:", options, NULL)) != -1) { @@ -458,6 +472,13 @@ int main(int argc, char **argv) sme_vl_count = 0; } + if (getauxval(AT_HWCAP2) & HWCAP2_SME2) { + tests += cpus; + have_sme2 = true; + } else { + have_sme2 = false; + } + /* Force context switching if we only have FPSIMD */ if (!sve_vl_count && !sme_vl_count) fpsimd_per_cpu = 2; @@ -468,8 +489,9 @@ int main(int argc, char **argv) ksft_print_header(); ksft_set_plan(tests); - ksft_print_msg("%d CPUs, %d SVE VLs, %d SME VLs\n", - cpus, sve_vl_count, sme_vl_count); + ksft_print_msg("%d CPUs, %d SVE VLs, %d SME VLs, SME2 %s\n", + cpus, sve_vl_count, sme_vl_count, + have_sme2 ? "present" : "absent"); if (timeout > 0) ksft_print_msg("Will run for %ds\n", timeout); @@ -527,6 +549,9 @@ int main(int argc, char **argv) start_ssve(&children[num_children++], sme_vls[j], i); start_za(&children[num_children++], sme_vls[j], i); } + + if (have_sme2) + start_zt(&children[num_children++], i); } /* From 63829373260898bdd2eb95a03e2e1c936998d6ca Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:51 +0000 Subject: [PATCH 045/130] kselftest/arm64: Enumerate SME2 in the signal test utility code Support test cases for SME2 by adding it to the set of features that we enumerate so test cases can check for it. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-16-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/test_signals.h | 2 ++ tools/testing/selftests/arm64/signal/test_signals_utils.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h index 0c645834ddc3..1e6273d81575 100644 --- a/tools/testing/selftests/arm64/signal/test_signals.h +++ b/tools/testing/selftests/arm64/signal/test_signals.h @@ -34,6 +34,7 @@ enum { FSVE_BIT, FSME_BIT, FSME_FA64_BIT, + FSME2_BIT, FMAX_END }; @@ -41,6 +42,7 @@ enum { #define FEAT_SVE (1UL << FSVE_BIT) #define FEAT_SME (1UL << FSME_BIT) #define FEAT_SME_FA64 (1UL << FSME_FA64_BIT) +#define FEAT_SME2 (1UL << FSME2_BIT) /* * A descriptor used to describe and configure a test case. 
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c index 308e229e58ab..07f518f0e58d 100644 --- a/tools/testing/selftests/arm64/signal/test_signals_utils.c +++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c @@ -29,6 +29,7 @@ static char const *const feats_names[FMAX_END] = { " SVE ", " SME ", " FA64 ", + " SME2 ", }; #define MAX_FEATS_SZ 128 @@ -323,6 +324,8 @@ int test_init(struct tdescr *td) td->feats_supported |= FEAT_SME; if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) td->feats_supported |= FEAT_SME_FA64; + if (getauxval(AT_HWCAP2) & HWCAP2_SME2) + td->feats_supported |= FEAT_SME2; if (feats_ok(td)) { if (td->feats_required & td->feats_supported) fprintf(stderr, From afe6f182752625cadf4ff97613bd2f362383bcbf Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:52 +0000 Subject: [PATCH 046/130] kselftest/arm64: Teach the generic signal context validation about ZT Add ZT to the set of signal contexts that the shared code understands and validates the form of. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-17-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- .../arm64/signal/testcases/testcases.c | 36 +++++++++++++++++++ .../arm64/signal/testcases/testcases.h | 1 + 2 files changed, 37 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index d2eda7b5de26..27d495fa52f8 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -108,6 +108,26 @@ bool validate_za_context(struct za_context *za, char **err) return true; } +bool validate_zt_context(struct zt_context *zt, char **err) +{ + if (!zt || !err) + return false; + + /* If the context is present there should be at least one register */ + if (zt->nregs == 0) { + *err = "no registers"; + return false; + } + + /* Size should agree with the number of registers */ + if (zt->head.size != ZT_SIG_CONTEXT_SIZE(zt->nregs)) { + *err = "register count does not match size"; + return false; + } + + return true; +} + bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) { bool terminated = false; struct extra_context *extra = NULL; struct sve_context *sve = NULL; struct za_context *za = NULL; + struct zt_context *zt = NULL; struct _aarch64_ctx *head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved; void *extra_data = NULL; @@ -177,6 +198,13 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) za = (struct za_context *)head; new_flags |= ZA_CTX; break; + case ZT_MAGIC: + if (flags & ZT_CTX) + *err = "Multiple ZT_MAGIC"; + /* Size is validated in validate_zt_context() */ + zt = (struct zt_context *)head; + new_flags |= ZT_CTX; + break; + case EXTRA_MAGIC: if (flags & EXTRA_CTX) *err = "Multiple EXTRA_MAGIC"; @@ -234,6 +262,9 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) if (new_flags & ZA_CTX) if (!validate_za_context(za, err)) return false; + if (new_flags & ZT_CTX) + if (!validate_zt_context(zt, err)) + return false; flags |= new_flags; @@ -245,6 +276,11 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) return false; } + if (terminated && (flags & ZT_CTX) && !(flags & ZA_CTX)) { + *err = "ZT context but no ZA context"; + return false; + } + return true; } diff --git
a/tools/testing/selftests/arm64/signal/testcases/testcases.h b/tools/testing/selftests/arm64/signal/testcases/testcases.h index 040afded0b76..a08ab0d6207a 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.h +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.h @@ -18,6 +18,7 @@ #define SVE_CTX (1 << 1) #define ZA_CTX (1 << 2) #define EXTRA_CTX (1 << 3) +#define ZT_CTX (1 << 4) #define KSFT_BAD_MAGIC 0xdeadbeef From 18f8729ab3d56ec0bfd980d6b3660a50af43b82a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:53 +0000 Subject: [PATCH 047/130] kselftest/arm64: Add test coverage for ZT register signal frames We should have a ZT register frame with the expected size when ZA is enabled and have no ZT frame when ZA is disabled. Since we don't load any data into ZT we expect it to be all zeros, as the architecture guarantees ZT is set to 0 when ZA is enabled. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-18-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/signal/.gitignore | 1 + .../arm64/signal/testcases/zt_no_regs.c | 51 +++++++++++ .../arm64/signal/testcases/zt_regs.c | 85 +++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c create mode 100644 tools/testing/selftests/arm64/signal/testcases/zt_regs.c diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore index e8d2b57f73ec..b7fbb65183e8 100644 --- a/tools/testing/selftests/arm64/signal/.gitignore +++ b/tools/testing/selftests/arm64/signal/.gitignore @@ -5,4 +5,5 @@ sme_* ssve_* sve_* za_* +zt_* !*.[ch] diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c new file mode 100644 index 000000000000..34f69bcf821e --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that no ZT frame is present in the signal context when ZA + * is disabled. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +static union { + ucontext_t uc; + char buf[1024 * 128]; +} context; + +int zt_no_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + size_t offset; + struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context); + + /* + * Get a signal context which should not have a ZT frame or + * registers in it.
+ */ + if (!get_current_context(td, &context.uc, sizeof(context))) + return 1; + + head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset); + if (head) { + fprintf(stderr, "Got unexpected ZT context\n"); + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "ZT register data not present", + .descr = "Validate that ZT is not present when ZA is disabled", + .feats_required = FEAT_SME2, + .timeout = 3, + .sanity_disabled = true, + .run = zt_no_regs_run, +}; diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c new file mode 100644 index 000000000000..e1eb4d5c027a --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that a ZT frame with the expected register data is present + * in the signal context when ZA is enabled. + */ + +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +static union { + ucontext_t uc; + char buf[1024 * 128]; +} context; + +static void enable_za(void) +{ + /* smstart za; real data is TODO */ + asm volatile(".inst 0xd503457f" : : : ); +} + +int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + size_t offset; + struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context); + struct zt_context *zt; + char *zeros; + + /* + * Get a signal context which should have a ZT frame and registers + * in it. + */ + enable_za(); + if (!get_current_context(td, &context.uc, sizeof(context))) + return 1; + + head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset); + if (!head) { + fprintf(stderr, "No ZT context\n"); + return 1; + } + + zt = (struct zt_context *)head; + if (zt->nregs == 0) { + fprintf(stderr, "Got context with no registers\n"); + return 1; + } + + fprintf(stderr, "Got expected size %u for %d registers\n", + head->size, zt->nregs); + + /* We didn't load any data into ZT so it should be all zeros */ + zeros = malloc(ZT_SIG_REGS_SIZE(zt->nregs)); + if (!zeros) { + fprintf(stderr, "Out of memory, nregs=%u\n", zt->nregs); + return 1; + } + memset(zeros, 0, ZT_SIG_REGS_SIZE(zt->nregs)); + + if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET, + ZT_SIG_REGS_SIZE(zt->nregs)) != 0) { + fprintf(stderr, "ZT data invalid\n"); + return 1; + } + + free(zeros); + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "ZT register data", + .descr = "Validate that ZT is present and has data when ZA is enabled", + .feats_required = FEAT_SME2, + .timeout = 3, + .sanity_disabled = true, + .run = zt_regs_run, +}; From 49886aa9ab33b1825439015a6b2c91ed9b294ba3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:54 +0000 Subject: [PATCH 048/130] kselftest/arm64: Add SME2 coverage to syscall-abi Verify that ZT0 is preserved over syscalls when it is present and PSTATE.ZA is set.
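The assembly helper checks for ZT0 by reading ID_AA64SMFR0_EL1 directly, relying on the kernel's emulation of ID register reads from EL0. In C the check it performs is roughly the following (a sketch; the register encoding, field shift and width match the defines added to syscall-abi-asm.S):

  #include <stdbool.h>
  #include <stdint.h>

  static bool have_zt0(void)
  {
  	uint64_t smfr0;

  	/* ID_AA64SMFR0_EL1, trapped and emulated for EL0 by the kernel */
  	asm("mrs %0, S3_0_C0_C4_5" : "=r" (smfr0));

  	/* SMEver (bits [59:56]) != 0b0000 implies SME2 and hence ZT0 */
  	return ((smfr0 >> 56) & 0xf) != 0;
  }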
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-19-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/abi/syscall-abi-asm.S | 43 ++++++++++++++++++- .../testing/selftests/arm64/abi/syscall-abi.c | 40 ++++++++++++++++- 2 files changed, 80 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S index acd5e9f3bc0b..6ddf392329c9 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S +++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S @@ -23,6 +23,9 @@ .arch_extension sve +#define ID_AA64SMFR0_EL1_SMEver_SHIFT 56 +#define ID_AA64SMFR0_EL1_SMEver_WIDTH 4 + /* * LDR (vector to ZA array): * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL] @@ -45,6 +48,26 @@ | ((\offset) & 7) .endm +/* + * LDR (ZT0) + * + * LDR ZT0, nx + */ +.macro _ldr_zt nx + .inst 0xe11f8000 \ + | (((\nx) & 0x1f) << 5) +.endm + +/* + * STR (ZT0) + * + * STR ZT0, nx + */ +.macro _str_zt nx + .inst 0xe13f8000 \ + | (((\nx) & 0x1f) << 5) +.endm + .globl do_syscall do_syscall: // Store callee saved registers x19-x29 (80 bytes) plus x0 and x1 @@ -64,7 +87,7 @@ do_syscall: msr S3_3_C4_C2_2, x2 1: - // Load ZA if it's enabled - uses x12 as scratch due to SME LDR + // Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR tbz x2, #SVCR_ZA_SHIFT, 1f mov w12, #0 ldr x2, =za_in @@ -73,6 +96,15 @@ do_syscall: add x12, x12, #1 cmp x1, x12 bne 2b + + // ZT0 + mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1 + ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \ + #ID_AA64SMFR0_EL1_SMEver_WIDTH + cbz x2, 1f + adrp x2, zt_in + add x2, x2, :lo12:zt_in + _ldr_zt 2 1: // Load GPRs x8-x28, and save our SP/FP for later comparison @@ -235,6 +267,15 @@ do_syscall: add x12, x12, #1 cmp x1, x12 bne 2b + + // ZT0 + mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1 + ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \ + #ID_AA64SMFR0_EL1_SMEver_WIDTH + cbz x2, 1f + adrp x2, zt_out + add x2, x2, :lo12:zt_out + _str_zt 2 1: // Save the SVE state if we have some diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index dd7ebe536d05..9800f9dc6b35 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -311,6 +311,35 @@ static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, return errors; } +uint8_t zt_in[ZT_SIG_REG_BYTES] __attribute__((aligned(16))); +uint8_t zt_out[ZT_SIG_REG_BYTES] __attribute__((aligned(16))); + +static void setup_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + fill_random(zt_in, sizeof(zt_in)); + memset(zt_out, 0, sizeof(zt_out)); +} + +static int check_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl, + uint64_t svcr) +{ + int errors = 0; + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2)) + return 0; + + if (!(svcr & SVCR_ZA_MASK)) + return 0; + + if (memcmp(zt_in, zt_out, sizeof(zt_in)) != 0) { + ksft_print_msg("SME VL %d ZT does not match\n", sme_vl); + errors++; + } + + return errors; +} + typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr); typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl, @@ -334,6 +363,7 @@ static struct { { setup_ffr, check_ffr }, { setup_svcr, check_svcr }, { setup_za, check_za }, + { setup_zt, check_zt }, }; static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl, @@ -474,6 +504,7 @@ int main(void) { int i; int tests = 1; /* FPSIMD 
*/ + int sme_ver; srandom(getpid()); @@ -482,10 +513,15 @@ int main(void) tests += (sve_count_vls() * sme_count_vls()) * 3; ksft_set_plan(ARRAY_SIZE(syscalls) * tests); + if (getauxval(AT_HWCAP2) & HWCAP2_SME2) + sme_ver = 2; + else + sme_ver = 1; + if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64) - ksft_print_msg("SME with FA64\n"); + ksft_print_msg("SME%d with FA64\n", sme_ver); else if (getauxval(AT_HWCAP2) & HWCAP2_SME) - ksft_print_msg("SME without FA64\n"); + ksft_print_msg("SME%d without FA64\n", sme_ver); for (i = 0; i < ARRAY_SIZE(syscalls); i++) test_one_syscall(&syscalls[i]); From 4e1aa1a18f1becb479c3627b73f1b7ebe2d46924 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:55 +0000 Subject: [PATCH 049/130] kselftest/arm64: Add coverage of the ZT ptrace regset Add coverage of the ZT ptrace interface. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-20-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/.gitignore | 1 + tools/testing/selftests/arm64/fp/Makefile | 2 + tools/testing/selftests/arm64/fp/zt-ptrace.c | 365 +++++++++++++++++++ 3 files changed, 368 insertions(+) create mode 100644 tools/testing/selftests/arm64/fp/zt-ptrace.c diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index 41bde4c97d47..ebc86757bdd8 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -12,4 +12,5 @@ vlset za-fork za-ptrace za-test +zt-ptrace zt-test diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index aff3026d3dff..50a70220ba6c 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -14,6 +14,7 @@ TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \ sve-test \ ssve-test \ za-test \ + zt-ptrace \ zt-test \ vlset TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress @@ -42,6 +43,7 @@ $(OUTPUT)/za-fork: za-fork.c $(OUTPUT)/za-fork-asm.o $(OUTPUT)/za-ptrace: za-ptrace.c $(OUTPUT)/za-test: za-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ +$(OUTPUT)/zt-ptrace: zt-ptrace.c $(OUTPUT)/zt-test: zt-test.S $(OUTPUT)/asm-utils.o $(CC) -nostdlib $^ -o $@ diff --git a/tools/testing/selftests/arm64/fp/zt-ptrace.c b/tools/testing/selftests/arm64/fp/zt-ptrace.c new file mode 100644 index 000000000000..996d9614a131 --- /dev/null +++ b/tools/testing/selftests/arm64/fp/zt-ptrace.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 ARM Limited. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../kselftest.h" + +/* and don't like each other, so: */ +#ifndef NT_ARM_ZA +#define NT_ARM_ZA 0x40c +#endif +#ifndef NT_ARM_ZT +#define NT_ARM_ZT 0x40d +#endif + +#define EXPECTED_TESTS 3 + +static int sme_vl; + +static void fill_buf(char *buf, size_t size) +{ + int i; + + for (i = 0; i < size; i++) + buf[i] = random(); +} + +static int do_child(void) +{ + if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) + ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno)); + + if (raise(SIGSTOP)) + ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno)); + + return EXIT_SUCCESS; +} + +static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size) +{ + struct user_za_header *za; + void *p; + size_t sz = sizeof(*za); + struct iovec iov; + + while (1) { + if (*size < sz) { + p = realloc(*buf, sz); + if (!p) { + errno = ENOMEM; + goto error; + } + + *buf = p; + *size = sz; + } + + iov.iov_base = *buf; + iov.iov_len = sz; + if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov)) + goto error; + + za = *buf; + if (za->size <= sz) + break; + + sz = za->size; + } + + return za; + +error: + return NULL; +} + +static int set_za(pid_t pid, const struct user_za_header *za) +{ + struct iovec iov; + + iov.iov_base = (void *)za; + iov.iov_len = za->size; + return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov); +} + +static int get_zt(pid_t pid, char zt[ZT_SIG_REG_BYTES]) +{ + struct iovec iov; + + iov.iov_base = zt; + iov.iov_len = ZT_SIG_REG_BYTES; + return ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZT, &iov); +} + + +static int set_zt(pid_t pid, const char zt[ZT_SIG_REG_BYTES]) +{ + struct iovec iov; + + iov.iov_base = (void *)zt; + iov.iov_len = ZT_SIG_REG_BYTES; + return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZT, &iov); +} + +/* Reading with ZA disabled returns all zeros */ +static void ptrace_za_disabled_read_zt(pid_t child) +{ + struct user_za_header za; + char zt[ZT_SIG_REG_BYTES]; + int ret, i; + bool fail = false; + + /* Disable PSTATE.ZA using the ZA interface */ + memset(&za, 0, sizeof(za)); + za.vl = sme_vl; + za.size = sizeof(za); + + ret = set_za(child, &za); + if (ret != 0) { + ksft_print_msg("Failed to disable ZA\n"); + fail = true; + } + + /* Read back ZT */ + ret = get_zt(child, zt); + if (ret != 0) { + ksft_print_msg("Failed to read ZT\n"); + fail = true; + } + + for (i = 0; i < ARRAY_SIZE(zt); i++) { + if (zt[i]) { + ksft_print_msg("zt[%d]: 0x%x != 0\n", i, zt[i]); + fail = true; + } + } + + ksft_test_result(!fail, "ptrace_za_disabled_read_zt\n"); +} + +/* Writing then reading ZT should return the data written */ +static void ptrace_set_get_zt(pid_t child) +{ + char zt_in[ZT_SIG_REG_BYTES]; + char zt_out[ZT_SIG_REG_BYTES]; + int ret, i; + bool fail = false; + + fill_buf(zt_in, sizeof(zt_in)); + + ret = set_zt(child, zt_in); + if (ret != 0) { + ksft_print_msg("Failed to set ZT\n"); + fail = true; + } + + ret = get_zt(child, zt_out); + if (ret != 0) { + ksft_print_msg("Failed to read ZT\n"); + fail = true; + } + + for (i = 0; i < ARRAY_SIZE(zt_in); i++) { + if (zt_in[i] != zt_out[i]) { + ksft_print_msg("zt[%d]: 0x%x != 0x%x\n", i, + zt_in[i], zt_out[i]); + fail = true; + } + } + + ksft_test_result(!fail, "ptrace_set_get_zt\n"); +} + +/* Writing ZT should set PSTATE.ZA */ +static void ptrace_enable_za_via_zt(pid_t child) +{ + struct user_za_header za_in; + struct user_za_header *za_out; + char zt[ZT_SIG_REG_BYTES]; + char *za_data; + size_t 
za_out_size; + int ret, i, vq; + bool fail = false; + + /* Disable PSTATE.ZA using the ZA interface */ + memset(&za_in, 0, sizeof(za_in)); + za_in.vl = sme_vl; + za_in.size = sizeof(za_in); + + ret = set_za(child, &za_in); + if (ret != 0) { + ksft_print_msg("Failed to disable ZA\n"); + fail = true; + } + + /* Write ZT */ + fill_buf(zt, sizeof(zt)); + ret = set_zt(child, zt); + if (ret != 0) { + ksft_print_msg("Failed to set ZT\n"); + fail = true; + } + + /* Read back ZA and check for register data */ + za_out = NULL; + za_out_size = 0; + if (get_za(child, (void **)&za_out, &za_out_size)) { + /* Should have an unchanged VL */ + if (za_out->vl != sme_vl) { + ksft_print_msg("VL changed from %d to %d\n", + sme_vl, za_out->vl); + fail = true; + } + vq = __sve_vq_from_vl(za_out->vl); + za_data = (char *)za_out + ZA_PT_ZA_OFFSET; + + /* Should have register data */ + if (za_out->size < ZA_PT_SIZE(vq)) { + ksft_print_msg("ZA data less than expected: %u < %u\n", + za_out->size, ZA_PT_SIZE(vq)); + fail = true; + vq = 0; + } + + /* That register data should be zero since enabling ZA zeroes it */ + for (i = 0; i < ZA_PT_ZA_SIZE(vq); i++) { + if (za_data[i]) { + ksft_print_msg("ZA byte %d is %x\n", + i, za_data[i]); + fail = true; + } + } + } else { + ksft_print_msg("Failed to read ZA\n"); + fail = true; + } + + ksft_test_result(!fail, "ptrace_enable_za_via_zt\n"); +} + +static int do_parent(pid_t child) +{ + int ret = EXIT_FAILURE; + pid_t pid; + int status; + siginfo_t si; + + /* Attach to the child */ + while (1) { + int sig; + + pid = wait(&status); + if (pid == -1) { + perror("wait"); + goto error; + } + + /* + * This should never happen but it's hard to flag in + * the framework. + */ + if (pid != child) + continue; + + if (WIFEXITED(status) || WIFSIGNALED(status)) + ksft_exit_fail_msg("Child died unexpectedly\n"); + + if (!WIFSTOPPED(status)) + goto error; + + sig = WSTOPSIG(status); + + if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) { + if (errno == ESRCH) + goto disappeared; + + if (errno == EINVAL) { + sig = 0; /* bust group-stop */ + goto cont; + } + + ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n", + strerror(errno)); + goto error; + } + + if (sig == SIGSTOP && si.si_code == SI_TKILL && + si.si_pid == pid) + break; + + cont: + if (ptrace(PTRACE_CONT, pid, NULL, sig)) { + if (errno == ESRCH) + goto disappeared; + + ksft_test_result_fail("PTRACE_CONT: %s\n", + strerror(errno)); + goto error; + } + } + + ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); + + ptrace_za_disabled_read_zt(child); + ptrace_set_get_zt(child); + ptrace_enable_za_via_zt(child); + + ret = EXIT_SUCCESS; + +error: + kill(child, SIGKILL); + +disappeared: + return ret; +} + +int main(void) +{ + int ret = EXIT_SUCCESS; + pid_t child; + + srandom(getpid()); + + ksft_print_header(); + + if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2)) { + ksft_set_plan(1); + ksft_exit_skip("SME2 not available\n"); + } + + /* We need a valid SME VL to enable/disable ZA */ + sme_vl = prctl(PR_SME_GET_VL); + if (sme_vl == -1) { + ksft_set_plan(1); + ksft_exit_skip("Failed to read SME VL: %d (%s)\n", + errno, strerror(errno)); + } + + ksft_set_plan(EXPECTED_TESTS); + + child = fork(); + if (!child) + return do_child(); + + if (do_parent(child)) + ret = EXIT_FAILURE; + + ksft_print_cnts(); + + return ret; +} From 3eb1b41fba97a1586e3ecca8c10547071f541567 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 16 Jan 2023 16:04:56 +0000 Subject: [PATCH 050/130] kselftest/arm64: Add coverage of SME 2 and 2.1 hwcaps Add the hwcaps defined by SME 2 and 2.1 to the hwcaps
test. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-21-f2fa0aef982f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/hwcap.c | 115 ++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c index 9f255bc5f31c..93333a90bf3a 100644 --- a/tools/testing/selftests/arm64/abi/hwcap.c +++ b/tools/testing/selftests/arm64/abi/hwcap.c @@ -50,6 +50,78 @@ static void sme_sigill(void) asm volatile(".inst 0x04bf5800" : : : "x0"); } +static void sme2_sigill(void) +{ + /* SMSTART ZA */ + asm volatile("msr S0_3_C4_C5_3, xzr" : : : ); + + /* ZERO ZT0 */ + asm volatile(".inst 0xc0480001" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + +static void sme2p1_sigill(void) +{ + /* SMSTART SM */ + asm volatile("msr S0_3_C4_C3_3, xzr" : : : ); + + /* BFCLAMP { Z0.H - Z1.H }, Z0.H, Z0.H */ + asm volatile(".inst 0xc120C000" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + +static void smei16i32_sigill(void) +{ + /* SMSTART */ + asm volatile("msr S0_3_C4_C7_3, xzr" : : : ); + + /* SMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */ + asm volatile(".inst 0xa0800000" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + +static void smebi32i32_sigill(void) +{ + /* SMSTART */ + asm volatile("msr S0_3_C4_C7_3, xzr" : : : ); + + /* BMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */ + asm volatile(".inst 0x80800008" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + +static void smeb16b16_sigill(void) +{ + /* SMSTART */ + asm volatile("msr S0_3_C4_C7_3, xzr" : : : ); + + /* BFADD ZA.H[W0, 0], {Z0.H-Z1.H} */ + asm volatile(".inst 0xC1E41C00" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + +static void smef16f16_sigill(void) +{ + /* SMSTART */ + asm volatile("msr S0_3_C4_C7_3, xzr" : : : ); + + /* FADD ZA.H[W0, 0], { Z0.H-Z1.H } */ + asm volatile(".inst 0xc1a41C00" : : : ); + + /* SMSTOP */ + asm volatile("msr S0_3_C4_C6_3, xzr" : : : ); +} + static void sve_sigill(void) { /* RDVL x0, #0 */ @@ -158,6 +230,49 @@ static const struct hwcap_data { .sigill_fn = sme_sigill, .sigill_reliable = true, }, + { + .name = "SME2", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME2, + .cpuinfo = "sme2", + .sigill_fn = sme2_sigill, + .sigill_reliable = true, + }, + { + .name = "SME 2.1", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME2P1, + .cpuinfo = "sme2p1", + .sigill_fn = sme2p1_sigill, + }, + { + .name = "SME I16I32", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME_I16I32, + .cpuinfo = "smei16i32", + .sigill_fn = smei16i32_sigill, + }, + { + .name = "SME BI32I32", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME_BI32I32, + .cpuinfo = "smebi32i32", + .sigill_fn = smebi32i32_sigill, + }, + { + .name = "SME B16B16", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME_B16B16, + .cpuinfo = "smeb16b16", + .sigill_fn = smeb16b16_sigill, + }, + { + .name = "SME F16F16", + .at_hwcap = AT_HWCAP2, + .hwcap_bit = HWCAP2_SME_F16F16, + .cpuinfo = "smef16f16", + .sigill_fn = smef16f16_sigill, + }, { .name = "SVE", .at_hwcap = AT_HWCAP, From 17d0c4a27b2ac90df99307af0b6b6a162805c4fc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 14:20:40 +0000 Subject: [PATCH 051/130] arm64/sme: Document ABI for TPIDR2 signal information In order to allow access to TPIDR2 from signal handlers we need to add it to the signal context, document that we are doing so. 
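To make the documented record concrete, a handler can locate it by walking the __reserved area of the sigcontext until the zero-magic terminator record. The following is a minimal sketch, assuming the TPIDR2_MAGIC and struct tpidr2_context definitions added by the next patch, with error handling elided:

	#include <signal.h>
	#include <ucontext.h>
	#include <asm/sigcontext.h>

	static __u64 tpidr2_from_frame(ucontext_t *uc)
	{
		struct _aarch64_ctx *head =
			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

		/* Records are laid out end to end and end with a zero magic */
		while (head->magic) {
			if (head->magic == TPIDR2_MAGIC)
				return ((struct tpidr2_context *)head)->tpidr2;
			head = (struct _aarch64_ctx *)((char *)head + head->size);
		}

		return 0;	/* no TPIDR2 record in this frame */
	}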
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-tpidr2-sig-v3-1-c77c6c8775f4@kernel.org Signed-off-by: Catalin Marinas --- Documentation/arm64/sme.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/arm64/sme.rst b/Documentation/arm64/sme.rst index 16d2db4c2e2e..36b08a105dd1 100644 --- a/Documentation/arm64/sme.rst +++ b/Documentation/arm64/sme.rst @@ -111,6 +111,9 @@ be zeroed. * Signal handlers are invoked with streaming mode and ZA disabled. +* A new signal frame record TPIDR2_MAGIC is added formatted as a struct + tpidr2_context to allow access to TPIDR2_EL0 from signal handlers. + * A new signal frame record za_context encodes the ZA register contents on signal delivery. [1] From 39e54499280f373d03ab7ffe681ca9d53a9089c9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 14:20:41 +0000 Subject: [PATCH 052/130] arm64/signal: Include TPIDR2 in the signal context Add a new signal frame record for TPIDR2 using the same format as we already use for ESR with different magic, a header with the value from the register appended as the only data. If SME is supported then this record is always included. Signed-off-by: Mark Brown Reviewed-by: Szabolcs Nagy Link: https://lore.kernel.org/r/20221208-arm64-tpidr2-sig-v3-2-c77c6c8775f4@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/uapi/asm/sigcontext.h | 8 ++++ arch/arm64/kernel/signal.c | 59 ++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 9525041e4a14..4b4398d7fb2d 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -144,6 +144,14 @@ struct sve_context { #define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */ +/* TPIDR2_EL0 context */ +#define TPIDR2_MAGIC 0x54504902 + +struct tpidr2_context { + struct _aarch64_ctx head; + __u64 tpidr2; +}; + #define ZA_MAGIC 0x54366345 struct za_context { diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index e0d09bf5b01b..5fe45c7c5e4f 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -56,6 +56,7 @@ struct rt_sigframe_user_layout { unsigned long fpsimd_offset; unsigned long esr_offset; unsigned long sve_offset; + unsigned long tpidr2_offset; unsigned long za_offset; unsigned long extra_offset; unsigned long end_offset; @@ -220,6 +221,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) struct user_ctxs { struct fpsimd_context __user *fpsimd; struct sve_context __user *sve; + struct tpidr2_context __user *tpidr2; struct za_context __user *za; }; @@ -361,6 +363,32 @@ extern int preserve_sve_context(void __user *ctx); #ifdef CONFIG_ARM64_SME +static int preserve_tpidr2_context(struct tpidr2_context __user *ctx) +{ + int err = 0; + + current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); + + __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(*ctx), &ctx->head.size, err); + __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err); + + return err; +} + +static int restore_tpidr2_context(struct user_ctxs *user) +{ + u64 tpidr2_el0; + int err = 0; + + /* Magic and size were validated deciding to call this function */ + __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err); + if (!err) + current->thread.tpidr2_el0 = tpidr2_el0; + + return err; +} + static int preserve_za_context(struct za_context __user *ctx) { int err = 0; @@ -450,6 +478,8 @@ static int 
restore_za_context(struct user_ctxs *user) #else /* ! CONFIG_ARM64_SME */ /* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_tpidr2_context(void __user *ctx); +extern int restore_tpidr2_context(struct user_ctxs *user); extern int preserve_za_context(void __user *ctx); extern int restore_za_context(struct user_ctxs *user); @@ -468,6 +498,7 @@ static int parse_user_sigframe(struct user_ctxs *user, user->fpsimd = NULL; user->sve = NULL; + user->tpidr2 = NULL; user->za = NULL; if (!IS_ALIGNED((unsigned long)base, 16)) @@ -534,6 +565,19 @@ static int parse_user_sigframe(struct user_ctxs *user, user->sve = (struct sve_context __user *)head; break; + case TPIDR2_MAGIC: + if (!system_supports_sme()) + goto invalid; + + if (user->tpidr2) + goto invalid; + + if (size != sizeof(*user->tpidr2)) + goto invalid; + + user->tpidr2 = (struct tpidr2_context __user *)head; + break; + case ZA_MAGIC: if (!system_supports_sme()) goto invalid; @@ -666,6 +710,9 @@ static int restore_sigframe(struct pt_regs *regs, err = restore_fpsimd_context(user.fpsimd); } + if (err == 0 && system_supports_sme() && user.tpidr2) + err = restore_tpidr2_context(&user); + if (err == 0 && system_supports_sme() && user.za) err = restore_za_context(&user); @@ -760,6 +807,11 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, else vl = task_get_sme_vl(current); + err = sigframe_alloc(user, &user->tpidr2_offset, + sizeof(struct tpidr2_context)); + if (err) + return err; + if (thread_za_enabled(¤t->thread)) vq = sve_vq_from_vl(vl); @@ -817,6 +869,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, err |= preserve_sve_context(sve_ctx); } + /* TPIDR2 if supported */ + if (system_supports_sme() && err == 0) { + struct tpidr2_context __user *tpidr2_ctx = + apply_user_offset(user, user->tpidr2_offset); + err |= preserve_tpidr2_context(tpidr2_ctx); + } + /* ZA state if present */ if (system_supports_sme() && err == 0 && user->za_offset) { struct za_context __user *za_ctx = From bae393dabf353172784987c12c99a55cbe8075bc Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 14:20:42 +0000 Subject: [PATCH 053/130] kselftest/arm64: Add TPIDR2 to the set of known signal context records When validating the set of signal context records check that any TPIDR2 record has the correct size, also suppressing warnings due to seeing an unknown record type. 
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-tpidr2-sig-v3-3-c77c6c8775f4@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/testcases/testcases.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index d2eda7b5de26..23487c458240 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -163,6 +163,10 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) if (head->size != sizeof(struct esr_context)) *err = "Bad size for esr_context"; break; + case TPIDR2_MAGIC: + if (head->size != sizeof(struct tpidr2_context)) + *err = "Bad size for tpidr2_context"; + break; case SVE_MAGIC: if (flags & SVE_CTX) *err = "Multiple SVE_MAGIC"; From 8ced928019353eaecbffee566d7ed6a9a9e60e78 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 27 Dec 2022 14:20:43 +0000 Subject: [PATCH 054/130] kselftest/arm64: Add test case for TPIDR2 signal frame records Ensure that we get signal context for TPIDR2 if and only if SME is present on the system. Since TPIDR2 is owned by libc we merely validate that the value is whatever it was set to, this isn't ideal since it's likely to just be the default of 0 with current systems but it avoids future false positives. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221208-arm64-tpidr2-sig-v3-4-c77c6c8775f4@kernel.org Signed-off-by: Catalin Marinas --- .../testing/selftests/arm64/signal/.gitignore | 1 + .../arm64/signal/testcases/tpidr2_siginfo.c | 90 +++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore index e8d2b57f73ec..e1b6c4d961b5 100644 --- a/tools/testing/selftests/arm64/signal/.gitignore +++ b/tools/testing/selftests/arm64/signal/.gitignore @@ -4,5 +4,6 @@ fake_sigreturn_* sme_* ssve_* sve_* +tpidr2_siginfo za_* !*.[ch] diff --git a/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c b/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c new file mode 100644 index 000000000000..6a2c82bf7ead --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 ARM Limited + * + * Verify that the TPIDR2 register context in signal frames is set up as + * expected. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "test_signals_utils.h" +#include "testcases.h" + +static union { + ucontext_t uc; + char buf[1024 * 128]; +} context; + +#define SYS_TPIDR2 "S3_3_C13_C0_5" + +static uint64_t get_tpidr2(void) +{ + uint64_t val; + + asm volatile ( + "mrs %0, " SYS_TPIDR2 "\n" + : "=r"(val) + : + : "cc"); + + return val; +} + +int tpidr2_present(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context); + struct tpidr2_context *tpidr2_ctx; + size_t offset; + bool in_sigframe; + bool have_sme; + __u64 orig_tpidr2; + + have_sme = getauxval(AT_HWCAP2) & HWCAP2_SME; + if (have_sme) + orig_tpidr2 = get_tpidr2(); + + if (!get_current_context(td, &context.uc, sizeof(context))) + return 1; + + tpidr2_ctx = (struct tpidr2_context *) + get_header(head, TPIDR2_MAGIC, td->live_sz, &offset); + + in_sigframe = tpidr2_ctx != NULL; + + fprintf(stderr, "TPIDR2 sigframe %s on system %s SME\n", + in_sigframe ? "present" : "absent", + have_sme ? "with" : "without"); + + td->pass = (in_sigframe == have_sme); + + /* + * Check that the value we read back was the one present at + * the time that the signal was triggered. TPIDR2 is owned by + * libc so we can't safely choose the value and it is possible + * that we may need to revisit this in future if something + * starts deciding to set a new TPIDR2 between us reading and + * the signal. + */ + if (have_sme && tpidr2_ctx) { + if (tpidr2_ctx->tpidr2 != orig_tpidr2) { + fprintf(stderr, "TPIDR2 in frame is %llx, was %llx\n", + tpidr2_ctx->tpidr2, orig_tpidr2); + td->pass = false; + } + } + + return 0; +} + +struct tdescr tde = { + .name = "TPIDR2", + .descr = "Validate that TPIDR2 is present as expected", + .timeout = 3, + .run = tpidr2_present, +}; From a89c6bcdac22bec1bfbe6e64060b4cf5838d4f47 Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Mon, 9 Jan 2023 12:19:55 -0300 Subject: [PATCH 055/130] arm64: Avoid repeated AA64MMFR1_EL1 register read on pagefault path Accessing AA64MMFR1_EL1 is expensive in KVM guests, since it is emulated in the hypervisor. In fact, ARM documentation mentions some feature registers are not supposed to be accessed frequently by the OS, and therefore should be emulated for guests [1]. Commit 0388f9c74330 ("arm64: mm: Implement arch_wants_old_prefaulted_pte()") introduced a read of this register in the page fault path. But, even when the feature of setting faultaround pages with the old flag is disabled for a given cpu, we are still paying the cost of checking the register on every pagefault. This results in an explosion of vmexit events in KVM guests, which directly impacts the performance of virtualized workloads. For instance, running kernbench yields a 15% increase in system time solely due to the increased vmexit cycles. This patch avoids the extra cost by using the sanitized cached value. It should be safe to do so, since this register mustn't change for a given cpu. 
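The cost is observable even from userspace, since Linux traps and emulates EL0 reads of the ID registers and hands back the sanitised value. A quick sketch for inspecting the relevant field on an arm64 machine (HAFDBS is bits [3:0] of ID_AA64MMFR1_EL1):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t mmfr1;

		/* Trapped and emulated by the kernel, not a raw hardware read */
		asm volatile("mrs %0, ID_AA64MMFR1_EL1" : "=r"(mmfr1));

		printf("HAFDBS = %u\n", (unsigned int)(mmfr1 & 0xf));
		return 0;
	}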
[1] https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/Learn%20the%20Architecture/Armv8-A%20virtualization.pdf?revision=a765a7df-1a00-434d-b241-357bfda2dd31 Signed-off-by: Gabriel Krisman Bertazi Acked-by: Will Deacon Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20230109151955.8292-1-krisman@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..3d1a5677321e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -864,7 +864,11 @@ static inline bool cpu_has_hw_af(void) if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM)) return false; - mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + /* + * Use cached version to avoid emulated msr operation on KVM + * guests. + */ + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); return cpuid_feature_extract_unsigned_field(mmfr1, ID_AA64MMFR1_EL1_HAFDBS_SHIFT); } From 2198d07c509f1db4a1185d1f65aaada794c6ea59 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 9 Jan 2023 18:47:59 +0100 Subject: [PATCH 056/130] arm64: Always load shadow stack pointer directly from the task struct All occurrences of the scs_load macro load the value of the shadow call stack pointer from the task which is current at that point. So instead of taking a task struct register argument in the scs_load macro to specify the task struct to load from, let's always reference the current task directly. This should make it much harder to exploit any instruction sequences reloading the shadow call stack pointer register from memory. Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/scs.h | 7 ++++--- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/kernel/head.S | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index ff7da1268a52..13df982a0808 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -10,15 +10,16 @@ #ifdef CONFIG_SHADOW_CALL_STACK scs_sp .req x18 - .macro scs_load tsk - ldr scs_sp, [\tsk, #TSK_TI_SCS_SP] + .macro scs_load_current + get_current_task scs_sp + ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP] .endm .macro scs_save tsk str scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm #else - .macro scs_load tsk + .macro scs_load_current .endm .macro scs_save tsk diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 11cb99c4d298..546f7773238e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH alternative_else_nop_endif 1: - scs_load tsk + scs_load_current .else add x21, sp, #PT_REGS_SIZE get_current_task tsk @@ -848,7 +848,7 @@ SYM_FUNC_START(cpu_switch_to) msr sp_el0, x1 ptrauth_keys_install_kernel x1, x8, x9, x10 scs_save x0 - scs_load x1 + scs_load_current ret SYM_FUNC_END(cpu_switch_to) NOKPROBE(cpu_switch_to) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 952e17bd1c0b..b9c1a506798e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping) stp xzr, xzr, [sp, #S_STACKFRAME] add x29, sp, #S_STACKFRAME - scs_load \tsk + scs_load_current adr_l \tmp1, __per_cpu_offset ldr w\tmp2, [\tsk, #TSK_TI_CPU] From 
59b37fe52f49955791a460752c37145f1afdcad1 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 9 Jan 2023 18:48:00 +0100 Subject: [PATCH 057/130] arm64: Stash shadow stack pointer in the task struct on interrupt Instead of reloading the shadow call stack pointer from the ordinary stack, which may be vulnerable to the kind of gadget based attacks shadow call stacks were designed to prevent, let's store a task's shadow call stack pointer in the task struct when switching to the shadow IRQ stack. Given that currently, the task_struct::scs_sp field is only used to preserve the shadow call stack pointer while a task is scheduled out or running in user space, reusing this field to preserve and restore it while running off the IRQ stack must be safe, as those occurrences are guaranteed to never overlap. (The stack switching logic only switches stacks when running from the task stack, and so the value being saved here always corresponds to the task mode shadow stack) While at it, fold a mov/add/mov sequence into a single add. Signed-off-by: Ard Biesheuvel Reviewed-by: Kees Cook Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20230109174800.3286265-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 546f7773238e..80d763e165fc 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -876,19 +876,19 @@ NOKPROBE(ret_from_fork) */ SYM_FUNC_START(call_on_irq_stack) #ifdef CONFIG_SHADOW_CALL_STACK - stp scs_sp, xzr, [sp, #-16]! + get_current_task x16 + scs_save x16 ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 #endif + /* Create a frame record to save our LR and SP (implicit in FP) */ stp x29, x30, [sp, #-16]! mov x29, sp ldr_this_cpu x16, irq_stack_ptr, x17 - mov x15, #IRQ_STACK_SIZE - add x16, x16, x15 /* Move to the new stack and call the function there */ - mov sp, x16 + add sp, x16, #IRQ_STACK_SIZE blr x1 /* @@ -897,9 +897,7 @@ SYM_FUNC_START(call_on_irq_stack) */ mov sp, x29 ldp x29, x30, [sp], #16 -#ifdef CONFIG_SHADOW_CALL_STACK - ldp scs_sp, xzr, [sp], #16 -#endif + scs_load_current ret SYM_FUNC_END(call_on_irq_stack) NOKPROBE(call_on_irq_stack) From 846b73a4a3d0bf289ee924f6101c4f842ecc5b81 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:00 +0530 Subject: [PATCH 058/130] arm64: Add compat hwcap FPHP and ASIMDHP These hwcaps were added earlier for 32-bit native arm kernel by commit c00a19c8b143 ("ARM: 9268/1: vfp: Add hwcap FPHP and ASIMDHP for FEAT_FP16") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar userspace interfaces. Floating point half-precision (FPHP) and Advanced SIMD half-precision (ASIMDHP) represents the Armv8 FP16 feature extension and is already advertised in native arm64 kernel. 
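From the point of view of a compat process these bits surface through the ELF auxiliary vector just like the native hwcaps. A minimal sketch of the userspace side, built with a 32-bit arm toolchain; the fallback defines mirror the COMPAT_HWCAP_* values added below in case the libc headers predate them:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_FPHP
	#define HWCAP_FPHP	(1 << 22)
	#endif
	#ifndef HWCAP_ASIMDHP
	#define HWCAP_ASIMDHP	(1 << 23)
	#endif

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("fphp: %s\n", (hwcap & HWCAP_FPHP) ? "yes" : "no");
		printf("asimdhp: %s\n", (hwcap & HWCAP_ASIMDHP) ? "yes" : "no");
		return 0;
	}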
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-2-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 2 ++ arch/arm64/kernel/cpufeature.c | 6 ++++-- arch/arm64/kernel/cpuinfo.c | 2 ++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 06dd12c514e6..8d0d412aba3c 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -31,6 +31,8 @@ #define COMPAT_HWCAP_VFPD32 (1 << 19) #define COMPAT_HWCAP_LPAE (1 << 20) #define COMPAT_HWCAP_EVTSTRM (1 << 21) +#define COMPAT_HWCAP_FPHP (1 << 22) +#define COMPAT_HWCAP_ASIMDHP (1 << 23) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..4c589ff7e5ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -444,8 +444,8 @@ static const struct arm64_ftr_bits ftr_mvfr0[] = { static const struct arm64_ftr_bits ftr_mvfr1[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0), @@ -2866,6 +2866,8 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
So, piggy back on this for the presence of VFP support */ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, 3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 379695262b77..195b44a99f5d 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -146,6 +146,8 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */ [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae", [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", + [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", + [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) From 27addd402a73cb97b1529ea4e56be7eb82d3c478 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:01 +0530 Subject: [PATCH 059/130] arm64: Add compat hwcap ASIMDDP This hwcap was added earlier for 32-bit native arm kernel by commit 62ea0d873af3 ("ARM: 9269/1: vfp: Add hwcap for FEAT_DotProd") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar user interfaces. Advanced Dot product is a feature (FEAT_DotProd) present in both AArch32/AArch64 state for Armv8 and is already advertised in native arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-3-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 8d0d412aba3c..d3d8a992ab05 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -33,6 +33,7 @@ #define COMPAT_HWCAP_EVTSTRM (1 << 21) #define COMPAT_HWCAP_FPHP (1 << 22) #define COMPAT_HWCAP_ASIMDHP (1 << 23) +#define COMPAT_HWCAP_ASIMDDP (1 << 24) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4c589ff7e5ea..e213a3fcbb9b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -534,7 +534,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2873,6 +2873,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 195b44a99f5d..e94eb1a4f1f4 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -148,6 +148,7 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", + [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) From 4a87be25b02b33948c293cae47a7f27d8b9bf20f Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:02 +0530 Subject: [PATCH 060/130] arm64: Add compat hwcap ASIMDFHM This hwcap was added earlier for 32-bit native arm kernel by commit ce4835497c20 ("ARM: 9270/1: vfp: Add hwcap for FEAT_FHM") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar user interfaces. Floating-point half-precision multiplication (FHM) is a feature present in AArch32/AArch64 state for Armv8. This hwcap is already advertised in native arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-4-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index d3d8a992ab05..5891e27b840b 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -34,6 +34,7 @@ #define COMPAT_HWCAP_FPHP (1 << 22) #define COMPAT_HWCAP_ASIMDHP (1 << 23) #define COMPAT_HWCAP_ASIMDDP (1 << 24) +#define COMPAT_HWCAP_ASIMDFHM (1 << 25) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index e213a3fcbb9b..d056b54dbe01 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -533,7 +533,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), ARM64_FTR_END, @@ -2874,6 +2874,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index e94eb1a4f1f4..a0fefb451bac 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -149,6 +149,7 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp", [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", + [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) From f64234fa45f47cd757e5eb7a83d54d83480c4fce Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:03 +0530 Subject: [PATCH 061/130] arm64: Add compat hwcap ASIMDBF16 This hwcap was added earlier for 32-bit native arm kernel by commit 23b6d4ad6e7a ("ARM: 9271/1: vfp: Add hwcap for FEAT_AA32BF16") and hence the corresponding changes added in 32-bit compat arm64 kernel. Brain 16-bit floating-point storage format is a feature (FEAT_AA32BF16) present in AArch32 state for Armv8 and is represented by ISAR6.BF16 identification register. Similar feature (FEAT_BF16) exist for AArch64 state and is already advertised in native arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-5-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 5891e27b840b..268aa0e5ec06 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -35,6 +35,7 @@ #define COMPAT_HWCAP_ASIMDHP (1 << 23) #define COMPAT_HWCAP_ASIMDDP (1 << 24) #define COMPAT_HWCAP_ASIMDFHM (1 << 25) +#define COMPAT_HWCAP_ASIMDBF16 (1 << 26) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d056b54dbe01..1533107a7ad5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -530,7 +530,7 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = { static const struct arm64_ftr_bits ftr_id_isar6[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), @@ -2875,6 +2875,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index a0fefb451bac..50cfd808b80c 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -150,6 +150,7 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp", [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm", + [COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) From 0864d1e42959b8b1e32efa3fb672fe81ba1dbc6c Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:04 +0530 Subject: [PATCH 062/130] arm64: Add compat hwcap I8MM This hwcap was added earlier for 32-bit native arm kernel by commit 956ca3a4eb81 ("ARM: 9272/1: vfp: Add hwcap for FEAT_AA32I8MM") and hence the corresponding changes added in 32-bit compat arm64 kernel for similar user interfaces. Int8 matrix multiplication is a feature (FEAT_AA32I8MM) present in AArch32 state of Armv8 and is identified by ISAR6.I8MM register. Similar feature(FEAT_I8MM) exist for AArch64 state and is already advertised in arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-6-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 268aa0e5ec06..da7687efd1ae 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -36,6 +36,7 @@ #define COMPAT_HWCAP_ASIMDDP (1 << 24) #define COMPAT_HWCAP_ASIMDFHM (1 << 25) #define COMPAT_HWCAP_ASIMDBF16 (1 << 26) +#define COMPAT_HWCAP_I8MM (1 << 27) #define COMPAT_HWCAP2_AES (1 << 0) #define COMPAT_HWCAP2_PMULL (1 << 1) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 1533107a7ad5..46998bbba6ff 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -529,7 +529,7 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = { }; static const struct arm64_ftr_bits ftr_id_isar6[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), @@ -2876,6 +2876,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 50cfd808b80c..2af73bc14775 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -151,6 +151,7 @@ static const char *const compat_hwcap_str[] = { [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp", [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm", [COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16", + [COMPAT_KERNEL_HWCAP(I8MM)] = "i8mm", }; #define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) From 2d602aa99abb84c34cc9602c8bd6852895314f74 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:05 +0530 Subject: [PATCH 063/130] arm64: Add compat hwcap SB This hwcap was added for 32-bit native arm kernel by commit 3bda6d884897 ("ARM: 9273/1: Add hwcap for Speculation Barrier(SB)") and hence the corresponding changes added in 32-bit compat arm64 kernel. Speculation Barrier is a feature(FEAT_SB) present in both AArch32 and AArch64 state. This hwcap is already advertised in native arm64 kernel. 
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-7-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index da7687efd1ae..f2bcf4255f97 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -43,6 +43,7 @@ #define COMPAT_HWCAP2_SHA1 (1 << 2) #define COMPAT_HWCAP2_SHA2 (1 << 3) #define COMPAT_HWCAP2_CRC32 (1 << 4) +#define COMPAT_HWCAP2_SB (1 << 5) #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 46998bbba6ff..8adbafbdf1da 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -532,7 +532,7 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), @@ -2875,6 +2875,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), #endif diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 2af73bc14775..d2b41f2544f5 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -161,6 +161,7 @@ static const char *const compat_hwcap2_str[] = { [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1", [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2", [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32", + [COMPAT_KERNEL_HWCAP2(SB)] = "sb", }; #endif /* CONFIG_COMPAT */ From 4f2c9bf16a4bc209a674e7b76d8e829b917c7f84 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Wed, 11 Jan 2023 11:07:06 +0530 Subject: [PATCH 064/130] arm64: Add compat hwcap SSBS This hwcap was added for 32-bit native arm kernel by commit fea53546be57 ("ARM: 9274/1: Add hwcap for Speculative Store Bypassing Safe") and hence the corresponding changes added in 32-bit compat arm64 for similar user interfaces. Speculative Store Bypass Safe is a feature(FEAT_SSBS) present in AArch32/AArch64 state for Armv8 and can be identified by PFR2.SSBS identification register. This hwcap is already advertised in native arm64 kernel. 
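Unlike the preceding additions this bit lands in the second hwcap word, so 32-bit userspace queries AT_HWCAP2 instead. A sketch along the same lines, with fallback defines mirroring COMPAT_HWCAP2_SB from the previous patch and COMPAT_HWCAP2_SSBS from this one:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_SB
	#define HWCAP2_SB	(1 << 5)
	#endif
	#ifndef HWCAP2_SSBS
	#define HWCAP2_SSBS	(1 << 6)
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		printf("sb: %s\n", (hwcap2 & HWCAP2_SB) ? "yes" : "no");
		printf("ssbs: %s\n", (hwcap2 & HWCAP2_SSBS) ? "yes" : "no");
		return 0;
	}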
Signed-off-by: Amit Daniel Kachhap Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20230111053706.13994-8-amit.kachhap@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hwcap.h | 1 + arch/arm64/kernel/cpufeature.c | 3 ++- arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index f2bcf4255f97..2f539a3e3d3a 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -44,6 +44,7 @@ #define COMPAT_HWCAP2_SHA2 (1 << 3) #define COMPAT_HWCAP2_CRC32 (1 << 4) #define COMPAT_HWCAP2_SB (1 << 5) +#define COMPAT_HWCAP2_SSBS (1 << 6) #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8adbafbdf1da..d54c2a003435 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -562,7 +562,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_pfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -2878,6 +2878,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), + HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), #endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index d2b41f2544f5..876cd96c73ea 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -162,6 +162,7 @@ static const char *const compat_hwcap2_str[] = { [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1", [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2", [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32", [COMPAT_KERNEL_HWCAP2(SB)] = "sb", + [COMPAT_KERNEL_HWCAP2(SSBS)] = "ssbs", }; #endif /* CONFIG_COMPAT */ From aa58ace3499a678768f5ce9a5973919bfccd8a4e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:47 +0000 Subject: [PATCH 065/130] kselftest/arm64: Fix .pushsection for strings in FP tests The .pushsection directive used to store the strings used with the .puts macro in the floating point helpers does not provide a section type but according to the gas documentation this should be mandatory, and with the clang built-in assembler it actually is. Provide one so that we can build these tests with LLVM=1. No functional change.
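For reference, the accepted form can be exercised outside the selftests with a trivial translation unit using file-scope asm, a sketch that should assemble with both gcc and clang targeting arm64 (@progbits is the AArch64 spelling of the section type):

	/* pushsection.c: compile with "cc -c pushsection.c" */
	asm(
	"	.pushsection .rodata.str1.1, \"aMS\", @progbits, 1\n"
	"	.string \"hello\"\n"
	"	.popsection\n"
	);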
Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-1-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/assembler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/assembler.h b/tools/testing/selftests/arm64/fp/assembler.h index 90bd433d2665..9b38a0da407d 100644 --- a/tools/testing/selftests/arm64/fp/assembler.h +++ b/tools/testing/selftests/arm64/fp/assembler.h @@ -57,7 +57,7 @@ endfunction // Utility macro to print a literal string // Clobbers x0-x4,x8 .macro puts string - .pushsection .rodata.str1.1, "aMS", 1 + .pushsection .rodata.str1.1, "aMS", @progbits, 1 .L__puts_literal\@: .string "\string" .popsection From cd57a6584fe596f4daf128007bff71b1c2ba16c8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:48 +0000 Subject: [PATCH 066/130] kselftest/arm64: Remove redundant _start labels from FP tests There are a number of freestanding static executables used in floating point testing that have no runtime at all. These all define the main entry point as: .globl _start function _start _start: but clang's integrated assembler complains that: error: symbol '_start' is already defined due to having both a label and function directive. Remove the label to allow building with clang. No functional change. Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-2-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/fp-pidbench.S | 1 - tools/testing/selftests/arm64/fp/fpsimd-test.S | 1 - tools/testing/selftests/arm64/fp/sve-test.S | 1 - tools/testing/selftests/arm64/fp/za-test.S | 1 - 4 files changed, 4 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/fp-pidbench.S b/tools/testing/selftests/arm64/fp/fp-pidbench.S index 16a436389bfc..73830f6bc99b 100644 --- a/tools/testing/selftests/arm64/fp/fp-pidbench.S +++ b/tools/testing/selftests/arm64/fp/fp-pidbench.S @@ -31,7 +31,6 @@ // Main program entry point .globl _start function _start -_start: puts "Iterations per test: " mov x20, #10000 lsl x20, x20, #8 diff --git a/tools/testing/selftests/arm64/fp/fpsimd-test.S b/tools/testing/selftests/arm64/fp/fpsimd-test.S index 918d04885a33..8b960d01ed2e 100644 --- a/tools/testing/selftests/arm64/fp/fpsimd-test.S +++ b/tools/testing/selftests/arm64/fp/fpsimd-test.S @@ -215,7 +215,6 @@ endfunction // Main program entry point .globl _start function _start -_start: mov x23, #0 // signal count mov w0, #SIGINT diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S index 2a18cb4c528c..4328895dfc87 100644 --- a/tools/testing/selftests/arm64/fp/sve-test.S +++ b/tools/testing/selftests/arm64/fp/sve-test.S @@ -378,7 +378,6 @@ endfunction // Main program entry point .globl _start function _start -_start: mov x23, #0 // Irritation signal count mov w0, #SIGINT diff --git a/tools/testing/selftests/arm64/fp/za-test.S b/tools/testing/selftests/arm64/fp/za-test.S index 53c54af65704..9dcd70911397 100644 --- a/tools/testing/selftests/arm64/fp/za-test.S +++ b/tools/testing/selftests/arm64/fp/za-test.S @@ -231,7 +231,6 @@ endfunction // Main program entry point .globl _start function _start -_start: mov x23, #0 // signal count mov w0, #SIGINT From a884f7970e57aef78c6011561e29d238e46b3a9f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:49 +0000 Subject: [PATCH 067/130] 
kselftest/arm64: Don't pass headers to the compiler as source The signal Makefile rules pass all the dependencies for each executable, including headers, to the compiler which GCC is happy enough with but clang rejects: clang --target=aarch64-none-linux-gnu -fintegrated-as -Wall -O2 -g -I/home/broonie/git/linux/tools/testing/selftests/ -isystem /home/broonie/git/linux/usr/include -D_GNU_SOURCE -std=gnu99 -I. test_signals.c test_signals_utils.c testcases/testcases.c signals.S testcases/fake_sigreturn_bad_magic.c test_signals.h test_signals_utils.h testcases/testcases.h -o testcases/fake_sigreturn_bad_magic clang: error: cannot specify -o when generating multiple output files This happens because clang gets confused about what to do with the header files, failing to identify them as source. This is not amazing behaviour on clang's part and should ideally be fixed but even if that happens we'd still need a new clang release so let's instead rework the Makefile so we use variables for the lists of header and source files, allowing us to only pass the source files to the compiler and keep clang happy. As a bonus the resulting Makefile is a bit easier to read. Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-3-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile index be7520a863b0..8f5febaf1a9a 100644 --- a/tools/testing/selftests/arm64/signal/Makefile +++ b/tools/testing/selftests/arm64/signal/Makefile @@ -22,6 +22,10 @@ $(TEST_GEN_PROGS): $(PROGS) # Common test-unit targets to build common-layout test-cases executables # Needs secondary expansion to properly include the testcase c-file in pre-reqs +COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \ + signals.S +COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h + .SECONDEXPANSION: -$(PROGS): test_signals.c test_signals_utils.c testcases/testcases.c signals.S $$@.c test_signals.h test_signals_utils.h testcases/testcases.h - $(CC) $(CFLAGS) $^ -o $@ +$(PROGS): $$@.c ${COMMON_SOURCES} ${COMMON_HEADERS} + $(CC) $(CFLAGS) ${@}.c ${COMMON_SOURCES} -o $@ From 6e4b4f0eca88e47def703f90a403fef5b96730d5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:50 +0000 Subject: [PATCH 068/130] kselftest/arm64: Initialise current at build time in signal tests When building with clang the toolchain refuses to link the signals testcases since the assembly code has a reference to current which has no initialiser so is placed in the BSS: /tmp/signals-af2042.o: in function `fake_sigreturn': :51:(.text+0x40): relocation truncated to fit: R_AARCH64_LD_PREL_LO19 against symbol `current' defined in .bss section in /tmp/test_signals-ec1160.o Since the first statement in main() initialises current we may as well fix this by moving the initialisation to build time so the variable doesn't end up in the BSS. 
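The distinction the toolchain cares about is simply which section the symbol lands in, which a small sketch makes visible with nm; the names here are illustrative rather than taken from the selftests:

	/* placement.c: build with "cc -c placement.c" and run "nm placement.o" */
	struct tdescr;				/* opaque stand-in for the real type */
	extern struct tdescr tde;

	struct tdescr *current_uninit;		/* reported as 'B': .bss */
	struct tdescr *current_init = &tde;	/* reported as 'D': .data */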
Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-4-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/test_signals.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/signal/test_signals.c b/tools/testing/selftests/arm64/signal/test_signals.c index 416b1ff43199..00051b40d71e 100644 --- a/tools/testing/selftests/arm64/signal/test_signals.c +++ b/tools/testing/selftests/arm64/signal/test_signals.c @@ -12,12 +12,10 @@ #include "test_signals.h" #include "test_signals_utils.h" -struct tdescr *current; +struct tdescr *current = &tde; int main(int argc, char *argv[]) { - current = &tde; - ksft_print_msg("%s :: %s\n", current->name, current->descr); if (test_setup(current) && test_init(current)) { test_run(current); From 343d59119e776af3060000f7af70553fc531230e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:51 +0000 Subject: [PATCH 069/130] kselftest/arm64: Support build of MTE tests with clang The assembly portions of the MTE selftests need to be built with a toolchain supporting MTE. Since we support GCC versions that lack MTE support we have logic to suppress build of these tests when using such a toolchain but that logic is broken for LLVM=1 builds, it uses CC but CC is only set for LLVM builds in libs.mk which needs to be included after we have selected which test programs to build. Since all supported LLVM versions support MTE we can simply assume MTE support when LLVM is set. This is not a thing of beauty but it does the job. Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-5-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/Makefile | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile index 037046f5784e..fdb9acdca42b 100644 --- a/tools/testing/selftests/arm64/mte/Makefile +++ b/tools/testing/selftests/arm64/mte/Makefile @@ -1,19 +1,29 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2020 ARM Limited -# preserve CC value from top level Makefile -ifeq ($(CC),cc) -CC := $(CROSS_COMPILE)gcc -endif - CFLAGS += -std=gnu99 -I. -pthread LDFLAGS += -pthread SRCS := $(filter-out mte_common_util.c,$(wildcard *.c)) PROGS := $(patsubst %.c,%,$(SRCS)) +ifeq ($(LLVM),) +# For GCC check that the toolchain has MTE support. + +# preserve CC value from top level Makefile +ifeq ($(CC),cc) +CC := $(CROSS_COMPILE)gcc +endif + #check if the compiler works well mte_cc_support := $(shell if ($(CC) $(CFLAGS) -march=armv8.5-a+memtag -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi) +else + +# All supported clang versions also support MTE. +mte_cc_support := 1 + +endif + ifeq ($(mte_cc_support),1) # Generated binaries to be installed by top KSFT script TEST_GEN_PROGS := $(PROGS) From 89d72c035f88c8338bf3ab604a07c7320dc9f800 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Jan 2023 19:51:52 +0000 Subject: [PATCH 070/130] kselftest/arm64: Remove spurious comment from MTE test Makefile There's a stray comment in the MTE test Makefile which documents something that's since been removed, delete it. 
Signed-off-by: Mark Brown Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20230111-arm64-kselftest-clang-v1-6-89c69d377727@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/mte/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile index fdb9acdca42b..0d7ac3db8390 100644 --- a/tools/testing/selftests/arm64/mte/Makefile +++ b/tools/testing/selftests/arm64/mte/Makefile @@ -28,7 +28,6 @@ ifeq ($(mte_cc_support),1) # Generated binaries to be installed by top KSFT script TEST_GEN_PROGS := $(PROGS) -# Get Kernel headers installed and use them. else $(warning compiler "$(CC)" does not support the ARMv8.5 MTE extension.) $(warning test program "mte" will not be created.) From f76cb73a2d7c6e80bed04098b7f100872fd9a475 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 Jan 2023 12:04:08 +0000 Subject: [PATCH 071/130] kselftest/arm64: Verify that SSVE signal context has SVE_SIG_FLAG_SM set Streaming mode SVE signal context should have SVE_SIG_FLAG_SM set but we were not actually validating this. Add a check. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230117-arm64-test-ssve-za-v1-1-203c00150154@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/testcases/ssve_regs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c index d0a178945b1a..cd738265cdcd 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -92,6 +92,11 @@ static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, return 1; } + if (!(ssve->flags & SVE_SIG_FLAG_SM)) { + fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n"); + return 1; + } + /* The actual size validation is done in get_current_context() */ fprintf(stderr, "Got expected size %u and VL %d\n", head->size, ssve->vl); From bc69da5ff087c40d1fc4f30596f1ee1b71924577 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 Jan 2023 12:04:09 +0000 Subject: [PATCH 072/130] kselftest/arm64: Verify simultaneous SSVE and ZA context generation Add a test that generates SSVE and ZA context in a single signal frame to ensure that nothing is going wrong in that case for any reason. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230117-arm64-test-ssve-za-v1-2-203c00150154@kernel.org Signed-off-by: Catalin Marinas --- .../arm64/signal/testcases/ssve_za_regs.c | 162 ++++++++++++++++++ 1 file changed, 162 insertions(+) create mode 100644 tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c new file mode 100644 index 000000000000..954a21f6121a --- /dev/null +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 ARM Limited + * + * Verify that both the streaming SVE and ZA register context in + * signal frames is set up as expected when enabled simultaneously. 
+ */ + +#include <signal.h> +#include <ucontext.h> +#include <sys/prctl.h> + +#include "test_signals_utils.h" +#include "testcases.h" + +static union { + ucontext_t uc; + char buf[1024 * 128]; +} context; +static unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls = 0; + +static bool sme_get_vls(struct tdescr *td) +{ + int vq, vl; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SME_SET_VL, vq * 16); + if (vl == -1) + return false; + + vl &= PR_SME_VL_LEN_MASK; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + /* We need at least one VL */ + if (nvls < 1) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return false; + } + + return true; +} + +static void setup_regs(void) +{ + /* smstart sm; real data is TODO */ + asm volatile(".inst 0xd503437f" : : : ); + + /* smstart za; real data is TODO */ + asm volatile(".inst 0xd503457f" : : : ); +} + +static char zeros[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)]; + +static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, + unsigned int vl) +{ + size_t offset; + struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context); + struct _aarch64_ctx *regs; + struct sve_context *ssve; + struct za_context *za; + int ret; + + fprintf(stderr, "Testing VL %d\n", vl); + + ret = prctl(PR_SME_SET_VL, vl); + if (ret != vl) { + fprintf(stderr, "Failed to set VL, got %d\n", ret); + return 1; + } + + /* + * Get a signal context which should have the SVE and ZA + * frames in it. + */ + setup_regs(); + if (!get_current_context(td, &context.uc, sizeof(context))) + return 1; + + regs = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context), + &offset); + if (!regs) { + fprintf(stderr, "No SVE context\n"); + return 1; + } + + ssve = (struct sve_context *)regs; + if (ssve->vl != vl) { + fprintf(stderr, "Got SSVE VL %d, expected %d\n", ssve->vl, vl); + return 1; + } + + if (!(ssve->flags & SVE_SIG_FLAG_SM)) { + fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n"); + return 1; + } + + fprintf(stderr, "Got expected SSVE size %u and VL %d\n", + regs->size, ssve->vl); + + regs = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context), + &offset); + if (!regs) { + fprintf(stderr, "No ZA context\n"); + return 1; + } + + za = (struct za_context *)regs; + if (za->vl != vl) { + fprintf(stderr, "Got ZA VL %d, expected %d\n", za->vl, vl); + return 1; + } + + fprintf(stderr, "Got expected ZA size %u and VL %d\n", + regs->size, za->vl); + + /* We didn't load any data into ZA so it should be all zeros */ + if (memcmp(zeros, (char *)za + ZA_SIG_REGS_OFFSET, + ZA_SIG_REGS_SIZE(sve_vq_from_vl(za->vl))) != 0) { + fprintf(stderr, "ZA data invalid\n"); + return 1; + } + + return 0; +} + +static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) +{ + int i; + + for (i = 0; i < nvls; i++) { + if (do_one_sme_vl(td, si, uc, vls[i])) + return 1; + } + + td->pass = 1; + + return 0; +} + +struct tdescr tde = { + .name = "Streaming SVE registers", + .descr = "Check that we get the right Streaming SVE registers reported", + /* + * We shouldn't require FA64 but things like memset() used in the + * helpers might use unsupported instructions so for now disable + * the test unless we've got the full instruction set.
+ */ + .feats_required = FEAT_SME | FEAT_SME_FA64, + .timeout = 3, + .init = sme_get_vls, + .run = sme_regs, +}; From 1a920c92cd0c09bb7f94d18029c6dd524035e190 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 1 Nov 2022 22:14:12 +0100 Subject: [PATCH 073/130] arm64: cpufeature: Use kstrtobool() instead of strtobool() strtobool() is the same as kstrtobool(). However, the latter is more widely used within the kernel. In order to remove strtobool() and slightly simplify kstrtox.h, switch to the other function name. While at it, include the corresponding header file (<linux/kstrtox.h>) Signed-off-by: Christophe JAILLET Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/5a1b329cda34aec67615c0d2fd326eb0d6634bf7.1667336095.git.christophe.jaillet@wanadoo.fr Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..acc96317cff5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -1795,7 +1796,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) static int __init parse_kpti(char *str) { bool enabled; - int ret = strtobool(str, &enabled); + int ret = kstrtobool(str, &enabled); if (ret) return ret; @@ -2039,7 +2040,7 @@ static bool enable_pseudo_nmi; static int __init early_enable_pseudo_nmi(char *p) { - return strtobool(p, &enable_pseudo_nmi); + return kstrtobool(p, &enable_pseudo_nmi); } early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); From 00598857e38f56963116a6d70f7d64a29959bce9 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sun, 18 Dec 2022 17:29:41 +0800 Subject: [PATCH 074/130] kselftest/arm64: Remove the local NUM_VL definition It was introduced in commit b77e995e3b96 ("kselftest/arm64: Add a test program to exercise the syscall ABI") but never actually used. Remove it. Signed-off-by: Zenghui Yu Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20221218092942.1940-1-yuzenghui@huawei.com Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/syscall-abi.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 8afcbf6861fd..01aea12ea252 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -20,8 +20,6 @@ #include "syscall-abi.h" -#define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1) - static int default_sme_vl; static int sve_vl_count; From daac835347a52d9d141be281e4657cc08a360e97 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sun, 18 Dec 2022 17:29:42 +0800 Subject: [PATCH 075/130] kselftest/arm64: Correct buffer size for SME ZA storage It looks like a copy-paste error to describe the ZA buffer size using (the number of P registers * the maximum size of a Z register). This doesn't have practical impact though, as we're always allocating enough space even for the architectural maximum ZA storage, with SVL equal to 2048 bits. Switch to using ZA_SIG_REGS_SIZE(SVE_VQ_MAX). setup_za() will need to initialize two 64MB arrays with this change and can be optimized later (if someone complains).
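For reference, the arithmetic works out as follows (a sketch using the UAPI definitions from asm/sigcontext.h and asm/sve_context.h, where __SVE_VQ_BYTES is 16 and SVE_VQ_MAX is 512):

	/* Old size: number of P registers * maximum Z register size */
	SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)
		= 16 * (512 * 16)               = 128 KiB

	/*
	 * New size: architectural maximum ZA storage, a square array
	 * of (SVL / 8) vectors of (SVL / 8) bytes each
	 */
	ZA_SIG_REGS_SIZE(SVE_VQ_MAX)
		= (512 * 16) * (512 * 16)       = 64 MiB

The largest ZA storage a real implementation can expose (SVL of 2048 bits, i.e. VQ 16) is (16 * 16) * (16 * 16) = 64 KiB, which is why the old 128 KiB buffers happened to be large enough in practice.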
Signed-off-by: Zenghui Yu Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20221218092942.1940-2-yuzenghui@huawei.com Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/syscall-abi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c index 01aea12ea252..834616fde23e 100644 --- a/tools/testing/selftests/arm64/abi/syscall-abi.c +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -300,8 +300,8 @@ static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, return errors; } -uint8_t za_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; -uint8_t za_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; +uint8_t za_in[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)]; +uint8_t za_out[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)]; static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr) From cbad0fb2d8d97fa6dd8089c0cc729ced0abacad6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:45:56 +0000 Subject: [PATCH 076/130] ftrace: Add DYNAMIC_FTRACE_WITH_CALL_OPS Architectures without dynamic ftrace trampolines incur an overhead when multiple ftrace_ops are enabled with distinct filters. In these cases, each call site calls a common trampoline which uses ftrace_ops_list_func() to iterate over all enabled ftrace functions, and so incurs an overhead relative to the size of this list (including RCU protection overhead). Architectures with dynamic ftrace trampolines avoid this overhead for call sites which have a single associated ftrace_ops. In these cases, the dynamic trampoline is customized to branch directly to the relevant ftrace function, avoiding the list overhead. On some architectures it's impractical and/or undesirable to implement dynamic ftrace trampolines. For example, arm64 has limited branch ranges and cannot always directly branch from a call site to an arbitrary address (e.g. from a kernel text address to an arbitrary module address). Calls from modules to core kernel text can be indirected via PLTs (allocated at module load time) to address this, but the same is not possible for calls from core kernel text. Using an indirect branch from a call site to an arbitrary trampoline is possible, but requires several more instructions in the function prologue (or immediately before it), and/or comes with far more complex requirements for patching. Instead, this patch adds a new option, where an architecture can associate each call site with a pointer to an ftrace_ops, placed at a fixed offset from the call site. A shared trampoline can recover this pointer and call ftrace_ops::func() without needing to go via ftrace_ops_list_func(), avoiding the associated overhead. This avoids issues with branch range limitations, and avoids the need to allocate and manipulate dynamic trampolines, making it far simpler to implement and maintain, while having similar performance characteristics. Note that this allows for dynamic ftrace_ops to be invoked directly from an architecture's ftrace_caller trampoline, whereas existing code forces the use of ftrace_ops_get_list_func(), which is in part necessary to permit the ftrace_ops to be freed once unregistered *and* to avoid branch/address-generation range limitation on some architectures (e.g. where ops->func is a module address, and may be outside of the direct branch range for callsites within the main kernel image).
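In rough C terms, the fast path this enables looks something like the following sketch (illustrative only: OPS_LITERAL_OFFSET is a made-up name for the architecture's fixed offset, and the real recovery is done in each architecture's ftrace_caller trampoline):

	/*
	 * Sketch of the common trampoline's tail: an 8-byte, naturally
	 * aligned literal holding a struct ftrace_ops pointer is placed
	 * at a fixed offset before each instrumented function, so the
	 * trampoline can recover it from the callsite address and invoke
	 * ops->func() directly, with no list iteration.
	 */
	static void ftrace_caller_body(unsigned long ip, unsigned long parent_ip,
				       struct ftrace_regs *fregs)
	{
		struct ftrace_ops *ops;

		/* The literal is updated atomically, so a plain load suffices */
		ops = READ_ONCE(*(struct ftrace_ops **)(ip - OPS_LITERAL_OFFSET));

		ops->func(ip, parent_ip, ops, fregs);
	}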
The CALL_OPS approach avoids these problems and is safe as: * The existing synchronization in ftrace_shutdown() using synchronize_rcu_tasks_rude() (and synchronize_rcu_tasks()) ensures that no tasks hold a stale reference to an ftrace_ops (e.g. in the middle of the ftrace_caller trampoline, or while invoking ftrace_ops::func), when that ftrace_ops is unregistered. Arguably this could also be relied upon for the existing scheme, permitting dynamic ftrace_ops to be invoked directly when ops->func is in range, but this will require additional logic to handle branch range limitations, and is not handled by this patch. * Each callsite's ftrace_ops pointer literal can hold any valid kernel address, and is updated atomically. As an architecture's ftrace_caller trampoline will atomically load the ops pointer then dereference ops->func, there is no risk of invoking ops->func with a mismatched ops pointer, and updates to the ops pointer do not require special care. A subsequent patch will implement architecture support for arm64. There should be no functional change as a result of this patch alone. Signed-off-by: Mark Rutland Reviewed-by: Steven Rostedt (Google) Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- include/linux/ftrace.h | 18 +++++-- kernel/trace/Kconfig | 7 +++ kernel/trace/ftrace.c | 109 +++++++++++++++++++++++++++++++++++++++-- 3 files changed, 125 insertions(+), 9 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 99f1146614c0..366c730beaa3 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -39,6 +39,7 @@ static inline void ftrace_boot_snapshot(void) { } struct ftrace_ops; struct ftrace_regs; +struct dyn_ftrace; #ifdef CONFIG_FUNCTION_TRACER /* @@ -57,6 +58,9 @@ void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); #endif +extern const struct ftrace_ops ftrace_nop_ops; +extern const struct ftrace_ops ftrace_list_ops; +struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec); #endif /* CONFIG_FUNCTION_TRACER */ /* Main tracing buffer and events set up */ @@ -391,8 +395,6 @@ struct ftrace_func_entry { unsigned long direct; /* for direct lookup only */ }; -struct dyn_ftrace; - #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS extern int ftrace_direct_func_count; int register_ftrace_direct(unsigned long ip, unsigned long addr); @@ -563,6 +565,8 @@ bool is_ftrace_trampoline(unsigned long addr); * IPMODIFY - the record allows for the IP address to be changed. * DISABLED - the record is not ready to be touched yet * DIRECT - there is a direct function to call + * CALL_OPS - the record can use callsite-specific ops + * CALL_OPS_EN - the function is set up to use callsite-specific ops * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flags REGS is set.
When the function has been @@ -580,9 +584,11 @@ enum { FTRACE_FL_DISABLED = (1UL << 25), FTRACE_FL_DIRECT = (1UL << 24), FTRACE_FL_DIRECT_EN = (1UL << 23), + FTRACE_FL_CALL_OPS = (1UL << 22), + FTRACE_FL_CALL_OPS_EN = (1UL << 21), }; -#define FTRACE_REF_MAX_SHIFT 23 +#define FTRACE_REF_MAX_SHIFT 21 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) #define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX) @@ -820,7 +826,8 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \ + defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) /** * ftrace_modify_call - convert from one addr to another (no nop) * @rec: the call site record (e.g. mcount/fentry) @@ -833,6 +840,9 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); * what we expect it to be, and then on success of the compare, * it should write to the location. * + * When using call ops, this is called when the associated ops change, even + * when (addr == old_addr). + * * The code segment at @rec->ip should be a caller to @old_addr * * Return must be: diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 197545241ab8..5df427a2321d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -42,6 +42,9 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS bool +config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS + bool + config HAVE_DYNAMIC_FTRACE_WITH_ARGS bool help @@ -257,6 +260,10 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS depends on DYNAMIC_FTRACE_WITH_REGS depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +config DYNAMIC_FTRACE_WITH_CALL_OPS + def_bool y + depends on HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS + config DYNAMIC_FTRACE_WITH_ARGS def_bool y depends on DYNAMIC_FTRACE diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 442438b93fe9..e634b80f49d1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -125,6 +125,33 @@ struct ftrace_ops global_ops; void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +/* + * Stub used to invoke the list ops without requiring a separate trampoline. + */ +const struct ftrace_ops ftrace_list_ops = { + .func = ftrace_ops_list_func, + .flags = FTRACE_OPS_FL_STUB, +}; + +static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, + struct ftrace_regs *fregs) +{ + /* do nothing */ +} + +/* + * Stub used when a call site is disabled. May be called transiently by threads + * which have made it into ftrace_caller but haven't yet recovered the ops at + * the point the call site is disabled. + */ +const struct ftrace_ops ftrace_nop_ops = { + .func = ftrace_ops_nop_func, + .flags = FTRACE_OPS_FL_STUB, +}; +#endif + static inline void ftrace_ops_init(struct ftrace_ops *ops) { #ifdef CONFIG_DYNAMIC_FTRACE @@ -1814,6 +1841,18 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, * if rec count is zero. */ } + + /* + * If the rec has a single associated ops, and ops->func can be + * called directly, allow the call site to call via the ops. 
+ */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) && + ftrace_rec_count(rec) == 1 && + ftrace_ops_get_func(ops) == ops->func) + rec->flags |= FTRACE_FL_CALL_OPS; + else + rec->flags &= ~FTRACE_FL_CALL_OPS; + count++; /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ @@ -2108,8 +2147,9 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec) struct ftrace_ops *ops = NULL; pr_info("ftrace record flags: %lx\n", rec->flags); - pr_cont(" (%ld)%s", ftrace_rec_count(rec), - rec->flags & FTRACE_FL_REGS ? " R" : " "); + pr_cont(" (%ld)%s%s", ftrace_rec_count(rec), + rec->flags & FTRACE_FL_REGS ? " R" : " ", + rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); if (rec->flags & FTRACE_FL_TRAMP_EN) { ops = ftrace_find_tramp_ops_any(rec); if (ops) { @@ -2177,6 +2217,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) * want the direct enabled (it will be done via the * direct helper). But if DIRECT_EN is set, and * the count is not one, we need to clear it. + * */ if (ftrace_rec_count(rec) == 1) { if (!(rec->flags & FTRACE_FL_DIRECT) != @@ -2185,6 +2226,19 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) } else if (rec->flags & FTRACE_FL_DIRECT_EN) { flag |= FTRACE_FL_DIRECT; } + + /* + * Ops calls are special, as count matters. + * As with direct calls, they must only be enabled when count + * is one, otherwise they'll be handled via the list ops. + */ + if (ftrace_rec_count(rec) == 1) { + if (!(rec->flags & FTRACE_FL_CALL_OPS) != + !(rec->flags & FTRACE_FL_CALL_OPS_EN)) + flag |= FTRACE_FL_CALL_OPS; + } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + flag |= FTRACE_FL_CALL_OPS; + } } /* If the state of this record hasn't changed, then do nothing */ @@ -2229,6 +2283,21 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) rec->flags &= ~FTRACE_FL_DIRECT_EN; } } + + if (flag & FTRACE_FL_CALL_OPS) { + if (ftrace_rec_count(rec) == 1) { + if (rec->flags & FTRACE_FL_CALL_OPS) + rec->flags |= FTRACE_FL_CALL_OPS_EN; + else + rec->flags &= ~FTRACE_FL_CALL_OPS_EN; + } else { + /* + * Can only call directly if there's + * only one set of associated ops. + */ + rec->flags &= ~FTRACE_FL_CALL_OPS_EN; + } + } } /* @@ -2258,7 +2327,8 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) * and REGS states. The _EN flags must be disabled though. */ rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | - FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN); + FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | + FTRACE_FL_CALL_OPS_EN); } ftrace_bug_type = FTRACE_BUG_NOP; @@ -2431,6 +2501,25 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) return NULL; } +struct ftrace_ops * +ftrace_find_unique_ops(struct dyn_ftrace *rec) +{ + struct ftrace_ops *op, *found = NULL; + unsigned long ip = rec->ip; + + do_for_each_ftrace_op(op, ftrace_ops_list) { + + if (hash_contains_ip(ip, op->func_hash)) { + if (found) + return NULL; + found = op; + } + + } while_for_each_ftrace_op(op); + + return found; +} + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS /* Protected by rcu_tasks for reading, and direct_mutex for writing */ static struct ftrace_hash *direct_functions = EMPTY_HASH; @@ -3780,11 +3869,12 @@ static int t_show(struct seq_file *m, void *v) if (iter->flags & FTRACE_ITER_ENABLED) { struct ftrace_ops *ops; - seq_printf(m, " (%ld)%s%s%s", + seq_printf(m, " (%ld)%s%s%s%s", ftrace_rec_count(rec), rec->flags & FTRACE_FL_REGS ? " R" : " ", rec->flags & FTRACE_FL_IPMODIFY ? 
" I" : " ", - rec->flags & FTRACE_FL_DIRECT ? " D" : " "); + rec->flags & FTRACE_FL_DIRECT ? " D" : " ", + rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); if (rec->flags & FTRACE_FL_TRAMP_EN) { ops = ftrace_find_tramp_ops_any(rec); if (ops) { @@ -3800,6 +3890,15 @@ static int t_show(struct seq_file *m, void *v) } else { add_trampoline_func(m, NULL, rec); } + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + if (ops) { + seq_printf(m, "\tops: %pS (%pS)", + ops, ops->func); + } else { + seq_puts(m, "\tops: ERROR!"); + } + } if (rec->flags & FTRACE_FL_DIRECT) { unsigned long direct; From c27cd083cfb9d392f304657ed00fcde1136704e7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:45:57 +0000 Subject: [PATCH 077/130] Compiler attributes: GCC cold function alignment workarounds Contemporary versions of GCC (e.g. GCC 12.2.0) drop the alignment specified by '-falign-functions=N' for functions marked with the __cold__ attribute, and potentially for callees of __cold__ functions as these may be implicitly marked as __cold__ by the compiler. LLVM appears to respect '-falign-functions=N' in such cases. This has been reported to GCC in bug 88345: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345 ... which also covers alignment being dropped when '-Os' is used, which will be addressed in a separate patch. Currently, use of '-falign-functions=N' is limited to CONFIG_FUNCTION_ALIGNMENT, which is largely used for performance and/or analysis reasons (e.g. with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B), but isn't necessary for correct functionality. However, this dropped alignment isn't great for the performance and/or analysis cases. Subsequent patches will use CONFIG_FUNCTION_ALIGNMENT as part of arm64's ftrace implementation, which will require all instrumented functions to be aligned to at least 8-bytes. This patch works around the dropped alignment by avoiding the use of the __cold__ attribute when CONFIG_FUNCTION_ALIGNMENT is non-zero, and by specifically aligning abort(), which GCC implicitly marks as __cold__. As the __cold macro is now dependent upon config options (which is against the policy described at the top of compiler_attributes.h), it is moved into compiler_types.h. I've tested this by building and booting a kernel configured with defconfig + CONFIG_EXPERT=y + CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y, and looking for misaligned text symbols in /proc/kallsyms: * arm64: Before: # uname -rm 6.2.0-rc3 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 5009 After: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 919 * x86_64: Before: # uname -rm 6.2.0-rc3 x86_64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 11537 After: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 x86_64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 2805 There's clearly a substantial reduction in the number of misaligned symbols. From manual inspection, the remaining unaligned text labels are a combination of ACPICA functions (due to the use of '-Os'), static call trampolines, and non-function labels in assembly, which will be dealt with in subsequent patches. 
Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Cc: Miguel Ojeda Cc: Nick Desaulniers Link: https://lore.kernel.org/r/20230123134603.1064407-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- include/linux/compiler_attributes.h | 6 ------ include/linux/compiler_types.h | 27 +++++++++++++++++++++++++++ kernel/exit.c | 9 ++++++++- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 898b3458b24a..b83126452c65 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -75,12 +75,6 @@ # define __assume_aligned(a, ...) #endif -/* - * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute - * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute - */ -#define __cold __attribute__((__cold__)) - /* * Note the long name. * diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 7c1afe0f4129..aab34e30128e 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -79,6 +79,33 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* Attributes */ #include <linux/compiler_attributes.h> +#if CONFIG_FUNCTION_ALIGNMENT > 0 +#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT) +#else +#define __function_aligned +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute + * + * When -falign-functions=N is in use, we must avoid the cold attribute as + * contemporary versions of GCC drop the alignment for cold functions. Worse, + * GCC can implicitly mark callees of cold functions as cold themselves, so + * it's not sufficient to add __function_aligned here as that will not ensure + * that callees are correctly aligned. + * + * See: + * + * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9 + */ +#if !defined(CONFIG_CC_IS_GCC) || (CONFIG_FUNCTION_ALIGNMENT == 0) +#define __cold __attribute__((__cold__)) +#else +#define __cold +#endif + /* Builtins */ /* diff --git a/kernel/exit.c b/kernel/exit.c index 15dc2ec80c46..c8e0375705f4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1898,7 +1898,14 @@ bool thread_group_exited(struct pid *pid) } EXPORT_SYMBOL(thread_group_exited); -__weak void abort(void) +/* + * This needs to be __function_aligned as GCC implicitly makes any + * implementation of abort() cold and drops alignment specified by + * -falign-functions=N. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11 + */ +__weak __function_aligned void abort(void) { BUG(); From 8f9e0a52810dd83406c768972d022c37e7a18f1f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:45:58 +0000 Subject: [PATCH 078/130] ACPI: Don't build ACPICA with '-Os' The ACPICA code has been built with '-Os' since the beginning of git history, though there's no explanatory comment as to why. This is unfortunate as GCC drops the alignment specified by '-falign-functions=N' when '-Os' is used, as reported in GCC bug 88345: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345 This prevents CONFIG_FUNCTION_ALIGNMENT and CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B from having their expected effect on the ACPICA code.
This is doubly unfortunate as in subsequent patches arm64 will depend upon CONFIG_FUNCTION_ALIGNMENT for its ftrace implementation. Drop the '-Os' flag when building the ACPICA code. With this removed, the code builds cleanly and works correctly in testing so far. I've tested this by selecting CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y, building and booting a kernel using ACPI, and looking for misaligned text symbols: * arm64: Before, v6.2-rc3: # uname -rm 6.2.0-rc3 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 5009 Before, v6.2-rc3 + fixed __cold: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 919 After: # uname -rm 6.2.0-rc3-00002-g267bddc38572 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 323 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep acpi | wc -l 0 * x86_64: Before, v6.2-rc3: # uname -rm 6.2.0-rc3 x86_64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 11537 Before, v6.2-rc3 + fixed __cold: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 x86_64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 2805 After: # uname -rm 6.2.0-rc3-00002-g267bddc38572 x86_64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 1357 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep acpi | wc -l 0 With the patch applied, the remaining unaligned text labels are a combination of static call trampolines and labels in assembly, which can be dealt with in subsequent patches. Signed-off-by: Mark Rutland Acked-by: Rafael J. Wysocki Cc: Florent Revest Cc: Len Brown Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Robert Moore Cc: Steven Rostedt Cc: Will Deacon Cc: linux-acpi@vger.kernel.org Link: https://lore.kernel.org/r/20230123134603.1064407-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- drivers/acpi/acpica/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 9e0d95d76fff..30f3fc13c29d 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -3,7 +3,7 @@ # Makefile for ACPICA Core interpreter # -ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA +ccflags-y := -D_LINUX -DBUILDING_ACPICA ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # use acpi.o to put all files here into acpi.o modparam namespace From 47a15aa544279d34e14e17ca3b5855e39b946cec Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:45:59 +0000 Subject: [PATCH 079/130] arm64: Extend support for CONFIG_FUNCTION_ALIGNMENT On arm64 we don't align assembly functions in the same way as C functions. This somewhat limits the utility of CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B for testing, and adds noise when testing that we're correctly aligning functions as will be necessary for ftrace in subsequent patches. Follow the example of x86, and align assembly functions in the same way as C functions. Selecting FUNCTION_ALIGNMENT_4B ensures CONFIG_FUNCTION_ALIGNMENT will be a minimum of 4 bytes, matching the minimum alignment that __ALIGN and __ALIGN_STR provide prior to this patch.
I've tested this by selecting CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y, building and booting a kernel, and looking for misaligned text symbols: Before, v6.2-rc3: # uname -rm 6.2.0-rc3 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 5009 Before, v6.2-rc3 + fixed __cold: # uname -rm 6.2.0-rc3-00001-g2a2bedf8bfa9 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 919 Before, v6.2-rc3 + fixed __cold + fixed ACPICA: # uname -rm 6.2.0-rc3-00002-g267bddc38572 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 323 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep acpi | wc -l 0 After: # uname -rm 6.2.0-rc3-00003-g71db61ee3ea1 aarch64 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | wc -l 112 Considering the remaining 112 unaligned text symbols: * 20 are non-function KVM NVHE assembly symbols, which are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __kvm_nvhe | wc -l 20 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __kvm_nvhe ffffbe6483f73784 t __kvm_nvhe___invalid ffffbe6483f73788 t __kvm_nvhe___do_hyp_init ffffbe6483f73ab0 t __kvm_nvhe_reset ffffbe6483f73b8c T __kvm_nvhe___hyp_idmap_text_end ffffbe6483f73b8c T __kvm_nvhe___hyp_text_start ffffbe6483f77864 t __kvm_nvhe___host_enter_restore_full ffffbe6483f77874 t __kvm_nvhe___host_enter_for_panic ffffbe6483f778a4 t __kvm_nvhe___host_enter_without_restoring ffffbe6483f81178 T __kvm_nvhe___guest_exit_panic ffffbe6483f811c8 T __kvm_nvhe___guest_exit ffffbe6483f81354 t __kvm_nvhe_abort_guest_exit_start ffffbe6483f81358 t __kvm_nvhe_abort_guest_exit_end ffffbe6483f81830 t __kvm_nvhe_wa_epilogue ffffbe6483f81844 t __kvm_nvhe_el1_trap ffffbe6483f81864 t __kvm_nvhe_el1_fiq ffffbe6483f81864 t __kvm_nvhe_el1_irq ffffbe6483f81884 t __kvm_nvhe_el1_error ffffbe6483f818a4 t __kvm_nvhe_el2_sync ffffbe6483f81920 t __kvm_nvhe_el2_error ffffbe6483f865c8 T __kvm_nvhe___start___kvm_ex_table * 53 are position-independent functions only used during early boot, which are built with '-Os', but are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep __pi | wc -l 53 We *could* drop '-Os' when building these for consistency, but that is not necessary to ensure that ftrace works correctly. 
* The remaining 39 are non-function symbols, and 3 runtime BPF functions, which are never instrumented by ftrace: # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep -v __kvm_nvhe | grep -v __pi | wc -l 39 # grep ' [Tt] ' /proc/kallsyms | grep -iv '[048c]0 [Tt] ' | grep -v __kvm_nvhe | grep -v __pi ffffbe6482e1009c T __irqentry_text_end ffffbe6482e10358 T __softirqentry_text_end ffffbe6482e1435c T __entry_text_end ffffbe6482e825f8 T __guest_exit_panic ffffbe6482e82648 T __guest_exit ffffbe6482e827d4 t abort_guest_exit_start ffffbe6482e827d8 t abort_guest_exit_end ffffbe6482e83030 t wa_epilogue ffffbe6482e83044 t el1_trap ffffbe6482e83064 t el1_fiq ffffbe6482e83064 t el1_irq ffffbe6482e83084 t el1_error ffffbe6482e830a4 t el2_sync ffffbe6482e83120 t el2_error ffffbe6482e93550 T sha256_block_neon ffffbe64830f3ae0 t e843419@01cc_00002a0c_3104 ffffbe648378bd90 t e843419@09b3_0000d7cb_bc4 ffffbe6483bdab20 t e843419@0c66_000116e2_34c8 ffffbe6483f62c94 T __noinstr_text_end ffffbe6483f70a18 T __sched_text_end ffffbe6483f70b2c T __cpuidle_text_end ffffbe6483f722d4 T __lock_text_end ffffbe6483f73b8c T __hyp_idmap_text_end ffffbe6483f73b8c T __hyp_text_start ffffbe6483f865c8 T __start___kvm_ex_table ffffbe6483f870d0 t init_el1 ffffbe6483f870f8 t init_el2 ffffbe6483f87324 t pen ffffbe6483f87b48 T __idmap_text_end ffffbe64848eb010 T __hibernate_exit_text_start ffffbe64848eb124 T __hibernate_exit_text_end ffffbe64848eb124 T __relocate_new_kernel_start ffffbe64848eb260 T __relocate_new_kernel_end ffffbe648498a8e8 T _einittext ffffbe648498a8e8 T __exittext_begin ffffbe6484999d84 T __exittext_end ffff8000080756b4 t bpf_prog_6deef7357e7b4530 [bpf] ffff80000808dd78 t bpf_prog_6deef7357e7b4530 [bpf] ffff80000809d684 t bpf_prog_6deef7357e7b4530 [bpf] Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/linkage.h | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 03934808b2ed..6914f6bf41e2 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -123,6 +123,7 @@ config ARM64 select DMA_DIRECT_REMAP select EDAC_SUPPORT select FRAME_POINTER + select FUNCTION_ALIGNMENT_4B select GENERIC_ALLOCATOR select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS_BROADCAST diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 1436fa1cde24..d3acd9c87509 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -5,8 +5,8 @@ #include #endif -#define __ALIGN .align 2 -#define __ALIGN_STR ".align 2" +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT +#define __ALIGN_STR ".balign " #CONFIG_FUNCTION_ALIGNMENT /* * When using in-kernel BTI we need to ensure that PCS-conformant From 2bbbb4015aa1af62f8002de86de019786ecf63a7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:46:00 +0000 Subject: [PATCH 080/130] arm64: insn: Add helpers for BTI In subsequent patches we'd like to check whether an instruction is a BTI. In preparation for this, add basic instruction helpers for BTI instructions. 
Per ARM DDI 0487H.a section C6.2.41, BTI is encoded in binary as follows, MSB to LSB: 1101 0101 0000 0011 0010 0100 xx01 1111 Where the `xx` bits encode J/C/JC: 00 : (omitted) 01 : C 10 : J 11 : JC Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/insn.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index aaf1f52fbf3e..139a88e4e852 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -420,6 +420,7 @@ __AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF) __AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F) __AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F) __AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F) +__AARCH64_INSN_FUNCS(bti, 0xFFFFFF3F, 0xD503241f) #undef __AARCH64_INSN_FUNCS From e4ecbe83fd1a5428d5458de04a3404f1b5444429 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:46:01 +0000 Subject: [PATCH 081/130] arm64: patching: Add aarch64_insn_write_literal_u64() In subsequent patches we'll need to atomically write to a naturally-aligned 64-bit literal embedded within the kernel text. Add a helper for this. For consistency with other text patching code we use copy_to_kernel_nofault(), which is atomic for naturally-aligned accesses up to 64 bits. Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/patching.h | 2 ++ arch/arm64/kernel/patching.c | 17 +++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h index 6bf5adc56295..68908b82b168 100644 --- a/arch/arm64/include/asm/patching.h +++ b/arch/arm64/include/asm/patching.h @@ -7,6 +7,8 @@ int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_write(void *addr, u32 insn); +int aarch64_insn_write_literal_u64(void *addr, u64 val); + int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c index 33e0fabc0b79..b4835f6d594b 100644 --- a/arch/arm64/kernel/patching.c +++ b/arch/arm64/kernel/patching.c @@ -88,6 +88,23 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn) return __aarch64_insn_write(addr, cpu_to_le32(insn)); } +noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val) +{ + u64 *waddr; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); + + ret = copy_to_kernel_nofault(waddr, &val, sizeof(val)); + + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) { u32 *tp = addr; From 90955d778ad7873964a271852b1f24d31e00248b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:46:02 +0000 Subject: [PATCH 082/130] arm64: ftrace: Update stale comment In commit: 26299b3f6ba26bfc ("ftrace: arm64: move from REGS to ARGS") ... we folded ftrace_regs_entry into ftrace_caller, and ftrace_regs_entry no longer exists. Update the comment accordingly.
There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-8-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index b30b955a8921..38ebdf063255 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -209,7 +209,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * | NOP | MOV X9, LR | MOV X9, LR | * | NOP | NOP | BL | * - * The LR value will be recovered by ftrace_regs_entry, and restored into LR + * The LR value will be recovered by ftrace_caller, and restored into LR * before returning to the regular function prologue. When a function is not * being traced, the MOV is not harmful given x9 is not live per the AAPCS. * From baaf553d3bc330697c68a00f96cf11f4edfeac7e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 23 Jan 2023 13:46:03 +0000 Subject: [PATCH 083/130] arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS This patch enables support for DYNAMIC_FTRACE_WITH_CALL_OPS on arm64. This allows each ftrace callsite to provide an ftrace_ops to the common ftrace trampoline, allowing each callsite to invoke distinct tracer functions without the need to fall back to list processing or to allocate custom trampolines for each callsite. This significantly speeds up cases where multiple distinct trace functions are used and callsites are mostly traced by a single tracer. The main idea is to place a pointer to the ftrace_ops as a literal at a fixed offset from the function entry point, which can be recovered by the common ftrace trampoline. Using a 64-bit literal avoids branch range limitations, and permits the ops to be swapped atomically without special considerations that apply to code-patching. In future this will also allow for the implementation of DYNAMIC_FTRACE_WITH_DIRECT_CALLS without branch range limitations by using additional fields in struct ftrace_ops. As noted in the core patch adding support for DYNAMIC_FTRACE_WITH_CALL_OPS, this approach allows for directly invoking ftrace_ops::func even for ftrace_ops which are dynamically-allocated (or part of a module), without going via ftrace_ops_list_func. Currently, this approach is not compatible with CLANG_CFI, as the presence/absence of pre-function NOPs changes the offset of the pre-function type hash, and there's no existing mechanism to ensure a consistent offset for instrumented and uninstrumented functions. When CLANG_CFI is enabled, the existing scheme with a global ops->func pointer is used, and there should be no functional change. I am currently working with others to allow the two to work together in future (though this will likely require updated compiler support). I've benchmarked this with the ftrace_ops sample module [1], which is not currently upstream, but available at: https://lore.kernel.org/lkml/20230103124912.2948963-1-mark.rutland@arm.com git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git ftrace-ops-sample-20230109 Using that module I measured the total time taken for 100,000 calls to a trivial instrumented function, with a number of tracers enabled with relevant filters (which would apply to the instrumented function) and a number of tracers enabled with irrelevant filters (which would not apply to the instrumented function).
I tested on an M1 MacBook Pro, running under a HVF-accelerated QEMU VM (i.e. on real hardware).

Before this patch:

  Number of tracers    || Total time  | Per-call average time (ns)
  Relevant | Irrelevant ||        (ns) |        Total |    Overhead
  =========+============++=============+==============+============
         0 |          0 ||      94,583 |         0.95 |           -
         0 |          1 ||      93,709 |         0.94 |           -
         0 |          2 ||      93,666 |         0.94 |           -
         0 |         10 ||      93,709 |         0.94 |           -
         0 |        100 ||      93,792 |         0.94 |           -
  ---------+------------++-------------+--------------+------------
         1 |          1 ||   6,467,833 |        64.68 |       63.73
         1 |          2 ||   7,509,708 |        75.10 |       74.15
         1 |         10 ||  23,786,792 |       237.87 |      236.92
         1 |        100 || 106,432,500 |     1,064.43 |     1063.38
  ---------+------------++-------------+--------------+------------
         1 |          0 ||   1,431,875 |        14.32 |       13.37
         2 |          0 ||   6,456,334 |        64.56 |       63.62
        10 |          0 ||  22,717,000 |       227.17 |      226.22
       100 |          0 || 103,293,667 |      1032.94 |     1031.99
  ---------+------------++-------------+--------------+------------

  Note: per-call overhead is estimated relative to the baseline case with 0 relevant tracers and 0 irrelevant tracers.

After this patch:

  Number of tracers    || Total time  | Per-call average time (ns)
  Relevant | Irrelevant ||        (ns) |        Total |    Overhead
  =========+============++=============+==============+============
         0 |          0 ||      94,541 |         0.95 |           -
         0 |          1 ||      93,666 |         0.94 |           -
         0 |          2 ||      93,709 |         0.94 |           -
         0 |         10 ||      93,667 |         0.94 |           -
         0 |        100 ||      93,792 |         0.94 |           -
  ---------+------------++-------------+--------------+------------
         1 |          1 ||     281,000 |         2.81 |        1.86
         1 |          2 ||     281,042 |         2.81 |        1.87
         1 |         10 ||     280,958 |         2.81 |        1.86
         1 |        100 ||     281,250 |         2.81 |        1.87
  ---------+------------++-------------+--------------+------------
         1 |          0 ||     280,959 |         2.81 |        1.86
         2 |          0 ||   6,502,708 |        65.03 |       64.08
        10 |          0 ||  18,681,209 |       186.81 |      185.87
       100 |          0 || 103,550,458 |    1,035.50 |     1034.56
  ---------+------------++-------------+--------------+------------

  Note: per-call overhead is estimated relative to the baseline case with 0 relevant tracers and 0 irrelevant tracers.

As can be seen from the above:

a) Whenever there is a single relevant tracer function associated with a tracee, the overhead of invoking the tracer is constant, and does not scale with the number of tracers which are *not* associated with that tracee.

b) The overhead for a single relevant tracer has dropped to ~1/7 of the overhead prior to this series (from 13.37ns to 1.86ns). This is largely due to permitting calls to dynamically-allocated ftrace_ops without going through ftrace_ops_list_func.

I've run the ftrace selftests from v6.2-rc3, which reports:

| # of passed:  110
| # of failed:  0
| # of unresolved:  3
| # of untested:  0
| # of unsupported:  0
| # of xfailed:  1
| # of undefined(test bug):  0

... where the unresolved entries were the tests for DIRECT functions (which are not supported), and the checkbashisms selftest (which is irrelevant here):

| [8] Test ftrace direct functions against tracers	[UNRESOLVED]
| [9] Test ftrace direct functions against kprobes	[UNRESOLVED]
| [62] Meta-selftest: Checkbashisms	[UNRESOLVED]

... with all other tests passing (or failing as expected).
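To see why a single address computation finds the ops literal in both the BTI and non-BTI layouts, the arithmetic can be sketched in C (illustration only; the real logic is the bic/ldr pair in ftrace_caller and the checks in ftrace_call_adjust() below):

	/*
	 * The callsite's LR points just past the patched BL:
	 *
	 *   without BTI: literal + 16 (BL at literal + 12)
	 *   with BTI:    literal + 20 (BL at literal + 16)
	 *
	 * The literal is 8-byte aligned, so rounding the LR down to an
	 * 8-byte boundary yields literal + 16 in both cases.
	 */
	static struct ftrace_ops *ops_from_callsite(unsigned long lr)
	{
		unsigned long aligned_lr = lr & ~7UL;	/* bic x2, x30, 0x7 */

		return *(struct ftrace_ops **)(aligned_lr - 16); /* ldr x2, [x2, #-16] */
	}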
Signed-off-by: Mark Rutland Cc: Florent Revest Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Will Deacon Link: https://lore.kernel.org/r/20230123134603.1064407-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 + arch/arm64/Makefile | 5 +- arch/arm64/include/asm/ftrace.h | 15 +-- arch/arm64/kernel/asm-offsets.c | 4 + arch/arm64/kernel/entry-ftrace.S | 32 ++++++- arch/arm64/kernel/ftrace.c | 156 +++++++++++++++++++++++++++++++ 6 files changed, 195 insertions(+), 20 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6914f6bf41e2..6f6f37161cf6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -124,6 +124,7 @@ config ARM64 select EDAC_SUPPORT select FRAME_POINTER select FUNCTION_ALIGNMENT_4B + select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS select GENERIC_ALLOCATOR select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS_BROADCAST @@ -187,6 +188,8 @@ config ARM64 select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ if $(cc-option,-fpatchable-function-entry=2) + select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \ + if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG) select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ if DYNAMIC_FTRACE_WITH_ARGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index d62bd221828f..4c3be442fbb3 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -139,7 +139,10 @@ endif CHECKFLAGS += -D__aarch64__ -ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y) +ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y) + KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY + CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2 +else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y) KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE := -fpatchable-function-entry=2 endif diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 5664729800ae..1c2672bbbf37 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -62,20 +62,7 @@ extern unsigned long ftrace_graph_call; extern void return_to_handler(void); -static inline unsigned long ftrace_call_adjust(unsigned long addr) -{ - /* - * Adjust addr to point at the BL in the callsite. - * See ftrace_init_nop() for the callsite sequence. - */ - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) - return addr + AARCH64_INSN_SIZE; - /* - * addr is the address of the mcount call instruction. - * recordmcount does the necessary offset calculation. 
- */ - return addr; -} +unsigned long ftrace_call_adjust(unsigned long addr); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS struct dyn_ftrace; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 2234624536d9..ae345b06e9f7 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -193,6 +194,9 @@ int main(void) DEFINE(KIMAGE_HEAD, offsetof(struct kimage, head)); DEFINE(KIMAGE_START, offsetof(struct kimage, start)); BLANK(); +#endif +#ifdef CONFIG_FUNCTION_TRACER + DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func)); #endif return 0; } diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 3b625f76ffba..350ed81324ac 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -65,13 +65,35 @@ SYM_CODE_START(ftrace_caller) stp x29, x30, [sp, #FREGS_SIZE] add x29, sp, #FREGS_SIZE - sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) - mov x1, x9 // parent_ip (callsite's LR) - ldr_l x2, function_trace_op // op - mov x3, sp // regs + /* Prepare arguments for the the tracer func */ + sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) + mov x1, x9 // parent_ip (callsite's LR) + mov x3, sp // regs + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + /* + * The literal pointer to the ops is at an 8-byte aligned boundary + * which is either 12 or 16 bytes before the BL instruction in the call + * site. See ftrace_call_adjust() for details. + * + * Therefore here the LR points at `literal + 16` or `literal + 20`, + * and we can find the address of the literal in either case by + * aligning to an 8-byte boundary and subtracting 16. We do the + * alignment first as this allows us to fold the subtraction into the + * LDR. + */ + bic x2, x30, 0x7 + ldr x2, [x2, #-16] // op + + ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func + blr x4 // op->func(ip, parent_ip, op, regs) + +#else + ldr_l x2, function_trace_op // op SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) - bl ftrace_stub + bl ftrace_stub // func(ip, parent_ip, op, regs) +#endif /* * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 38ebdf063255..5545fe1a9012 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -60,6 +60,89 @@ int ftrace_regs_query_register_offset(const char *name) } #endif +unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * When using mcount, addr is the address of the mcount call + * instruction, and no adjustment is necessary. + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) + return addr; + + /* + * When using patchable-function-entry without pre-function NOPS, addr + * is the address of the first NOP after the function entry point. + * + * The compiler has either generated: + * + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL + * + * Or: + * + * addr-04: BTI C + * addr+00: func: NOP // To be patched to MOV X9, LR + * addr+04: NOP // To be patched to BL + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL `, which is at `addr + 4` bytes in either case. + * + */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return addr + AARCH64_INSN_SIZE; + + /* + * When using patchable-function-entry with pre-function NOPs, addr is + * the address of the first pre-function NOP. 
+ * + * Starting from an 8-byte aligned base, the compiler has either + * generated: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: NOP // To be patched to MOV X9, LR + * addr+12: NOP // To be patched to BL + * + * Or: + * + * addr+00: NOP // Literal (first 32 bits) + * addr+04: NOP // Literal (last 32 bits) + * addr+08: func: BTI C + * addr+12: NOP // To be patched to MOV X9, LR + * addr+16: NOP // To be patched to BL + * + * We must adjust addr to the address of the NOP which will be patched + * to `BL `, which is at either addr+12 or addr+16 depending on + * whether there is a BTI. + */ + + if (!IS_ALIGNED(addr, sizeof(unsigned long))) { + WARN_RATELIMIT(1, "Misaligned patch-site %pS\n", + (void *)(addr + 8)); + return 0; + } + + /* Skip the NOPs placed before the function entry point */ + addr += 2 * AARCH64_INSN_SIZE; + + /* Skip any BTI */ + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) { + u32 insn = le32_to_cpu(*(__le32 *)addr); + + if (aarch64_insn_is_bti(insn)) { + addr += AARCH64_INSN_SIZE; + } else if (insn != aarch64_insn_gen_nop()) { + WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n", + (void *)addr, insn); + } + } + + /* Skip the first NOP after function entry */ + addr += AARCH64_INSN_SIZE; + + return addr; +} + /* * Replace a single instruction, which may be a branch or NOP. * If @validate == true, a replaced instruction is checked against 'old'. @@ -98,6 +181,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned long pc; u32 new; + /* + * When using CALL_OPS, the function to call is associated with the + * call site, and we don't have a global function pointer to update. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return 0; + pc = (unsigned long)ftrace_call; new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, AARCH64_INSN_BRANCH_LINK); @@ -176,6 +266,44 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, return true; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec) +{ + const struct ftrace_ops *ops = NULL; + + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + WARN_ON_ONCE(!ops); + } + + if (!ops) + ops = &ftrace_list_ops; + + return ops; +} + +static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, + const struct ftrace_ops *ops) +{ + unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8); + return aarch64_insn_write_literal_u64((void *)literal, + (unsigned long)ops); +} + +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, &ftrace_nop_ops); +} + +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); +} +#else +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; } +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; } +#endif + /* * Turn on the call to ftrace_caller() in instrumented function */ @@ -183,6 +311,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; u32 old, new; + int ret; + + ret = ftrace_rec_update_ops(rec); + if (ret) + return ret; if (!ftrace_find_callable_addr(rec, NULL, &addr)) return -EINVAL; @@ -193,6 +326,19 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(pc, old, new, true); } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + 
unsigned long addr) +{ + if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller)) + return -EINVAL; + if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller)) + return -EINVAL; + + return ftrace_rec_update_ops(rec); +} +#endif + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS /* * The compiler has inserted two NOPs before the regular function prologue. @@ -220,6 +366,11 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { unsigned long pc = rec->ip - AARCH64_INSN_SIZE; u32 old, new; + int ret; + + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9, @@ -237,9 +388,14 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, { unsigned long pc = rec->ip; u32 old = 0, new; + int ret; new = aarch64_insn_gen_nop(); + ret = ftrace_rec_set_nop_ops(rec); + if (ret) + return ret; + /* * When using mcount, callsites in modules may have been initalized to * call an arbitrary module PLT (which redirects to the _mcount stub) From 82e4958800c01daa7662362ee9543065bd14c852 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:31 +0100 Subject: [PATCH 084/130] arm64: head: Move all finalise_el2 calls to after __enable_mmu In the primary boot path, finalise_el2() is called much later than on the secondary boot or resume-from-suspend paths, and this does not appear to be intentional. Since we aim to do as little as possible before enabling the MMU and caches, align secondary and resume with primary boot, and defer the call to after the MMU is turned on. This also removes the need to clean finalise_el2() to the PoC once we enable support for booting with the MMU on. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 5 ++++- arch/arm64/kernel/sleep.S | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 952e17bd1c0b..c4e12d466a5f 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -587,7 +587,6 @@ SYM_FUNC_START_LOCAL(secondary_startup) * Common entry point for secondary CPUs. */ mov x20, x0 // preserve boot mode - bl finalise_el2 bl __cpu_secondary_check52bitva #if VA_BITS > 48 ldr_l x0, vabits_actual @@ -603,6 +602,10 @@ SYM_FUNC_END(secondary_startup) SYM_FUNC_START_LOCAL(__secondary_switched) mov x0, x20 bl set_cpu_boot_mode_flag + + mov x0, x20 + bl finalise_el2 + str_l xzr, __early_cpu_boot_status, x3 adr_l x5, vectors msr vbar_el1, x5 diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 97c9de57725d..7b7c56e04834 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) bl init_kernel_el - bl finalise_el2 + mov x19, x0 // preserve boot mode #if VA_BITS > 48 ldr_l x0, vabits_actual #endif @@ -116,6 +116,9 @@ SYM_CODE_END(cpu_resume) .popsection SYM_FUNC_START(_cpu_resume) + mov x0, x19 + bl finalise_el2 + mrs x1, mpidr_el1 adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address From af7249b317e4d0b3d5a0ebbb7ee7a0f336ca7bca Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:32 +0100 Subject: [PATCH 085/130] arm64: kernel: move identity map out of .text mapping Reorganize the ID map slightly so that only code that is executed with the MMU off or via the 1:1 mapping remains. 
This allows us to move the identity map out of the .text segment, as it will no longer need executable permissions via the kernel mapping. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 28 +++++++++++++++------------- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/arm64/mm/proc.S | 2 -- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c4e12d466a5f..bec97aad092c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -543,19 +543,6 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) eret SYM_FUNC_END(init_kernel_el) -/* - * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed - * in w0. See arch/arm64/include/asm/virt.h for more info. - */ -SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) - adr_l x1, __boot_cpu_mode - cmp w0, #BOOT_CPU_MODE_EL2 - b.ne 1f - add x1, x1, #4 -1: str w0, [x1] // Save CPU boot mode - ret -SYM_FUNC_END(set_cpu_boot_mode_flag) - /* * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. @@ -599,6 +586,7 @@ SYM_FUNC_START_LOCAL(secondary_startup) br x8 SYM_FUNC_END(secondary_startup) + .text SYM_FUNC_START_LOCAL(__secondary_switched) mov x0, x20 bl set_cpu_boot_mode_flag @@ -631,6 +619,19 @@ SYM_FUNC_START_LOCAL(__secondary_too_slow) b __secondary_too_slow SYM_FUNC_END(__secondary_too_slow) +/* + * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed + * in w0. See arch/arm64/include/asm/virt.h for more info. + */ +SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) + adr_l x1, __boot_cpu_mode + cmp w0, #BOOT_CPU_MODE_EL2 + b.ne 1f + add x1, x1, #4 +1: str w0, [x1] // Save CPU boot mode + ret +SYM_FUNC_END(set_cpu_boot_mode_flag) + /* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. @@ -662,6 +663,7 @@ SYM_FUNC_END(__secondary_too_slow) * Checks if the selected granule size is supported by the CPU. * If it isn't, park the CPU */ + .section ".idmap.text","awx" SYM_FUNC_START(__enable_mmu) mrs x3, ID_AA64MMFR0_EL1 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 4c13dafc98b8..407415a5163a 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -179,7 +179,6 @@ SECTIONS LOCK_TEXT KPROBES_TEXT HYPERVISOR_TEXT - IDMAP_TEXT *(.gnu.warning) . = ALIGN(16); *(.got) /* Global offset table */ @@ -206,6 +205,7 @@ SECTIONS TRAMP_TEXT HIBERNATE_TEXT KEXEC_TEXT + IDMAP_TEXT . = ALIGN(PAGE_SIZE); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 066fa60b93d2..91410f488090 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -110,7 +110,6 @@ SYM_FUNC_END(cpu_do_suspend) * * x0: Address of context pointer */ - .pushsection ".idmap.text", "awx" SYM_FUNC_START(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -166,7 +165,6 @@ alternative_else_nop_endif isb ret SYM_FUNC_END(cpu_do_resume) - .popsection #endif .pushsection ".idmap.text", "awx" From 9d7c13e5dde31270eb48a34204a2e06b1a719546 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:33 +0100 Subject: [PATCH 086/130] arm64: head: record the MMU state at primary entry Prepare for being able to deal with primary entry with the MMU and caches enabled, by recording whether or not we entered with the MMU on in register x19 and in a global variable. 
(Note that setting this variable to '1' does not require cache invalidation, nor is it required for storing the bootargs in that case, so omit the cache maintenance). Since boot with the MMU and caches enabled is not permitted by the bare metal boot protocol, ensure that a diagnostic is emitted and a taint bit set if the MMU was found to be enabled on a non-EFI boot, and panic() once the console is likely to be up. We will make an exception for EFI boot later, which has strict requirements for the mapping of system memory, permitting us to relax the boot protocol and hand over from the EFI stub to the core kernel with MMU and caches left enabled. While at it, add 'pre_disable_mmu_workaround' macro invocations to init_kernel_el, as its manipulation of SCTLR_ELx may amount to disabling of the MMU after subsequent patches. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-4-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 20 ++++++++++++++++++++ arch/arm64/kernel/setup.c | 17 +++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bec97aad092c..c3b898efd3b5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -77,6 +77,7 @@ * primary lowlevel boot path: * * Register Scope Purpose + * x19 primary_entry() .. start_kernel() whether we entered with the MMU on * x20 primary_entry() .. __primary_switch() CPU boot mode * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0 * x22 create_idmap() .. start_kernel() ID map VA of the DT blob @@ -86,6 +87,7 @@ * x28 create_idmap() callee preserved temp register */ SYM_CODE_START(primary_entry) + bl record_mmu_state bl preserve_boot_args bl init_kernel_el // w0=cpu_boot_mode mov x20, x0 @@ -109,6 +111,18 @@ SYM_CODE_START(primary_entry) b __primary_switch SYM_CODE_END(primary_entry) +SYM_CODE_START_LOCAL(record_mmu_state) + mrs x19, CurrentEL + cmp x19, #CurrentEL_EL2 + mrs x19, sctlr_el1 + b.ne 0f + mrs x19, sctlr_el2 +0: tst x19, #SCTLR_ELx_C // Z := (C == 0) + and x19, x19, #SCTLR_ELx_M // isolate M bit + csel x19, xzr, x19, eq // clear x19 if Z + ret +SYM_CODE_END(record_mmu_state) + /* * Preserve the arguments passed by the bootloader in x0 .. x3 */ @@ -119,11 +133,14 @@ SYM_CODE_START_LOCAL(preserve_boot_args) stp x21, x1, [x0] // x0 .. 
x3 at kernel entry stp x2, x3, [x0, #16] + cbnz x19, 0f // skip cache invalidation if MMU is on dmb sy // needed before dc ivac with // MMU off add x1, x0, #0x20 // 4 x 8 bytes b dcache_inval_poc // tail call +0: str_l x19, mmu_enabled_at_boot, x0 + ret SYM_CODE_END(preserve_boot_args) SYM_FUNC_START_LOCAL(clear_page_tables) @@ -497,6 +514,7 @@ SYM_FUNC_START(init_kernel_el) SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) mov_q x0, INIT_SCTLR_EL1_MMU_OFF + pre_disable_mmu_workaround msr sctlr_el1, x0 isb mov_q x0, INIT_PSTATE_EL1 @@ -529,11 +547,13 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) cbz x0, 1f /* Set a sane SCTLR_EL1, the VHE way */ + pre_disable_mmu_workaround msr_s SYS_SCTLR_EL12, x1 mov x2, #BOOT_CPU_FLAG_E2H b 2f 1: + pre_disable_mmu_workaround msr sctlr_el1, x1 mov x2, xzr 2: diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 12cfe9d0d3fa..b8ec7b3ac9cb 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -58,6 +58,7 @@ static int num_standard_resources; static struct resource *standard_resources; phys_addr_t __fdt_pointer __initdata; +u64 mmu_enabled_at_boot __initdata; /* * Standard memory resources @@ -332,8 +333,12 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) xen_early_init(); efi_init(); - if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0) - pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); + if (!efi_enabled(EFI_BOOT)) { + if ((u64)_text % MIN_KIMG_ALIGN) + pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!"); + WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND, + FW_BUG "Booted with MMU enabled!"); + } arm64_memblock_init(); @@ -442,3 +447,11 @@ static int __init register_arm64_panic_block(void) return 0; } device_initcall(register_arm64_panic_block); + +static int __init check_mmu_enabled_at_boot(void) +{ + if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot) + panic("Non-EFI boot detected with MMU and caches enabled"); + return 0; +} +device_initcall_sync(check_mmu_enabled_at_boot); From 32b135a7fafebe7843abe5425159fa081ae56b7c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:34 +0100 Subject: [PATCH 087/130] arm64: head: avoid cache invalidation when entering with the MMU on If we enter with the MMU on, there is no need for explicit cache invalidation for stores to memory, as they will be coherent with the caches. Let's take advantage of this, and create the ID map with the MMU still enabled if that is how we entered, and avoid any cache invalidation calls in that case. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-5-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c3b898efd3b5..d75f41920645 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -89,9 +89,9 @@ SYM_CODE_START(primary_entry) bl record_mmu_state bl preserve_boot_args + bl create_idmap bl init_kernel_el // w0=cpu_boot_mode mov x20, x0 - bl create_idmap /* * The following calls CPU setup code, see arch/arm64/mm/proc.S for @@ -377,12 +377,13 @@ SYM_FUNC_START_LOCAL(create_idmap) * accesses (MMU disabled), invalidate those tables again to * remove any speculatively loaded cache lines. 
*/ + cbnz x19, 0f // skip cache invalidation if MMU is on dmb sy adrp x0, init_idmap_pg_dir adrp x1, init_idmap_pg_end bl dcache_inval_poc - ret x28 +0: ret x28 SYM_FUNC_END(create_idmap) SYM_FUNC_START_LOCAL(create_kernel_mapping) From 3dcf60bbfd284e5ebfa40c56172222425d10abf0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:35 +0100 Subject: [PATCH 088/130] arm64: head: Clean the ID map and the HYP text to the PoC if needed If we enter with the MMU and caches enabled, the bootloader may not have performed any cache maintenance to the PoC. So clean the ID mapped page to the PoC, to ensure that instruction and data accesses with the MMU off see the correct data. For similar reasons, clean all the HYP text to the PoC as well when entering at EL2 with the MMU and caches enabled. Note that this means primary_entry() itself needs to be moved into the ID map as well, as we will return from init_kernel_el() with the MMU and caches off. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-6-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 31 +++++++++++++++++++++++++++---- arch/arm64/kernel/sleep.S | 1 + 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d75f41920645..dc56e1d8f36e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -70,7 +70,7 @@ __EFI_PE_HEADER - __INIT + .section ".idmap.text","awx" /* * The following callee saved general purpose registers are used on the @@ -90,6 +90,17 @@ SYM_CODE_START(primary_entry) bl record_mmu_state bl preserve_boot_args bl create_idmap + + /* + * If we entered with the MMU and caches on, clean the ID mapped part + * of the primary boot code to the PoC so we can safely execute it with + * the MMU off. + */ + cbz x19, 0f + adrp x0, __idmap_text_start + adr_l x1, __idmap_text_end + bl dcache_clean_poc +0: mov x0, x19 bl init_kernel_el // w0=cpu_boot_mode mov x20, x0 @@ -111,6 +122,7 @@ SYM_CODE_START(primary_entry) b __primary_switch SYM_CODE_END(primary_entry) + __INIT SYM_CODE_START_LOCAL(record_mmu_state) mrs x19, CurrentEL cmp x19, #CurrentEL_EL2 @@ -507,10 +519,12 @@ SYM_FUNC_END(__primary_switched) * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if * booted in EL1 or EL2 respectively, with the top 32 bits containing * potential context flags. These flags are *not* stored in __boot_cpu_mode. + * + * x0: whether we are being called from the primary boot path with the MMU on */ SYM_FUNC_START(init_kernel_el) - mrs x0, CurrentEL - cmp x0, #CurrentEL_EL2 + mrs x1, CurrentEL + cmp x1, #CurrentEL_EL2 b.eq init_el2 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) @@ -525,6 +539,14 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) eret SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) + msr elr_el2, lr + + // clean all HYP code to the PoC if we booted at EL2 with the MMU on + cbz x0, 0f + adrp x0, __hyp_idmap_text_start + adr_l x1, __hyp_text_end + bl dcache_clean_poc +0: mov_q x0, HCR_HOST_NVHE_FLAGS msr hcr_el2, x0 isb @@ -558,7 +580,6 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el1, x1 mov x2, xzr 2: - msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 orr x0, x0, x2 eret @@ -569,6 +590,7 @@ SYM_FUNC_END(init_kernel_el) * cores are held until we're ready for them to initialise. 
*/ SYM_FUNC_START(secondary_holding_pen) + mov x0, xzr bl init_kernel_el // w0=cpu_boot_mode mrs x2, mpidr_el1 mov_q x1, MPIDR_HWID_BITMASK @@ -586,6 +608,7 @@ SYM_FUNC_END(secondary_holding_pen) * be used where CPUs are brought online dynamically by the kernel. */ SYM_FUNC_START(secondary_entry) + mov x0, xzr bl init_kernel_el // w0=cpu_boot_mode b secondary_startup SYM_FUNC_END(secondary_entry) diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 7b7c56e04834..2ae7cff1953a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -99,6 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) + mov x0, xzr bl init_kernel_el mov x19, x0 // preserve boot mode #if VA_BITS > 48 From 61786170383093908e9f5f8fd8c5c3ff0c3bbe03 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2023 11:22:36 +0100 Subject: [PATCH 089/130] efi: arm64: enter with MMU and caches enabled Instead of cleaning the entire loaded kernel image to the PoC and disabling the MMU and caches before branching to the kernel's bare metal entry point, we can leave the MMU and caches enabled, and rely on EFI's cacheable 1:1 mapping of all of system RAM (which is mandated by the spec) to populate the initial page tables. This removes the need for managing coherency in software, which is tedious and error prone. Note that we still need to clean the executable region of the image to the PoU if this is required for I/D coherency, but only if we actually decided to move the image in memory, as otherwise, this will have been taken care of by the loader. This change affects both the builtin EFI stub as well as the zboot decompressor, which now carries the entire EFI stub along with the decompression code and the compressed image. Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230111102236.1430401-7-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/efi.h | 2 + arch/arm64/kernel/image-vars.h | 5 +- arch/arm64/mm/cache.S | 1 + drivers/firmware/efi/libstub/Makefile | 4 +- drivers/firmware/efi/libstub/arm64-entry.S | 67 ---------------------- drivers/firmware/efi/libstub/arm64-stub.c | 28 +++++---- drivers/firmware/efi/libstub/arm64.c | 41 +++++++++++-- 7 files changed, 62 insertions(+), 86 deletions(-) delete mode 100644 drivers/firmware/efi/libstub/arm64-entry.S diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 31d13a6001df..0f0e729b40ef 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -105,6 +105,8 @@ static inline unsigned long efi_get_kimg_min_align(void) #define EFI_ALLOC_ALIGN SZ_64K #define EFI_ALLOC_LIMIT ((1UL << 48) - 1) +extern unsigned long primary_entry_offset(void); + /* * On ARM systems, virtually remapped UEFI runtime services are set up in two * distinct stages: diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index d0e9bb5c91fc..73388b21d07d 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -10,7 +10,7 @@ #error This file should only be included in vmlinux.lds.S #endif -PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); +PROVIDE(__efistub_primary_entry = primary_entry); /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to @@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); * linked at. 
The routines below are all implemented in assembler in a * position independent manner */ -PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc); +PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou); PROVIDE(__efistub__text = _text); PROVIDE(__efistub__end = _end); +PROVIDE(__efistub___inittext_end = __inittext_end); PROVIDE(__efistub__edata = _edata); PROVIDE(__efistub_screen_info = screen_info); PROVIDE(__efistub__ctype = _ctype); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 081058d4e436..503567c864fd 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -56,6 +56,7 @@ SYM_FUNC_START(caches_clean_inval_pou) caches_clean_inval_pou_macro ret SYM_FUNC_END(caches_clean_inval_pou) +SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou) /* * caches_clean_inval_user_pou(start,end) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index be8b8c6e8b40..80d85a5169fb 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ screen_info.o efi-stub-entry.o lib-$(CONFIG_ARM) += arm32-stub.o -lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o +lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o @@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS # STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ -STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64 +STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS # For RISC-V, we don't need anything special other than arm64. Keep all the # symbols in .init section and make sure that no absolute symbols references diff --git a/drivers/firmware/efi/libstub/arm64-entry.S b/drivers/firmware/efi/libstub/arm64-entry.S deleted file mode 100644 index b5c17e89a4fc..000000000000 --- a/drivers/firmware/efi/libstub/arm64-entry.S +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * EFI entry point. - * - * Copyright (C) 2013, 2014 Red Hat, Inc. - * Author: Mark Salter - */ -#include -#include - - /* - * The entrypoint of a arm64 bare metal image is at offset #0 of the - * image, so this is a reasonable default for primary_entry_offset. - * Only when the EFI stub is integrated into the core kernel, it is not - * guaranteed that the PE/COFF header has been copied to memory too, so - * in this case, primary_entry_offset should be overridden by the - * linker and point to primary_entry() directly. - */ - .weak primary_entry_offset - -SYM_CODE_START(efi_enter_kernel) - /* - * efi_pe_entry() will have copied the kernel image if necessary and we - * end up here with device tree address in x1 and the kernel entry - * point stored in x0. Save those values in registers which are - * callee preserved. - */ - ldr w2, =primary_entry_offset - add x19, x0, x2 // relocated Image entrypoint - - mov x0, x1 // DTB address - mov x1, xzr - mov x2, xzr - mov x3, xzr - - /* - * Clean the remainder of this routine to the PoC - * so that we can safely disable the MMU and caches. 
- */ - adr x4, 1f - dc civac, x4 - dsb sy - - /* Turn off Dcache and MMU */ - mrs x4, CurrentEL - cmp x4, #CurrentEL_EL2 - mrs x4, sctlr_el1 - b.ne 0f - mrs x4, sctlr_el2 -0: bic x4, x4, #SCTLR_ELx_M - bic x4, x4, #SCTLR_ELx_C - b.eq 1f - b 2f - - .balign 32 -1: pre_disable_mmu_workaround - msr sctlr_el2, x4 - isb - br x19 // jump to kernel entrypoint - -2: pre_disable_mmu_workaround - msr sctlr_el1, x4 - isb - br x19 // jump to kernel entrypoint - - .org 1b + 32 -SYM_CODE_END(efi_enter_kernel) diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7327b98d8e3f..d4a6b12a8741 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, efi_handle_t image_handle) { efi_status_t status; - unsigned long kernel_size, kernel_memsize = 0; + unsigned long kernel_size, kernel_codesize, kernel_memsize; u32 phys_seed = 0; u64 min_kimg_align = efi_get_kimg_min_align(); @@ -93,6 +93,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, SEGMENT_ALIGN >> 10); kernel_size = _edata - _text; + kernel_codesize = __inittext_end - _text; kernel_memsize = kernel_size + (_end - _edata); *reserve_size = kernel_memsize; @@ -121,7 +122,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, */ *image_addr = (u64)_text; *reserve_size = 0; - goto clean_image_to_poc; + return EFI_SUCCESS; } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, @@ -137,14 +138,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, *image_addr = *reserve_addr; memcpy((void *)*image_addr, _text, kernel_size); - -clean_image_to_poc: - /* - * Clean the copied Image to the PoC, and ensure it is not shadowed by - * stale icache entries from before relocation. - */ - dcache_clean_poc(*image_addr, *image_addr + kernel_size); - asm("ic ialluis"); + caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize); return EFI_SUCCESS; } + +asmlinkage void primary_entry(void); + +unsigned long primary_entry_offset(void) +{ + /* + * When built as part of the kernel, the EFI stub cannot branch to the + * kernel proper via the image header, as the PE/COFF header is + * strictly not part of the in-memory presentation of the image, only + * of the file representation. So instead, we need to jump to the + * actual entrypoint in the .text region of the image. 
+ */ + return (char *)primary_entry - _text; +} diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c index ff2d18c42ee7..f5da4fbccd86 100644 --- a/drivers/firmware/efi/libstub/arm64.c +++ b/drivers/firmware/efi/libstub/arm64.c @@ -56,6 +56,12 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; } +#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE +#define DCTYPE "civac" +#else +#define DCTYPE "cvau" +#endif + void efi_cache_sync_image(unsigned long image_base, unsigned long alloc_size, unsigned long code_size) @@ -64,13 +70,38 @@ void efi_cache_sync_image(unsigned long image_base, u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr, CTR_EL0_DminLine_SHIFT); - do { - asm("dc civac, %0" :: "r"(image_base)); - image_base += lsize; - alloc_size -= lsize; - } while (alloc_size >= lsize); + /* only perform the cache maintenance if needed for I/D coherency */ + if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) { + do { + asm("dc " DCTYPE ", %0" :: "r"(image_base)); + image_base += lsize; + code_size -= lsize; + } while (code_size >= lsize); + } asm("ic ialluis"); dsb(ish); isb(); } + +unsigned long __weak primary_entry_offset(void) +{ + /* + * By default, we can invoke the kernel via the branch instruction in + * the image header, so offset #0. This will be overridden by the EFI + * stub build that is linked into the core kernel, as in that case, the + * image header may not have been loaded into memory, or may be mapped + * with non-executable permissions. + */ + return 0; +} + +void __noreturn efi_enter_kernel(unsigned long entrypoint, + unsigned long fdt_addr, + unsigned long fdt_size) +{ + void (* __noreturn enter_kernel)(u64, u64, u64, u64); + + enter_kernel = (void *)entrypoint + primary_entry_offset(); + enter_kernel(fdt_addr, 0, 0, 0); +} From 2ced0f30a426c7301350681f838344d5aea711e3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 25 Jan 2023 19:59:10 +0100 Subject: [PATCH 090/130] arm64: head: Switch endianness before populating the ID map Ensure that the endianness used for populating the ID map matches the endianness that the running kernel will be using, as this is no longer guaranteed now that create_idmap() is invoked before init_kernel_el(). Note that doing so is only safe if the MMU is off, as switching the endianness with the MMU on causes the active ID map to become invalid. So also clear the M bit when toggling the EE bit in SCTLR, and mark the MMU as disabled at boot. Note that the same issue has resulted in preserve_boot_args() recording the contents of registers X0 ... X3 in the wrong byte order, although this is arguably a very minor concern.
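As a rough illustration (not part of the patch itself), the invariant relied on here can be sketched in C using the SCTLR_ELx_EE and SCTLR_ELx_M definitions from sysreg.h; the helper name is invented for illustration only:

	/* Hypothetical sketch: when is it safe to flip SCTLR_ELx.EE? */
	static bool endianness_switch_is_safe(u64 sctlr)
	{
		/*
		 * With the M bit set, the active ID map would be
		 * re-interpreted in the new byte order the moment EE
		 * flips, so the MMU must be off at that point.
		 */
		return !(sctlr & SCTLR_ELx_M);
	}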
Fixes: 32b135a7fafe ("arm64: head: avoid cache invalidation when entering with the MMU on") Reported-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel Tested-by: Nathan Chancellor Link: https://lore.kernel.org/r/20230125185910.962733-1-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 3 ++- arch/arm64/kernel/head.S | 23 ++++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1312fb48f18b..2facd7933948 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -575,6 +575,7 @@ #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) +#define SCTLR_ELx_EE_SHIFT 25 #define SCTLR_ELx_ENIA_SHIFT 31 #define SCTLR_ELx_ITFSB (BIT(37)) @@ -583,7 +584,7 @@ #define SCTLR_ELx_LSMAOE (BIT(29)) #define SCTLR_ELx_nTLSMD (BIT(28)) #define SCTLR_ELx_ENDA (BIT(27)) -#define SCTLR_ELx_EE (BIT(25)) +#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT)) #define SCTLR_ELx_EIS (BIT(22)) #define SCTLR_ELx_IESB (BIT(21)) #define SCTLR_ELx_TSCXT (BIT(20)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index dc56e1d8f36e..107a2b87577c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state) mrs x19, sctlr_el1 b.ne 0f mrs x19, sctlr_el2 -0: tst x19, #SCTLR_ELx_C // Z := (C == 0) +0: +CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f ) +CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f ) + tst x19, #SCTLR_ELx_C // Z := (C == 0) and x19, x19, #SCTLR_ELx_M // isolate M bit csel x19, xzr, x19, eq // clear x19 if Z ret + + /* + * Set the correct endianness early so all memory accesses issued + * before init_kernel_el() occur in the correct byte order. Note that + * this means the MMU must be disabled, or the active ID map will end + * up getting interpreted with the wrong byte order. + */ +1: eor x19, x19, #SCTLR_ELx_EE + bic x19, x19, #SCTLR_ELx_M + b.ne 2f + pre_disable_mmu_workaround + msr sctlr_el2, x19 + b 3f +2: pre_disable_mmu_workaround + msr sctlr_el1, x19 +3: isb + mov x19, xzr + ret SYM_CODE_END(record_mmu_state) From 11fc944f7e14a67a2fbf578da5d7244525cf9571 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Jan 2023 10:16:05 -0800 Subject: [PATCH 091/130] arm64: Kconfig: fix spelling Fix spelling typos in arm64: (reported by codespell) s/upto/up to/ s/familly/family/ Signed-off-by: Randy Dunlap Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20230124181605.14144-1-rdunlap@infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 2 +- arch/arm64/Kconfig.platforms | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8cfd8dcf7e75..035d307c7abb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1485,7 +1485,7 @@ config ARCH_FORCE_MAX_ORDER This config option is actually maximum order plus one. For example, a value of 11 means that the largest free memory block is 2^10 pages. - We make sure that we can allocate upto a HugePage size for each configuration. + We make sure that we can allocate up to a HugePage size for each configuration.
Hence we have : MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2 diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index d1970adf80ab..333d0af650d2 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -187,7 +187,7 @@ config ARCH_MVEBU select PINCTRL_ARMADA_CP110 select PINCTRL_AC5 help - This enables support for Marvell EBU familly, including: + This enables support for Marvell EBU family, including: - Armada 3700 SoC Family - Armada 7K SoC Family - Armada 8K SoC Family From bb457bddee41077768759f6706fa641227877fa0 Mon Sep 17 00:00:00 2001 From: Prathu Baronia Date: Mon, 23 Jan 2023 16:36:38 +0530 Subject: [PATCH 092/130] arm64: el2_setup.h: fix spelling typo in comments - "evailable" -> "available" Signed-off-by: Prathu Baronia Reviewed-by: Mukesh Ojha Link: https://lore.kernel.org/r/20230123110639.10473-1-prathubaronia2011@gmail.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/el2_setup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 668569adf4d3..40567b9bb019 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -177,7 +177,7 @@ /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as - * if VHE was not evailable. The kernel context will be upgraded to VHE + * if VHE was not available. The kernel context will be upgraded to VHE * if possible later on in the boot process * * Regs: x0, x1 and x2 are clobbered. From 54c968bec344b101ba3596f2544f0f3b4c1eef2f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 13 Dec 2022 15:28:49 +0100 Subject: [PATCH 093/130] arm64: Apply dynamic shadow call stack patching in two passes Code patching for the dynamically enabled shadow call stack comes down to finding PACIASP and AUTIASP instructions -which behave as NOPs on cores that do not implement pointer authentication- and converting them into shadow call stack pushes and pops, respectively. Due to past bad experiences with the highly complex and overengineered DWARF standard that describes the unwind metadata that we are using to locate these instructions, let's make this patching logic a little bit more robust so that any issues with the unwind metadata detected at boot time can be dealt with gracefully. The DWARF annotations that are used for this are emitted at function granularity, and due to the fact that the instructions we are patching will simply behave as NOPs if left unpatched, we can abort on errors as long as we don't leave any functions in a half-patched state. So do a dry run of each FDE frame (covering a single function) before performing the actual patching, and give up if the DWARF metadata cannot be understood.
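Concretely, the per-FDE handling in scs_patch() becomes a two-pass sequence of the following shape (condensed from the diff below):

	/* pass 1: dry run, decode the FDE without patching anything */
	ret = scs_handle_fde_frame(frame, fde_has_augmentation_data,
				   code_alignment_factor, true);
	if (ret)
		return ret;	/* metadata not understood, nothing was touched */

	/* pass 2: identical walk over the same FDE, patching for real */
	scs_handle_fde_frame(frame, fde_has_augmentation_data,
			     code_alignment_factor, false);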
Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Reviewed-by: Sami Tolvanen Link: https://lore.kernel.org/r/20221213142849.1629026-1-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/patch-scs.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c index 1b3da02d5b74..a1fe4b4ff591 100644 --- a/arch/arm64/kernel/patch-scs.c +++ b/arch/arm64/kernel/patch-scs.c @@ -130,7 +130,8 @@ struct eh_frame { static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, bool fde_has_augmentation_data, - int code_alignment_factor) + int code_alignment_factor, + bool dry_run) { int size = frame->size - offsetof(struct eh_frame, opcodes) + 4; u64 loc = (u64)offset_to_ptr(&frame->initial_loc); @@ -184,7 +185,8 @@ static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, break; case DW_CFA_negate_ra_state: - scs_patch_loc(loc - 4); + if (!dry_run) + scs_patch_loc(loc - 4); break; case 0x40 ... 0x7f: @@ -235,9 +237,12 @@ int noinstr scs_patch(const u8 eh_frame[], int size) } else { ret = scs_handle_fde_frame(frame, fde_has_augmentation_data, - code_alignment_factor); + code_alignment_factor, + true); if (ret) return ret; + scs_handle_fde_frame(frame, fde_has_augmentation_data, + code_alignment_factor, false); } p += sizeof(frame->size) + frame->size; From dc4824faa265db1bc93449e8ec386a0245404fa6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 27 Jan 2023 11:08:20 +0000 Subject: [PATCH 094/130] arm64: avoid executing padding bytes during kexec / hibernation Currently we rely on the HIBERNATE_TEXT section starting with the entry point to swsusp_arch_suspend_exit, and the KEXEC_TEXT section starting with the entry point to arm64_relocate_new_kernel. In both cases we copy the entire section into a dynamically-allocated page, and then later branch to the start of this page. SYM_FUNC_START() will align the function entry points to CONFIG_FUNCTION_ALIGNMENT, and when the linker later processes the assembled code it will place padding bytes before the function entry point if the location counter was not already sufficiently aligned. The linker happens to use the value zero for these padding bytes. This padding may end up being applied whenever CONFIG_FUNCTION_ALIGNMENT is greater than 4, which can be the case with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B=y or CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y. When such padding is applied, attempting to kexec or resume from hibernate will result in a crash: the kernel will branch to the padding bytes as the start of the dynamically-allocated page, and as those bytes are zero they will decode as UDF #0, which reliably triggers an UNDEFINED exception. For example: | # ./kexec --reuse-cmdline -f Image | [ 46.965800] kexec_core: Starting new kernel | [ 47.143641] psci: CPU1 killed (polled 0 ms) | [ 47.233653] psci: CPU2 killed (polled 0 ms) | [ 47.323465] psci: CPU3 killed (polled 0 ms) | [ 47.324776] Bye!
| [ 47.327072] Internal error: Oops - Undefined instruction: 0000000002000000 [#1] SMP | [ 47.328510] Modules linked in: | [ 47.329086] CPU: 0 PID: 259 Comm: kexec Not tainted 6.2.0-rc5+ #3 | [ 47.330223] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 | [ 47.331497] pstate: 604003c5 (nZCv DAIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | [ 47.332782] pc : 0x43a95000 | [ 47.333338] lr : machine_kexec+0x190/0x1e0 | [ 47.334169] sp : ffff80000d293b70 | [ 47.334845] x29: ffff80000d293b70 x28: ffff000002cc0000 x27: 0000000000000000 | [ 47.336292] x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | [ 47.337744] x23: ffff80000a837858 x22: 0000000048ec9000 x21: 0000000000000010 | [ 47.339192] x20: 00000000adc83000 x19: ffff000000827000 x18: 0000000000000006 | [ 47.340638] x17: ffff800075a61000 x16: ffff800008000000 x15: ffff80000d293658 | [ 47.342085] x14: 0000000000000000 x13: ffff80000d2937f7 x12: ffff80000a7ff6e0 | [ 47.343530] x11: 00000000ffffdfff x10: ffff80000a8ef8e0 x9 : ffff80000813ef00 | [ 47.344976] x8 : 000000000002ffe8 x7 : c0000000ffffdfff x6 : 00000000000affa8 | [ 47.346431] x5 : 0000000000001fff x4 : 0000000000000001 x3 : ffff80000a0a3008 | [ 47.347877] x2 : ffff80000a8220f8 x1 : 0000000043a95000 x0 : ffff000000827000 | [ 47.349334] Call trace: | [ 47.349834] 0x43a95000 | [ 47.350338] kernel_kexec+0x88/0x100 | [ 47.351070] __do_sys_reboot+0x108/0x268 | [ 47.351873] __arm64_sys_reboot+0x2c/0x40 | [ 47.352689] invoke_syscall+0x78/0x108 | [ 47.353458] el0_svc_common.constprop.0+0x4c/0x100 | [ 47.354426] do_el0_svc+0x34/0x50 | [ 47.355102] el0_svc+0x34/0x108 | [ 47.355747] el0t_64_sync_handler+0xf4/0x120 | [ 47.356617] el0t_64_sync+0x194/0x198 | [ 47.357374] Code: bad PC value | [ 47.357999] ---[ end trace 0000000000000000 ]--- | [ 47.358937] Kernel panic - not syncing: Oops - Undefined instruction: Fatal exception | [ 47.360515] Kernel Offset: disabled | [ 47.361230] CPU features: 0x002000,00050108,c8004203 | [ 47.362232] Memory Limit: none Note: Unfortunately the code dump reports "bad PC value" as it attempts to dump some instructions prior to the UDF (i.e. before the start of the page), and terminates early upon a fault, obscuring the problem. This patch fixes this issue by aligning the section start markers to CONFIG_FUNCTION_ALIGNMENT using the ALIGN_FUNCTION() helper, which ensures that the linker never needs to place padding bytes within the section. Assertions are added to verify each section begins with the function we expect, making our implicit requirement explicit. In future it might be nice to rework the kexec and hibernation code to decouple the section start from the entry point, but that involves much more significant changes that come with a higher risk of error, so I've tried to keep this fix as simple as possible for now.
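To make the failure mode concrete, the copied control page looks roughly like this when the linker pads the section start (addresses and padding size below are hypothetical, assuming a 64-byte CONFIG_FUNCTION_ALIGNMENT):

	__relocate_new_kernel_start:		// copy source and branch target
	0x00:	00000000	udf	#0x0	// linker padding (zero bytes)
	...
	0x3c:	00000000	udf	#0x0	// last padding word
	0x40:	arm64_relocate_new_kernel:	// entry point we meant to reach

With ALIGN_FUNCTION() emitted immediately before __relocate_new_kernel_start, the location counter is already aligned, no padding is inserted, and the two symbols coincide, which is exactly what the new ASSERT()s check.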
Fixes: 47a15aa54427 ("arm64: Extend support for CONFIG_FUNCTION_ALIGNMENT") Reported-by: CKI Project Link: https://lore.kernel.org/linux-arm-kernel/29992.123012504212600261@us-mta-139.us.mimecast.lan/ Signed-off-by: Mark Rutland Cc: James Morse Cc: Will Deacon Reviewed-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vmlinux.lds.S | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 4c13dafc98b8..1f6d75f78fd5 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -93,6 +93,7 @@ jiffies = jiffies_64; #ifdef CONFIG_HIBERNATION #define HIBERNATE_TEXT \ + ALIGN_FUNCTION(); \ __hibernate_exit_text_start = .; \ *(.hibernate_exit.text) \ __hibernate_exit_text_end = .; @@ -102,6 +103,7 @@ jiffies = jiffies_64; #ifdef CONFIG_KEXEC_CORE #define KEXEC_TEXT \ + ALIGN_FUNCTION(); \ __relocate_new_kernel_start = .; \ *(.kexec_relocate.text) \ __relocate_new_kernel_end = .; @@ -355,6 +357,8 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, #ifdef CONFIG_HIBERNATION ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K, "Hibernate exit text is bigger than 4 KiB") +ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit, + "Hibernate exit text does not start with swsusp_arch_suspend_exit") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, @@ -381,4 +385,6 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET, ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K, "kexec relocation code is bigger than 4 KiB") ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken") +ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel, + "kexec control page does not start with arm64_relocate_new_kernel") #endif From a873bb493fb192abf7696f84ec4496c5c49a483d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 27 Jan 2023 12:12:56 +0000 Subject: [PATCH 095/130] arm64: traps: attempt to dump all instructions Currently dump_kernel_instr() dumps a few instructions around the pt_regs::pc value, dumping 4 instructions before the PC before dumping the instruction at the PC. If an attempt to read an instruction fails, it gives up and does not attempt to dump any subsequent instructions. This is unfortunate when the pt_regs::pc value points to the start of a page with a leading guard page, where the instruction at the PC can be read, but prior instructions cannot. This patch makes dump_kernel_instr() attempt to dump each instruction regardless of whether a prior instruction could be read, which gives a more useful code dump in such cases.
When an instruction cannot be read, it is reported as "????????", which cannot be confused with a hex value. For example, with a `UDF #0` (AKA 0x00000000) early in the kexec control page, we'll now get the following code dump: | Internal error: Oops - Undefined instruction: 0000000002000000 [#1] SMP | Modules linked in: | CPU: 0 PID: 261 Comm: kexec Not tainted 6.2.0-rc5+ #26 | Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 | pstate: 604003c5 (nZCv DAIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : 0x48c00000 | lr : machine_kexec+0x190/0x200 | sp : ffff80000d36ba80 | x29: ffff80000d36ba80 x28: ffff000002dfc380 x27: 0000000000000000 | x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 | x23: ffff80000a9f7858 x22: 000000004c460000 x21: 0000000000000010 | x20: 00000000ad821000 x19: ffff000000aa0000 x18: 0000000000000006 | x17: ffff8000758a2000 x16: ffff800008000000 x15: ffff80000d36b568 | x14: 0000000000000000 x13: ffff80000d36b707 x12: ffff80000a9bf6e0 | x11: 00000000ffffdfff x10: ffff80000aaaf8e0 x9 : ffff80000815eff8 | x8 : 000000000002ffe8 x7 : c0000000ffffdfff x6 : 00000000000affa8 | x5 : 0000000000001fff x4 : 0000000000000001 x3 : ffff80000a263008 | x2 : ffff80000a9e20f8 x1 : 0000000048c00000 x0 : ffff000000aa0000 | Call trace: | 0x48c00000 | kernel_kexec+0x88/0x138 | __do_sys_reboot+0x108/0x288 | __arm64_sys_reboot+0x2c/0x40 | invoke_syscall+0x78/0x140 | el0_svc_common.constprop.0+0x4c/0x100 | do_el0_svc+0x34/0x80 | el0_svc+0x34/0x140 | el0t_64_sync_handler+0xf4/0x140 | el0t_64_sync+0x194/0x1c0 | Code: ???????? ???????? ???????? ???????? (00000000) | ---[ end trace 0000000000000000 ]--- | Kernel panic - not syncing: Oops - Undefined instruction: Fatal exception | Kernel Offset: disabled | CPU features: 0x002000,00050108,c8004203 | Memory Limit: none Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: James Morse Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230127121256.2141368-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4c0caa589e12..0ccc063daccb 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -162,10 +162,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) if (!bad) p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); - else { - p += sprintf(p, "bad PC value"); - break; - } + else + p += sprintf(p, i == 0 ? "(????????) " : "???????? "); } printk("%sCode: %s\n", lvl, str); From 89ff30b9b72079a1c677500218bdc511eac246d4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 11 Jan 2023 15:02:41 +0000 Subject: [PATCH 096/130] kselftest/arm64: Limit the maximum VL we try to set via ptrace When SVE was initially merged we chose to export the maximum VQ in the ABI as being 512, rather more than the architecturally supported maximum of 16. For the ptrace tests this results in us generating a lot of test cases and hence log output which are redundant since a system couldn't possibly support them. Instead only check values up to the current architectural limit, plus one more so that we're covering the constraining of higher vector lengths. This makes no practical difference to our test coverage, speeds things up on slower consoles and makes the output much more manageable.
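For the sve-ptrace test alone, the arithmetic works out as follows (assuming SVE_VQ_MIN is 1, as defined by the SVE uapi headers):

	VL_TESTS before: ((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 4 = ((512 - 1) + 1) * 4 = 2048
	VL_TESTS after:  ((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4 = ((17 - 1) + 1) * 4 = 68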
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230111-arm64-kselftest-ptrace-max-vl-v1-1-8167f41d1ad8@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/sve-ptrace.c | 14 ++++++++++++-- tools/testing/selftests/arm64/fp/za-ptrace.c | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index 8c4847977583..6d61992fe8a0 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -30,6 +30,16 @@ #define NT_ARM_SSVE 0x40b #endif +/* + * The architecture defines the maximum VQ as 16 but for extensibility + * the kernel specifies the SVE_VQ_MAX as 512 resulting in us running + * a *lot* more tests than are useful if we use it. Until the + * architecture is extended let's limit our coverage to what is + * currently allowed, plus one extra to ensure we cover constraining + * the VL as expected. + */ +#define TEST_VQ_MAX 17 + struct vec_type { const char *name; unsigned long hwcap_type; @@ -55,7 +65,7 @@ static const struct vec_type vec_types[] = { }, }; -#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 4) +#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4) #define FLAG_TESTS 2 #define FPSIMD_TESTS 2 @@ -689,7 +699,7 @@ static int do_parent(pid_t child) } /* Step through every possible VQ */ - for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { + for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) { vl = sve_vl_from_vq(vq); /* First, try to set this vector length */ diff --git a/tools/testing/selftests/arm64/fp/za-ptrace.c b/tools/testing/selftests/arm64/fp/za-ptrace.c index bf6158654056..ac27d87396fc 100644 --- a/tools/testing/selftests/arm64/fp/za-ptrace.c +++ b/tools/testing/selftests/arm64/fp/za-ptrace.c @@ -25,7 +25,17 @@ #define NT_ARM_ZA 0x40c #endif -#define EXPECTED_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) +/* + * The architecture defines the maximum VQ as 16 but for extensibility + * the kernel specifies the SVE_VQ_MAX as 512 resulting in us running + * a *lot* more tests than are useful if we use it. Until the + * architecture is extended let's limit our coverage to what is + * currently allowed, plus one extra to ensure we cover constraining + * the VL as expected. + */ +#define TEST_VQ_MAX 17 + +#define EXPECTED_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 3) static void fill_buf(char *buf, size_t size) { @@ -301,7 +311,7 @@ static int do_parent(pid_t child) ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); /* Step through every possible VQ */ - for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { + for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) { vl = sve_vl_from_vq(vq); /* First, try to set this vector length */ From a70f00e7f1a3132a2ff6f0516fe990e710ae8c1c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 26 Jan 2023 22:39:32 -0800 Subject: [PATCH 097/130] Documentation: arm64: correct spelling Correct spelling problems for Documentation/arm64/ as reported by codespell. 
Signed-off-by: Randy Dunlap Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: Jonathan Corbet Cc: linux-doc@vger.kernel.org Reviewed-by: Mukesh Ojha Link: https://lore.kernel.org/r/20230127064005.1558-3-rdunlap@infradead.org Signed-off-by: Catalin Marinas --- Documentation/arm64/booting.rst | 2 +- Documentation/arm64/elf_hwcaps.rst | 2 +- Documentation/arm64/sve.rst | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst index 96fe10ec6c24..2734fb499cdc 100644 --- a/Documentation/arm64/booting.rst +++ b/Documentation/arm64/booting.rst @@ -223,7 +223,7 @@ Before jumping into the kernel, the following conditions must be met: For systems with a GICv3 interrupt controller to be used in v3 mode: - If EL3 is present: - - ICC_SRE_EL3.Enable (bit 3) must be initialiased to 0b1. + - ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1. - ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across all CPUs the kernel is executing on, and must stay constant diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst index 6fed84f935df..f9f353fc4897 100644 --- a/Documentation/arm64/elf_hwcaps.rst +++ b/Documentation/arm64/elf_hwcaps.rst @@ -14,7 +14,7 @@ Some hardware or software features are only available on some CPU implementations, and/or with certain kernel configurations, but have no architected discovery mechanism available to userspace code at EL0. The kernel exposes the presence of these features to userspace through a set -of flags called hwcaps, exposed in the auxilliary vector. +of flags called hwcaps, exposed in the auxiliary vector. Userspace software can test for features by acquiring the AT_HWCAP or AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant diff --git a/Documentation/arm64/sve.rst b/Documentation/arm64/sve.rst index c7a356bf4e8f..1b90a30382ac 100644 --- a/Documentation/arm64/sve.rst +++ b/Documentation/arm64/sve.rst @@ -175,7 +175,7 @@ the SVE instruction set architecture. When returning from a signal handler: * If there is no sve_context record in the signal frame, or if the record is - present but contains no register data as desribed in the previous section, + present but contains no register data as described in the previous section, then the SVE registers/bits become non-live and take unspecified values. * If sve_context is present in the signal frame and contains full register @@ -223,7 +223,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) Defer the requested vector length change until the next execve() performed by this thread. - The effect is equivalent to implicit exceution of the following + The effect is equivalent to implicit execution of the following call immediately after the next execve() (if any) by the thread: prctl(PR_SVE_SET_VL, arg & ~PR_SVE_SET_VL_ONEXEC) From 004fc58f917cfea5d7190139e3ed1b7a13e39c25 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 30 Jan 2023 17:44:57 +0530 Subject: [PATCH 098/130] arm64/mm: Intercept pfn changes in set_pte_at() Changing the pfn of a live user page table entry without first going through the break-before-make (BBM) procedure is unsafe. This just updates set_pte_at() to intercept such changes, via an updated pgattr_change_is_safe(). This new check happens via __check_racy_pte_update(), which has now been renamed as __check_safe_pte_update().
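For reference, a minimal sketch of the break-before-make sequence a caller is expected to follow when changing the pfn of a live entry; the helpers are the generic mm APIs, shown here purely for illustration:

	pte_clear(mm, addr, ptep);		/* break: remove the old entry */
	flush_tlb_page(vma, addr);		/* discard any stale TLB copies */
	set_pte_at(mm, addr, ptep, new_pte);	/* make: install the new pfn */

Calling set_pte_at() directly with a different pfn, skipping the first two steps, is the pattern the new VM_WARN_ONCE() is intended to catch.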
Cc: Will Deacon Cc: Mark Rutland Cc: Andrew Morton Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Acked-by: Mark Rutland Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20230130121457.1607675-1-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 8 ++++++-- arch/arm64/mm/mmu.c | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b4bbeed80fb6..832c9c8fb58f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } extern void __sync_icache_dcache(pte_t pteval); +bool pgattr_change_is_safe(u64 old, u64 new); /* * PTE bits configuration in the presence of hardware Dirty Bit Management @@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval); * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) */ -static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep, +static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, pte_t pte) { pte_t old_pte; @@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep, VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", __func__, pte_val(old_pte), pte_val(pte)); + VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)), + "%s: unsafe attribute change: 0x%016llx -> 0x%016llx", + __func__, pte_val(old_pte), pte_val(pte)); } static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, @@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, mte_sync_tags(old_pte, pte); } - __check_racy_pte_update(mm, ptep, pte); + __check_safe_pte_update(mm, ptep, pte); set_pte(ptep, pte); } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 14c87e8d69d8..34d5f7c4c64e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -133,7 +133,7 @@ static phys_addr_t __init early_pgtable_alloc(int shift) return phys; } -static bool pgattr_change_is_safe(u64 old, u64 new) +bool pgattr_change_is_safe(u64 old, u64 new) { /* * The following mapping attributes may be updated in live @@ -142,9 +142,13 @@ static bool pgattr_change_is_safe(u64 old, u64 new) pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; /* creating or taking down mappings is always safe */ - if (old == 0 || new == 0) + if (!pte_valid(__pte(old)) || !pte_valid(__pte(new))) return true; + /* A live entry's pfn should not change */ + if (pte_pfn(__pte(old)) != pte_pfn(__pte(new))) + return false; + /* live contiguous mappings may not be manipulated at all */ if ((old | new) & PTE_CONT) return false; From b2ab432bcf65e6fa3ec3fef6dd08796404b009d0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 30 Jan 2023 23:45:57 +0000 Subject: [PATCH 099/130] kselftest/arm64: Remove redundant _start labels from zt-test The newly added zt-test program copied the pattern from the other FP stress test programs of having a redundant _start label which is rejected by clang. As we did in a parallel series for the other tests, remove the label so we can build with clang. No functional change.
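For context, the rejected pattern looked roughly like this; assuming the selftests' function macro already emits the label (which is what makes the explicit one redundant), the second definition is what clang's integrated assembler objects to:

	.globl _start
	function _start
	_start:			// error: symbol '_start' is already defined
		mov	x23, #0	// signal count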
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230130-arm64-fix-sme2-clang-v1-1-3ce81d99ea8f@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/fp/zt-test.S | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/arm64/fp/zt-test.S b/tools/testing/selftests/arm64/fp/zt-test.S index 7ec90976cf5e..d63286397638 100644 --- a/tools/testing/selftests/arm64/fp/zt-test.S +++ b/tools/testing/selftests/arm64/fp/zt-test.S @@ -200,7 +200,6 @@ endfunction // Main program entry point .globl _start function _start -_start: mov x23, #0 // signal count mov w0, #SIGINT From 1e249c41ea43157fc69e1496e3b4feea999f107a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 Jan 2023 10:58:08 +0000 Subject: [PATCH 100/130] arm64: unify asm-arch manipulation Assemblers will reject instructions not supported by a target architecture version, and so we must explicitly tell the assembler the latest architecture version for which we want to assemble instructions. We've added a few AS_HAS_ARMV8_<N> definitions for this, in addition to an inconsistently named AS_HAS_PAC definition, from which arm64's top-level Makefile determines the architecture version that we intend to target, and generates the `asm-arch` variable. To make this a bit clearer and easier to maintain, this patch reworks the Makefile to determine asm-arch in a single if-else-endif chain. AS_HAS_PAC, which is defined when the assembler supports `-march=armv8.3-a`, is renamed to AS_HAS_ARMV8_3. As the logic for armv8.3-a is lifted out of the block handling pointer authentication, `asm-arch` may now be set to armv8.3-a regardless of whether support for pointer authentication is selected. This means that it will be possible to assemble armv8.3-a instructions even if we didn't intend to, but this is consistent with our handling of other architecture versions, and the compiler won't generate armv8.3-a instructions regardless. For the moment there's no need for a CONFIG_AS_HAS_ARMV8_1, as the code for LSE atomics and LDAPR uses individual `.arch_extension` entries and does not require the baseline asm arch to be bumped to armv8.1-a. The other armv8.1-a features (e.g. PAN) do not require assembler support. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230131105809.991288-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 4 ++-- arch/arm64/Makefile | 37 ++++++++++++++++++------------------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 03934808b2ed..be760ad7715d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1818,7 +1818,7 @@ config ARM64_PTR_AUTH_KERNEL bool "Use pointer authentication for kernel" default y depends on ARM64_PTR_AUTH - depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC + depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_ARMV8_3 # Modern compilers insert a .note.gnu.property section note for PAC # which is only understood by binutils starting with version 2.33.1.
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100) @@ -1843,7 +1843,7 @@ config CC_HAS_SIGN_RETURN_ADDRESS # GCC 7, 8 def_bool $(cc-option,-msign-return-address=all) -config AS_HAS_PAC +config AS_HAS_ARMV8_3 def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a) config AS_HAS_CFI_NEGATE_RA_STATE diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index d62bd221828f..e176eb76345b 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -63,11 +63,6 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif -ifeq ($(CONFIG_AS_HAS_ARMV8_2), y) -# make sure to pass the newest target architecture to -march. -asm-arch := armv8.2-a -endif - # Ensure that if the compiler supports branch protection we default it # off, this will be overridden if we are using branch protection. branch-prot-flags-y += $(call cc-option,-mbranch-protection=none) @@ -88,25 +83,29 @@ branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protectio else branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y) endif -# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the -# compiler to generate them and consequently to break the single image contract -# we pass it only to the assembler. This option is utilized only in case of non -# integrated assemblers. -ifeq ($(CONFIG_AS_HAS_PAC), y) -asm-arch := armv8.3-a -endif endif KBUILD_CFLAGS += $(branch-prot-flags-y) -ifeq ($(CONFIG_AS_HAS_ARMV8_4), y) -# make sure to pass the newest target architecture to -march. -asm-arch := armv8.4-a -endif - +# Tell the assembler to support instructions from the latest target +# architecture. +# +# For non-integrated assemblers we'll pass this on the command line, and for +# integrated assemblers we'll define ARM64_ASM_ARCH and ARM64_ASM_PREAMBLE for +# inline usage. +# +# We cannot pass the same arch flag to the compiler as this would allow it to +# freely generate instructions which are not supported by earlier architecture +# versions, which would prevent a single kernel image from working on earlier +# hardware. ifeq ($(CONFIG_AS_HAS_ARMV8_5), y) -# make sure to pass the newest target architecture to -march. -asm-arch := armv8.5-a + asm-arch := armv8.5-a +else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y) + asm-arch := armv8.4-a +else ifeq ($(CONFIG_AS_HAS_ARMV8_3), y) + asm-arch := armv8.3-a +else ifeq ($(CONFIG_AS_HAS_ARMV8_2), y) + asm-arch := armv8.2-a endif ifdef asm-arch From c68cf5285e1896a2b725ec01a1351f08610165b8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 31 Jan 2023 10:58:09 +0000 Subject: [PATCH 101/130] arm64: pauth: don't sign leaf functions Currently, when CONFIG_ARM64_PTR_AUTH_KERNEL=y (and CONFIG_UNWIND_PATCH_PAC_INTO_SCS=n), we enable pointer authentication for all functions, including leaf functions. This isn't necessary, and is unfortunate for a few reasons: * Any PACIASP instruction is implicitly a `BTI C` landing pad, and forcing the addition of a PACIASP in every function introduces a larger set of BTI gadgets than is necessary. * The PACIASP and AUTIASP instructions make leaf functions larger than necessary, bloating the kernel Image. For a defconfig v6.2-rc3 kernel, this appears to add ~64KiB relative to not signing leaf functions, which is unfortunate but not entirely onerous. * The PACIASP and AUTIASP instructions potentially make leaf functions more expensive in terms of performance and/or power. For many trivial leaf functions, this is clearly unnecessary, e.g. 
| : | d503233f paciasp | d53b4220 mrs x0, daif | d50323bf autiasp | d65f03c0 ret | : | d503233f paciasp | d50323bf autiasp | d65f03c0 ret | d503201f nop * When CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y we disable pointer authentication for leaf functions, so clearly this is not functionally necessary, indicates we have an inconsistent threat model, and convolutes the Makefile logic. We've used pointer authentication in leaf functions since the introduction of in-kernel pointer authentication in commit: 74afda4016a7437e ("arm64: compile the kernel with ptrauth return address signing") ... but at the time we had no rationale for signing leaf functions. Subsequently, we considered avoiding signing leaf functions: https://lore.kernel.org/linux-arm-kernel/1586856741-26839-1-git-send-email-amit.kachhap@arm.com/ https://lore.kernel.org/linux-arm-kernel/1588149371-20310-1-git-send-email-amit.kachhap@arm.com/ ... however at the time we didn't have an abundance of reasons to avoid signing leaf functions as above (e.g. the BTI case), we had no hardware to make performance measurements, and it was reasoned that this gave some level of protection against a limited set of code-reuse gadgets which would fall through to a RET. We documented this in commit: 717b938e22f8dbf0 ("arm64: Document why we enable PAC support for leaf functions") Notably, this was before we supported any forward-edge CFI scheme (e.g. Arm BTI, or Clang CFI/kCFI), which would prevent jumping into the middle of a function. In addition, even with signing forced for leaf functions, AUTIASP may be placed before a number of instructions which might constitute such a gadget, e.g. | : | f9400022 ldr x2, [x1] | d503233f paciasp | d50323bf autiasp | f9408401 ldr x1, [x0, #264] | 720b005f tst w2, #0x200000 | b26b0022 orr x2, x1, #0x200000 | 926af821 and x1, x1, #0xffffffffffdfffff | 9a820021 csel x1, x1, x2, eq // eq = none | f9008401 str x1, [x0, #264] | d65f03c0 ret | : | 2a0003e3 mov w3, w0 | 9000ff42 adrp x2, ffff800009ffd000 | 9120e042 add x2, x2, #0x838 | 52800000 mov w0, #0x0 // #0 | d503233f paciasp | f000d041 adrp x1, ffff800009a20000 | d50323bf autiasp | 9102c021 add x1, x1, #0xb0 | f8635842 ldr x2, [x2, w3, uxtw #3] | f821685f str xzr, [x2, x1] | d65f03c0 ret | d503201f nop So generally, trying to use AUTIASP to detect such gadgetization is not robust, and this is dealt with far better by forward-edge CFI (which is designed to prevent such cases). We should bite the bullet and stop pretending that AUTIASP is a mitigation for such forward-edge gadgetization. For the above reasons, this patch has the kernel consistently sign non-leaf functions and avoid signing leaf functions. Considering a defconfig v6.2-rc3 kernel built with LLVM 15.0.6: * The vmlinux is ~43KiB smaller: | [mark@lakrids:~/src/linux]% ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 338547808 Jan 25 17:17 vmlinux-after | -rwxr-xr-x 1 mark mark 338591472 Jan 25 17:22 vmlinux-before * The resulting Image is 64KiB smaller: | [mark@lakrids:~/src/linux]% ls -al Image-* | -rwxr-xr-x 1 mark mark 32702976 Jan 25 17:17 Image-after | -rwxr-xr-x 1 mark mark 32768512 Jan 25 17:22 Image-before * There are ~400 fewer BTI gadgets: | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-before 2> /dev/null | grep -ow 'paciasp\|bti\sc\?' | sort | uniq -c | 1219 bti c | 61982 paciasp | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-after 2> /dev/null | grep -ow 'paciasp\|bti\sc\?' 
| sort | uniq -c | 10099 bti c | 52699 paciasp Which is +8880 BTIs, and -9283 PACIASPs, for -403 unnecessary BTI gadgets. While this is small relative to the total, distinguishing the two cases will make it easier to analyse and reduce this set further in future. Signed-off-by: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Mark Brown Cc: Amit Daniel Kachhap Cc: Will Deacon Link: https://lore.kernel.org/r/20230131105809.991288-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Makefile | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index e176eb76345b..ab1f12b4f339 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -63,29 +63,17 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif -# Ensure that if the compiler supports branch protection we default it -# off, this will be overridden if we are using branch protection. -branch-prot-flags-y += $(call cc-option,-mbranch-protection=none) - -ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y) -branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all -# We enable additional protection for leaf functions as there is some -# narrow potential for ROP protection benefits and no substantial -# performance impact has been observed. -PACRET-y := pac-ret+leaf - -# Using a shadow call stack in leaf functions is too costly, so avoid PAC there -# as well when we may be patching PAC into SCS -PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret - ifeq ($(CONFIG_ARM64_BTI_KERNEL),y) -branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti + KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti +else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y) + ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y) + KBUILD_CFLAGS += -mbranch-protection=pac-ret + else + KBUILD_CFLAGS += -msign-return-address=non-leaf + endif else -branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y) + KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none) endif -endif - -KBUILD_CFLAGS += $(branch-prot-flags-y) # Tell the assembler to support instructions from the latest target # architecture. From 0e62ccb9598dae492588bef0453a059bb2bbbabe Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:25 +0000 Subject: [PATCH 102/130] arm64: rename ARM64_HAS_SYSREG_GIC_CPUIF to ARM64_HAS_GIC_CPUIF_SYSREGS Subsequent patches will add more GIC-related cpucaps. When we do so, it would be nice to give them a consistent HAS_GIC_* prefix. In preparation for doing so, this patch renames the existing ARM64_HAS_SYSREG_GIC_CPUIF cap to ARM64_HAS_GIC_CPUIF_SYSREGS. The 'CPUIF_SYSREGS' suffix is chosen so that this will be ordered ahead of other ARM64_HAS_GIC_* definitions in subsequent patches. The cpucaps file was hand-modified; all other changes were scripted with: find . -type f -name '*.[chS]' -print0 | \ xargs -0 sed -i 's/ARM64_HAS_SYSREG_GIC_CPUIF/ARM64_HAS_GIC_CPUIF_SYSREGS/' There should be no functional change as a result of this patch. 
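For illustration, callers are unchanged by this apart from the cap name; a minimal sketch of querying the renamed cap (the wrapper function here is hypothetical, but this_cpu_has_cap() is the same interface the irq-gic.c hunk below uses):
| /* Hypothetical caller: query the renamed cpucap on the local CPU. */
| static void example_check_gic_cpuif(void)
| {
| 	if (this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS))
| 		pr_info("GICv3 system register CPU interface usable\n");
| }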
Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/tools/cpucaps | 2 +- drivers/irqchip/irq-gic.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a77315b338e6..ad2a1f5503f3 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2142,7 +2142,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }, { .desc = "GIC system register CPU interface", - .capability = ARM64_HAS_SYSREG_GIC_CPUIF, + .capability = ARM64_HAS_GIC_CPUIF_SYSREGS, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index a86ee376920a..373eb148498e 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -28,6 +28,7 @@ HAS_GENERIC_AUTH HAS_GENERIC_AUTH_ARCH_QARMA3 HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF +HAS_GIC_CPUIF_SYSREGS HAS_IRQ_PRIO_MASKING HAS_LDAPR HAS_LSE_ATOMICS @@ -38,7 +39,6 @@ HAS_RAS_EXTN HAS_RNG HAS_SB HAS_STAGE2_FWB -HAS_SYSREG_GIC_CPUIF HAS_TIDCP1 HAS_TLB_RANGE HAS_VIRT_HOST_EXTN diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 210bc2f4d555..6ae697a3800d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -54,7 +54,7 @@ static void gic_check_cpu_features(void) { - WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF), + WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS), TAINT_CPU_OUT_OF_SPEC, "GICv3 system registers enabled, broken firmware!\n"); } From c888b7bd916c4d85e06a6c3e507d5d68b229518f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:26 +0000 Subject: [PATCH 103/130] arm64: rename ARM64_HAS_IRQ_PRIO_MASKING to ARM64_HAS_GIC_PRIO_MASKING Subsequent patches will add more GIC-related cpucaps. When we do so, it would be nice to give them a consistent HAS_GIC_* prefix. In preparation for doing so, this patch renames the existing ARM64_HAS_IRQ_PRIO_MASKING cap to ARM64_HAS_GIC_PRIO_MASKING. The cpucaps file was hand-modified; all other changes were scripted with: find . -type f -name '*.[chS]' -print0 | \ xargs -0 sed -i 's/ARM64_HAS_IRQ_PRIO_MASKING/ARM64_HAS_GIC_PRIO_MASKING/' There should be no functional change as a result of this patch. 
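For context, this cap gates masking IRQs by GIC priority rather than via the DAIF bits; a simplified sketch of the masking write (GIC_PRIO_IRQOFF and SYS_ICC_PMR_EL1 are the real symbols used later in this series, the helper name is illustrative):
| /* Illustrative only: mask IRQs by lowering the GIC priority mask. */
| static inline void example_pmr_mask_irqs(void)
| {
| 	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
| }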
Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 2 +- arch/arm64/include/asm/irqflags.h | 10 +++++----- arch/arm64/include/asm/ptrace.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/tools/cpucaps | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 03d1c9d7af82..c50928398e4b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -806,7 +806,7 @@ static inline bool system_has_full_ptr_auth(void) static __always_inline bool system_uses_irq_prio_masking(void) { return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && - cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); + cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING); } static inline bool system_supports_mte(void) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index b57b9b1e4344..f51653fb90e4 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -35,7 +35,7 @@ static inline void arch_local_irq_enable(void) asm volatile(ALTERNATIVE( "msr daifclr, #3 // arch_local_irq_enable", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); @@ -54,7 +54,7 @@ static inline void arch_local_irq_disable(void) asm volatile(ALTERNATIVE( "msr daifset, #3 // arch_local_irq_disable", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); @@ -70,7 +70,7 @@ static inline unsigned long arch_local_save_flags(void) asm volatile(ALTERNATIVE( "mrs %0, daif", __mrs_s("%0", SYS_ICC_PMR_EL1), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : "=&r" (flags) : : "memory"); @@ -85,7 +85,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) asm volatile(ALTERNATIVE( "and %w0, %w1, #" __stringify(PSR_I_BIT), "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : "=&r" (res) : "r" ((int) flags) : "memory"); @@ -122,7 +122,7 @@ static inline void arch_local_irq_restore(unsigned long flags) asm volatile(ALTERNATIVE( "msr daif, %0", __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_IRQ_PRIO_MASKING) + ARM64_HAS_GIC_PRIO_MASKING) : : "r" (flags) : "memory"); diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41b332c054ab..47ec58031f11 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -194,7 +194,7 @@ struct pt_regs { u32 unused2; #endif u64 sdei_ttbr1; - /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ + /* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. 
*/ u64 pmr_save; u64 stackframe[2]; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ad2a1f5503f3..afd547a5309c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2534,7 +2534,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { * Depends on having GICv3 */ .desc = "IRQ priority masking", - .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .capability = ARM64_HAS_GIC_PRIO_MASKING, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, .sys_reg = SYS_ID_AA64PFR0_EL1, diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 11cb99c4d298..e2d1d3d5de1d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -312,7 +312,7 @@ alternative_else_nop_endif #ifdef CONFIG_ARM64_PSEUDO_NMI /* Save pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if ARM64_HAS_GIC_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET @@ -337,7 +337,7 @@ alternative_else_nop_endif #ifdef CONFIG_ARM64_PSEUDO_NMI /* Restore pmr */ -alternative_if ARM64_HAS_IRQ_PRIO_MASKING +alternative_if ARM64_HAS_GIC_PRIO_MASKING ldr x20, [sp, #S_PMR_SAVE] msr_s SYS_ICC_PMR_EL1, x20 mrs_s x21, SYS_ICC_CTLR_EL1 diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 373eb148498e..c993d43624b3 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -29,7 +29,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA3 HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS -HAS_IRQ_PRIO_MASKING +HAS_GIC_PRIO_MASKING HAS_LDAPR HAS_LSE_ATOMICS HAS_NO_FPSIMD From 4b43f1cd70df69c99aeee9758c16f4de913c7bad Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:27 +0000 Subject: [PATCH 104/130] arm64: make ARM64_HAS_GIC_PRIO_MASKING depend on ARM64_HAS_GIC_CPUIF_SYSREGS Currently the arm64_cpu_capabilities structure for ARM64_HAS_GIC_PRIO_MASKING open-codes the same CPU field definitions as the arm64_cpu_capabilities structure for ARM64_HAS_GIC_CPUIF_SYSREGS, so that can_use_gic_priorities() can use has_useable_gicv3_cpuif(). This duplication isn't ideal for the legibility of the code, and sets a bad example for any ARM64_HAS_GIC_* definitions added by subsequent patches. Instead, have ARM64_HAS_GIC_PRIO_MASKING check for the ARM64_HAS_GIC_CPUIF_SYSREGS cpucap, and add a comment explaining why this is safe. Subsequent patches will use the same pattern where one cpucap depends upon another. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index afd547a5309c..515975f42d03 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2046,7 +2046,15 @@ early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, int scope) { - return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); + /* + * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU + * feature, so will be detected earlier. 
+ */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS); + if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) + return false; + + return enable_pseudo_nmi; } #endif @@ -2537,11 +2545,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_GIC_PRIO_MASKING, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, - .sys_reg = SYS_ID_AA64PFR0_EL1, - .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, - .field_width = 4, - .sign = FTR_UNSIGNED, - .min_field_value = 1, }, #endif #ifdef CONFIG_ARM64_E0PD From 8bf0a8048b155eebc05aa8896f4c378a4c538214 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:28 +0000 Subject: [PATCH 105/130] arm64: add ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap When Priority Mask Hint Enable (PMHE) == 0b1, the GIC may use the PMR value to determine whether to signal an IRQ to a PE, and consequently after a change to the PMR value, a DSB SY may be required to ensure that interrupts are signalled to a CPU in finite time. When PMHE == 0b0, interrupts are always signalled to the relevant PE, and all masking occurs locally, without requiring a DSB SY. Since commit: f226650494c6aa87 ("arm64: Relax ICC_PMR_EL1 accesses when ICC_CTLR_EL1.PMHE is clear") ... we handle this dynamically: in most cases a static key is used to determine whether to issue a DSB SY, but the entry code must read from ICC_CTLR_EL1 as static keys aren't accessible from plain assembly. It would be much nicer to use an alternative instruction sequence for the DSB, as this would avoid the need to read from ICC_CTLR_EL1 in the entry code, and for most other code this will result in simpler code generation with fewer instructions and fewer branches. This patch adds a new ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap which is only set when ICC_CTLR_EL1.PMHE == 0b0 (and GIC priority masking is in use). This allows us to replace the existing users of the `gic_pmr_sync` static key with alternative sequences which default to a DSB SY and are relaxed to a NOP when PMHE is not in use. The entry assembly management of the PMR is slightly restructured to use a branch (rather than multiple NOPs) when priority masking is not in use. This is more in keeping with other alternatives in the entry assembly, and permits the use of a separate alternative for the PMHE-dependent DSB SY (and removal of the conditional branch this currently requires). For consistency I've adjusted both the save and restore paths. According to bloat-o-meter, when building defconfig + CONFIG_ARM64_PSEUDO_NMI=y this shrinks the kernel text by ~4KiB: | add/remove: 4/2 grow/shrink: 42/310 up/down: 332/-5032 (-4700) The resulting vmlinux is ~66KiB smaller, though the resulting Image size is unchanged due to padding and alignment: | [mark@lakrids:~/src/linux]% ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 137508344 Jan 17 14:11 vmlinux-after | -rwxr-xr-x 1 mark mark 137575440 Jan 17 13:49 vmlinux-before | [mark@lakrids:~/src/linux]% ls -al Image-* | -rw-r--r-- 1 mark mark 38777344 Jan 17 14:11 Image-after | -rw-r--r-- 1 mark mark 38777344 Jan 17 13:49 Image-before Prior to this patch we did not verify the state of ICC_CTLR_EL1.PMHE on secondary CPUs. As of this patch this is verified by the cpufeature code when using GIC priority masking (i.e. when using pseudo-NMIs). Note that since commit: 7e3a57fa6ca831fa ("arm64: Document ICC_CTLR_EL3.PMHE setting requirements") ...
Documentation/arm64/booting.rst specifies: | - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across | all CPUs the kernel is executing on, and must stay constant | for the lifetime of the kernel. ... so that should not adversely affect any compliant systems, and as we'll only check for the absence of PMHE when using pseudo-NMIs, this will only fire when such a mismatch would adversely affect the system. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm/include/asm/arch_gicv3.h | 5 ++++ arch/arm64/include/asm/arch_gicv3.h | 5 ++++ arch/arm64/include/asm/barrier.h | 11 +++++---- arch/arm64/kernel/cpufeature.c | 36 +++++++++++++++++++++++++++++ arch/arm64/kernel/entry.S | 25 ++++++++++++-------- arch/arm64/kernel/image-vars.h | 2 -- arch/arm64/tools/cpucaps | 1 + drivers/irqchip/irq-gic-v3.c | 19 +-------------- 8 files changed, 71 insertions(+), 33 deletions(-) diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index f82a819eb0db..311e83038bdb 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -252,5 +252,10 @@ static inline void gic_arch_enable_irqs(void) WARN_ON_ONCE(true); } +static inline bool gic_has_relaxed_pmr_sync(void) +{ + return false; +} + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 48d4473e8eee..01281a5336cf 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void) asm volatile ("msr daifclr, #3" : : : "memory"); } +static inline bool gic_has_relaxed_pmr_sync(void) +{ + return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 2cfc4245d2e2..3dd8982a9ce3 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -11,6 +11,8 @@ #include <linux/kasan-checks.h> +#include <asm/alternative-macros.h> + #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) asm volatile(__nops(n)) @@ -41,10 +43,11 @@ #ifdef CONFIG_ARM64_PSEUDO_NMI #define pmr_sync() \ do { \ - extern struct static_key_false gic_pmr_sync; \ - \ - if (static_branch_unlikely(&gic_pmr_sync)) \ - dsb(sy); \ + asm volatile( \ + ALTERNATIVE_CB("dsb sy", \ + ARM64_HAS_GIC_PRIO_RELAXED_SYNC, \ + alt_cb_patch_nops) \ + ); \ } while(0) #else #define pmr_sync() do {} while (0) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 515975f42d03..bdabfc98226a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2056,6 +2056,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, return enable_pseudo_nmi; } + +static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry, + int scope) +{ + /* + * If we're not using priority masking then we won't be poking PMR_EL1, + * and there's no need to relax synchronization of writes to it, and + * ICC_CTLR_EL1 might not be accessible and we must avoid reads from + * that. + * + * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU + * feature, so will be detected earlier.
+ */ + BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING); + if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING)) + return false; + + /* + * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a + * hint for interrupt distribution, a DSB is not necessary when + * unmasking IRQs via PMR, and we can relax the barrier to a NOP. + * + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0; +} #endif #ifdef CONFIG_ARM64_BTI @@ -2546,6 +2574,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = can_use_gic_priorities, }, + { + /* + * Depends on ARM64_HAS_GIC_PRIO_MASKING + */ + .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_gic_prio_relaxed_sync, + }, #endif #ifdef CONFIG_ARM64_E0PD { diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e2d1d3d5de1d..8427cdc0cfcb 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -311,13 +311,16 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Save pmr */ -alternative_if ARM64_HAS_GIC_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_save\@ +alternative_else_nop_endif + mrs_s x20, SYS_ICC_PMR_EL1 str x20, [sp, #S_PMR_SAVE] mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET msr_s SYS_ICC_PMR_EL1, x20 -alternative_else_nop_endif + +.Lskip_pmr_save\@: #endif /* @@ -336,15 +339,19 @@ alternative_else_nop_endif .endif #ifdef CONFIG_ARM64_PSEUDO_NMI - /* Restore pmr */ -alternative_if ARM64_HAS_GIC_PRIO_MASKING +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_restore\@ +alternative_else_nop_endif + ldr x20, [sp, #S_PMR_SAVE] msr_s SYS_ICC_PMR_EL1, x20 - mrs_s x21, SYS_ICC_CTLR_EL1 - tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE - dsb sy // Ensure priority change is seen by redistributor -.L__skip_pmr_sync\@: + + /* Ensure priority change is seen by redistributor */ +alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC + dsb sy alternative_else_nop_endif + +.Lskip_pmr_restore\@: #endif ldp x21, x22, [sp, #S_PC] // load ELR, SPSR diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index d0e9bb5c91fc..97e750a35f70 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -67,9 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); -/* Static key checked in pmr_sync(). */ #ifdef CONFIG_ARM64_PSEUDO_NMI -KVM_NVHE_ALIAS(gic_pmr_sync); /* Static key checked in GIC_PRIO_IRQOFF. 
*/ KVM_NVHE_ALIAS(gic_nonsecure_priorities); #endif diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index c993d43624b3..10ce8f88f86b 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -30,6 +30,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA5 HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS HAS_GIC_PRIO_MASKING +HAS_GIC_PRIO_RELAXED_SYNC HAS_LDAPR HAS_LSE_ATOMICS diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 997104d4338e..3779836737c8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -89,15 +89,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); -/* - * Global static key controlling whether an update to PMR allowing more - * interrupts requires to be propagated to the redistributor (DSB SY). - * And this needs to be exported for modules to be able to enable - * interrupts... - */ -DEFINE_STATIC_KEY_FALSE(gic_pmr_sync); -EXPORT_SYMBOL(gic_pmr_sync); - DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); @@ -1768,16 +1759,8 @@ static void gic_enable_nmi_support(void) for (i = 0; i < gic_data.ppi_nr; i++) refcount_set(&ppi_nmi_refs[i], 0); - /* - * Linux itself doesn't use 1:N distribution, so has no need to - * set PMHE. The only reason to have it set is if EL3 requires it - * (and we can't change it). - */ - if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) - static_branch_enable(&gic_pmr_sync); - pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", - static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed"); + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); /* * How priority values are used by the GIC depends on two things: From a5f61cc636f48bdf09450dba72c0bc914f9eed2f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 30 Jan 2023 14:54:29 +0000 Subject: [PATCH 106/130] arm64: irqflags: use alternative branches for pseudo-NMI logic Due to the way we use alternatives in the irqflags code, even when CONFIG_ARM64_PSEUDO_NMI=n, we generate unused alternative code for pseudo-NMI management. This patch reworks the irqflags code to remove the redundant code when CONFIG_ARM64_PSEUDO_NMI=n, which benefits the more common case, and will permit further rework of our DAIF management (e.g. in preparation for ARMv8.8-A's NMI feature). Prior to this patch a defconfig kernel has hundreds of redundant instructions to access ICC_PMR_EL1 (which should only need to be manipulated in setup code), which this patch removes: | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-before-defconfig | grep icc_pmr_el1 | wc -l | 885 | [mark@lakrids:~/src/linux]% usekorg 12.1.0 aarch64-linux-objdump -d vmlinux-after-defconfig | grep icc_pmr_el1 | wc -l | 5 Those instructions alone account for more than 3KiB of kernel text, and will be associated with additional alt_instr entries, padding and branches, etc. These redundant instructions exist because we use alternative sequences to choose between DAIF / PMR management in irqflags.h, and even when CONFIG_ARM64_PSEUDO_NMI=n, those alternative sequences will generate the code for PMR management, along with alt_instr entries. We use alternatives here as this was necessary to ensure that we never encounter a mismatched local_irq_save() ... local_irq_restore() sequence in the middle of patching, which was possible to see if we used static keys to choose between DAIF and PMR management.
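For reference, the dispatch helper this patch introduces has the following shape (simplified from the irqflags.h diff below; surrounding context elided):
| static __always_inline bool __irqflags_uses_pmr(void)
| {
| 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
| 	       alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
| }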
Since commit: 21fb26bfb01ffe0d ("arm64: alternatives: add alternative_has_feature_*()") ... we have a mechanism to use alternatives similarly to static keys, allowing us to write the bulk of the logic in C code while also being able to rely on all sites being patched in one go, and avoiding a mismatched local_irq_save() ... local_irq_restore() sequence during patching. This patch rewrites arm64's local_irq_*() functions to use alternative branches. This allows the pseudo-NMI code to be entirely elided when CONFIG_ARM64_PSEUDO_NMI=n, making a defconfig Image 64KiB smaller, and not affecting the size of an Image with CONFIG_ARM64_PSEUDO_NMI=y: | [mark@lakrids:~/src/linux]% ls -al vmlinux-* | -rwxr-xr-x 1 mark mark 137473432 Jan 18 11:11 vmlinux-after-defconfig | -rwxr-xr-x 1 mark mark 137918776 Jan 18 11:15 vmlinux-after-pnmi | -rwxr-xr-x 1 mark mark 137380152 Jan 18 11:03 vmlinux-before-defconfig | -rwxr-xr-x 1 mark mark 137523704 Jan 18 11:08 vmlinux-before-pnmi | [mark@lakrids:~/src/linux]% ls -al Image-* | -rw-r--r-- 1 mark mark 38646272 Jan 18 11:11 Image-after-defconfig | -rw-r--r-- 1 mark mark 38777344 Jan 18 11:14 Image-after-pnmi | -rw-r--r-- 1 mark mark 38711808 Jan 18 11:03 Image-before-defconfig | -rw-r--r-- 1 mark mark 38777344 Jan 18 11:08 Image-before-pnmi Some sensitive code depends on being run with interrupts enabled or with interrupts disabled, and so when enabling or disabling interrupts we must ensure that the compiler does not move such code around the actual enable/disable. Before this patch, that was ensured by the combined asm volatile blocks having memory clobbers (and any sensitive code either being asm volatile, or touching memory). This patch consistently uses explicit barrier() operations before and after the enable/disable, which allows us to use the usual sysreg accessors (which are asm volatile) to manipulate the interrupt masks. The use of pmr_sync() is pulled within this critical section for consistency. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Cc: Mark Brown Cc: Will Deacon Link: https://lore.kernel.org/r/20230130145429.903791-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/irqflags.h | 209 ++++++++++++++++++++---------- 1 file changed, 141 insertions(+), 68 deletions(-) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index f51653fb90e4..e0f5f6b73edd 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -21,43 +21,77 @@ * exceptions should be unmasked. */ -/* - * CPU interrupt mask handling.
- */ -static inline void arch_local_irq_enable(void) +static __always_inline bool __irqflags_uses_pmr(void) { - if (system_has_prio_mask_debugging()) { - u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING); +} +static __always_inline void __daif_local_irq_enable(void) +{ + barrier(); + asm volatile("msr daifclr, #3"); + barrier(); +} + +static __always_inline void __pmr_local_irq_enable(void) +{ + if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); } - asm volatile(ALTERNATIVE( - "msr daifclr, #3 // arch_local_irq_enable", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" ((unsigned long) GIC_PRIO_IRQON) - : "memory"); - + barrier(); + write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1); pmr_sync(); + barrier(); +} + +static inline void arch_local_irq_enable(void) +{ + if (__irqflags_uses_pmr()) { + __pmr_local_irq_enable(); + } else { + __daif_local_irq_enable(); + } +} + +static __always_inline void __daif_local_irq_disable(void) +{ + barrier(); + asm volatile("msr daifset, #3"); + barrier(); +} + +static __always_inline void __pmr_local_irq_disable(void) +{ + if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); + } + + barrier(); + write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1); + barrier(); } static inline void arch_local_irq_disable(void) { - if (system_has_prio_mask_debugging()) { - u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); - - WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); + if (__irqflags_uses_pmr()) { + __pmr_local_irq_disable(); + } else { + __daif_local_irq_disable(); } +} - asm volatile(ALTERNATIVE( - "msr daifset, #3 // arch_local_irq_disable", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" ((unsigned long) GIC_PRIO_IRQOFF) - : "memory"); +static __always_inline unsigned long __daif_local_save_flags(void) +{ + return read_sysreg(daif); +} + +static __always_inline unsigned long __pmr_local_save_flags(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); } /* @@ -65,69 +99,108 @@ static inline void arch_local_irq_disable(void) */ static inline unsigned long arch_local_save_flags(void) { - unsigned long flags; + if (__irqflags_uses_pmr()) { + return __pmr_local_save_flags(); + } else { + return __daif_local_save_flags(); + } +} - asm volatile(ALTERNATIVE( - "mrs %0, daif", - __mrs_s("%0", SYS_ICC_PMR_EL1), - ARM64_HAS_GIC_PRIO_MASKING) - : "=&r" (flags) - : - : "memory"); +static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags) +{ + return flags & PSR_I_BIT; +} + +static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags) +{ + return flags != GIC_PRIO_IRQON; +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + if (__irqflags_uses_pmr()) { + return __pmr_irqs_disabled_flags(flags); + } else { + return __daif_irqs_disabled_flags(flags); + } +} + +static __always_inline bool __daif_irqs_disabled(void) +{ + return __daif_irqs_disabled_flags(__daif_local_save_flags()); +} + +static __always_inline bool __pmr_irqs_disabled(void) +{ + return __pmr_irqs_disabled_flags(__pmr_local_save_flags()); +} + +static inline bool arch_irqs_disabled(void) +{ + if (__irqflags_uses_pmr()) { + return __pmr_irqs_disabled(); + } else { + return 
__daif_irqs_disabled(); + } +} + +static __always_inline unsigned long __daif_local_irq_save(void) +{ + unsigned long flags = __daif_local_save_flags(); + + __daif_local_irq_disable(); return flags; } -static inline int arch_irqs_disabled_flags(unsigned long flags) +static __always_inline unsigned long __pmr_local_irq_save(void) { - int res; - - asm volatile(ALTERNATIVE( - "and %w0, %w1, #" __stringify(PSR_I_BIT), - "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON), - ARM64_HAS_GIC_PRIO_MASKING) - : "=&r" (res) - : "r" ((int) flags) - : "memory"); - - return res; -} - -static inline int arch_irqs_disabled(void) -{ - return arch_irqs_disabled_flags(arch_local_save_flags()); -} - -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - - flags = arch_local_save_flags(); + unsigned long flags = __pmr_local_save_flags(); /* * There are too many states with IRQs disabled, just keep the current * state if interrupts are already disabled/masked. */ - if (!arch_irqs_disabled_flags(flags)) - arch_local_irq_disable(); + if (!__pmr_irqs_disabled_flags(flags)) + __pmr_local_irq_disable(); return flags; } +static inline unsigned long arch_local_irq_save(void) +{ + if (__irqflags_uses_pmr()) { + return __pmr_local_irq_save(); + } else { + return __daif_local_irq_save(); + } +} + +static __always_inline void __daif_local_irq_restore(unsigned long flags) +{ + barrier(); + write_sysreg(flags, daif); + barrier(); +} + +static __always_inline void __pmr_local_irq_restore(unsigned long flags) +{ + barrier(); + write_sysreg_s(flags, SYS_ICC_PMR_EL1); + pmr_sync(); + barrier(); +} + /* * restore saved IRQ state */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile(ALTERNATIVE( - "msr daif, %0", - __msr_s(SYS_ICC_PMR_EL1, "%0"), - ARM64_HAS_GIC_PRIO_MASKING) - : - : "r" (flags) - : "memory"); - - pmr_sync(); + if (__irqflags_uses_pmr()) { + __pmr_local_irq_restore(flags); + } else { + __daif_local_irq_restore(flags); + } } #endif /* __ASM_IRQFLAGS_H */ From 4365eec8190c237aea723e6ac9529789215558e1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 18:28:05 +0000 Subject: [PATCH 107/130] kselftest/arm64: Don't require FA64 for streaming SVE tests During early development a dependedncy was added on having FA64 available so we could use the full FPSIMD register set in the signal handler. Subsequently the ABI was finialised so the handler is run with streaming mode disabled meaning this is redundant but the dependency was never removed, do so now. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230131-arm64-kselfetest-ssve-fa64-v1-1-f418efcc2b60@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/testcases/ssve_regs.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c index cd738265cdcd..d0eceea92073 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -121,12 +121,7 @@ static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) struct tdescr tde = { .name = "Streaming SVE registers", .descr = "Check that we get the right Streaming SVE registers reported", - /* - * We shouldn't require FA64 but things like memset() used in the - * helpers might use unsupported instructions so for now disable - * the test unless we've got the full instruction set. 
- */ - .feats_required = FEAT_SME | FEAT_SME_FA64, + .feats_required = FEAT_SME, .timeout = 3, .init = sme_get_vls, .run = sme_regs, From 5f389238534ac8ca4ee3ab12eeb89d3984d303a1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:56:34 +0000 Subject: [PATCH 108/130] kselftest/arm64: Fix enumeration of systems without 128 bit SME The current signal handling tests for SME do not account for the fact that, unlike SVE, all SME vector lengths are optional, so we can't guarantee that we will encounter the minimum possible VL; the tests will hang enumerating VLs on such systems. Abort enumeration when we find the lowest VL. Fixes: 4963aeb35a9e ("kselftest/arm64: signal: Add SME signal handling tests") Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230131-arm64-kselftest-sig-sme-no-128-v1-1-d47c13dc8e1e@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/testcases/ssve_regs.c | 4 ++++ tools/testing/selftests/arm64/signal/testcases/za_regs.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c index d0eceea92073..3d37daafcff5 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td) vl &= PR_SME_VL_LEN_MASK; + /* Did we find the lowest supported VL? */ + if (vq < sve_vq_from_vl(vl)) + break; + /* Skip missing VLs */ vq = sve_vq_from_vl(vl); diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c index ea45acb115d5..174ad6656696 100644 --- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c @@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td) vl &= PR_SME_VL_LEN_MASK; + /* Did we find the lowest supported VL? */ + if (vq < sve_vq_from_vl(vl)) + break; + /* Skip missing VLs */ vq = sve_vq_from_vl(vl); From a7db82f18cd3d85ea8ef70fca5946b441187ed6d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:56:35 +0000 Subject: [PATCH 109/130] kselftest/arm64: Fix enumeration of systems without 128 bit SME for SSVE+ZA The current signal handling tests for SME do not account for the fact that, unlike SVE, all SME vector lengths are optional, so we can't guarantee that we will encounter the minimum possible VL; the tests will hang enumerating VLs on such systems. Abort enumeration when we find the lowest VL in the newly added ssve_za_regs test. Fixes: bc69da5ff087 ("kselftest/arm64: Verify simultaneous SSVE and ZA context generation") Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230131-arm64-kselftest-sig-sme-no-128-v1-2-d47c13dc8e1e@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c index 954a21f6121a..1f62621794d5 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c @@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td) vl &= PR_SME_VL_LEN_MASK; + /* Did we find the lowest supported VL?
*/ + if (vq < sve_vq_from_vl(vl)) + break; + /* Skip missing VLs */ vq = sve_vq_from_vl(vl); From a55d1425fb2f4000a415164ba08712de9c1b7755 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 00:18:43 +0000 Subject: [PATCH 110/130] arm64/sysreg: Allow enumerations to be declared as signed or unsigned Many of our enumerations follow a standard scheme where the values can be treated as unsigned, however there are some where the value must be treated as signed and others that are simple enumerations where there is no clear ordering to the values. Provide new field types SignedEnum and UnsignedEnum which allow the signedness to be specified in the sysreg definition and emit a REG_FIELD_SIGNED define for these which is a boolean corresponding to our current FTR_UNSIGNED and FTR_SIGNED macros. Existing Enums will need to be converted; since these do not have a define generated, anyone wishing to use the sign of one of these will need to explicitly annotate that field, so nothing should start going wrong by default. Acked-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v4-1-25b6b3fb9d18@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/gen-sysreg.awk | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk index c350164a3955..7f27d66a17e1 100755 --- a/arch/arm64/tools/gen-sysreg.awk +++ b/arch/arm64/tools/gen-sysreg.awk @@ -44,6 +44,11 @@ function define_field(reg, field, msb, lsb) { define(reg "_" field "_WIDTH", msb - lsb + 1) } +# Print a field _SIGNED definition for a field +function define_field_sign(reg, field, sign) { + define(reg "_" field "_SIGNED", sign) +} + # Parse a "<msb>[:<lsb>]" string into the global variables @msb and @lsb function parse_bitdef(reg, field, bitdef, _bits) { @@ -233,6 +238,30 @@ END { next } +/^SignedEnum/ { + change_block("Enum<", "Sysreg", "Enum") + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + define_field_sign(reg, field, "true") + + next +} + +/^UnsignedEnum/ { + change_block("Enum<", "Sysreg", "Enum") + expect_fields(3) + field = $3 + parse_bitdef(reg, field, $2) + + define_field(reg, field, msb, lsb) + define_field_sign(reg, field, "false") + + next +} + /^Enum/ { change_block("Enum", "Sysreg", "Enum") expect_fields(3) From c3ac60aa1cfeee4f3976392a9d1d32e79e78d207 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 00:18:44 +0000 Subject: [PATCH 111/130] arm64/sysreg: Initial annotation of signed ID registers We currently annotate a few bitfields as signed in hwcaps; update all of these to be SignedEnum in the sysreg generation. Further signed bitfields can be done incrementally; this is the minimum required for the conversion of the hwcaps to use token pasting to simplify their declaration.
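To illustrate, for a field annotated this way in arch/arm64/tools/sysreg, the define_field_sign() hook added in the previous patch emits a boolean define; a sketch of the expected output (the register/field pair is one converted below, the exact formatting of the generated header may differ):
| /* For "SignedEnum 23:20 AdvSIMD" in ID_AA64PFR0_EL1, gen-sysreg.awk emits: */
| #define ID_AA64PFR0_EL1_AdvSIMD_SIGNED	true
| /* ...alongside the field's other generated definitions. */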
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v4-2-25b6b3fb9d18@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index a01161c02c63..adc3fcde2777 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -846,12 +846,12 @@ Enum 27:24 GIC 0b0001 IMP 0b0010 V4P1 EndEnum -Enum 23:20 AdvSIMD +SignedEnum 23:20 AdvSIMD 0b0000 IMP 0b0001 FP16 0b1111 NI EndEnum -Enum 19:16 FP +SignedEnum 19:16 FP 0b0000 IMP 0b0001 FP16 0b1111 NI From ad16d4cf0b4f88b54085036102476588ea0e7626 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 00:18:45 +0000 Subject: [PATCH 112/130] arm64/sysreg: Initial unsigned annotations for ID registers In order to allow the simplification of the way we declare hwcaps, annotate most of the unsigned fields in the identification registers as such. This is not a complete annotation; it does cover all the cases where we already annotate the signedness of the field in the hwcaps, and some others which I happened to look at and which seemed clear, but there will be more, and nothing outside the identification registers was even looked at. Other fields can be annotated incrementally as people have the time and need to do so. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v4-3-25b6b3fb9d18@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/tools/sysreg | 377 ++++++++++++++++++++-------------------- 1 file changed, 189 insertions(+), 188 deletions(-) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index adc3fcde2777..3afae6325484 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -48,26 +48,26 @@ Sysreg ID_PFR0_EL1 3 0 0 1 0 Res0 63:32 -Enum 31:28 RAS +UnsignedEnum 31:28 RAS 0b0000 NI 0b0001 RAS 0b0010 RASv1p1 EndEnum -Enum 27:24 DIT +UnsignedEnum 27:24 DIT 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 AMU +UnsignedEnum 23:20 AMU 0b0000 NI 0b0001 AMUv1 0b0010 AMUv1p1 EndEnum -Enum 19:16 CSV2 +UnsignedEnum 19:16 CSV2 0b0000 UNDISCLOSED 0b0001 IMP 0b0010 CSV2p1 EndEnum -Enum 15:12 State3 +UnsignedEnum 15:12 State3 0b0000 NI 0b0001 IMP EndEnum @@ -76,12 +76,12 @@ Enum 11:8 State2 0b0001 NO_CV 0b0010 CV EndEnum -Enum 7:4 State1 +UnsignedEnum 7:4 State1 0b0000 NI 0b0001 THUMB 0b0010 THUMB2 EndEnum -Enum 3:0 State0 +UnsignedEnum 3:0 State0 0b0000 NI 0b0001 IMP EndEnum @@ -89,12 +89,12 @@ EndSysreg Sysreg ID_PFR1_EL1 3 0 0 1 1 Res0 63:32 -Enum 31:28 GIC +UnsignedEnum 31:28 GIC 0b0000 NI 0b0001 GICv3 0b0010 GICv4p1 EndEnum -Enum 27:24 Virt_frac +UnsignedEnum 27:24 Virt_frac 0b0000 NI 0b0001 IMP EndEnum @@ -103,16 +103,16 @@ Enum 23:20 Sec_frac 0b0001 WALK_DISABLE 0b0010 SECURE_MEMORY EndEnum -Enum 19:16 GenTimer +UnsignedEnum 19:16 GenTimer 0b0000 NI 0b0001 IMP 0b0010 ECV EndEnum -Enum 15:12 Virtualization +UnsignedEnum 15:12 Virtualization 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 MProgMod +UnsignedEnum 11:8 MProgMod 0b0000 NI 0b0001 IMP EndEnum @@ -121,7 +121,7 @@ Enum 7:4 Security 0b0001 EL3 0b0001 NSACR_RFR EndEnum -Enum 3:0 ProgMod +UnsignedEnum 3:0 ProgMod 0b0000 NI 0b0001 IMP EndEnum @@ -129,11 +129,11 @@ EndSysreg Sysreg ID_DFR0_EL1 3 0 0 1 2 Res0 63:32 -Enum 31:28 TraceFilt +UnsignedEnum 31:28 TraceFilt 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 PerfMon +UnsignedEnum 27:24 PerfMon 0b0000 NI 0b0001 PMUv1 0b0010 PMUv2 @@ -192,7 +192,7 @@ Enum 31:28 InnerShr 0b0001 HW 0b1111 IGNORED EndEnum -Enum 27:24 FCSE +UnsignedEnum 27:24
FCSE 0b0000 NI 0b0001 IMP EndEnum @@ -369,7 +369,7 @@ Enum 27:24 Divide 0b0001 xDIV_T32 0b0010 xDIV_A32 EndEnum -Enum 23:20 Debug +UnsignedEnum 23:20 Debug 0b0000 NI 0b0001 IMP EndEnum @@ -380,19 +380,19 @@ Enum 19:16 Coproc 0b0011 MRRC 0b0100 MRRC2 EndEnum -Enum 15:12 CmpBranch +UnsignedEnum 15:12 CmpBranch 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 BitField +UnsignedEnum 11:8 BitField 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 BitCount +UnsignedEnum 7:4 BitCount 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 Swap +UnsignedEnum 3:0 Swap 0b0000 NI 0b0001 IMP EndEnum @@ -562,33 +562,33 @@ EndSysreg Sysreg ID_ISAR5_EL1 3 0 0 2 5 Res0 63:32 -Enum 31:28 VCMA +UnsignedEnum 31:28 VCMA 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 RDM +UnsignedEnum 27:24 RDM 0b0000 NI 0b0001 IMP EndEnum Res0 23:20 -Enum 19:16 CRC32 +UnsignedEnum 19:16 CRC32 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SHA2 +UnsignedEnum 15:12 SHA2 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 SHA1 +UnsignedEnum 11:8 SHA1 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 IMP 0b0010 VMULL EndEnum -Enum 3:0 SEVL +UnsignedEnum 3:0 SEVL 0b0000 NI 0b0001 IMP EndEnum @@ -596,31 +596,31 @@ EndSysreg Sysreg ID_ISAR6_EL1 3 0 0 2 7 Res0 63:28 -Enum 27:24 I8MM +UnsignedEnum 27:24 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 BF16 +UnsignedEnum 23:20 BF16 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 SPECRES +UnsignedEnum 19:16 SPECRES 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SB +UnsignedEnum 15:12 SB 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 FHM +UnsignedEnum 11:8 FHM 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 DP +UnsignedEnum 7:4 DP 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 JSCVT +UnsignedEnum 3:0 JSCVT 0b0000 NI 0b0001 IMP EndEnum @@ -628,37 +628,37 @@ EndSysreg Sysreg ID_MMFR4_EL1 3 0 0 2 6 Res0 63:32 -Enum 31:28 EVT +UnsignedEnum 31:28 EVT 0b0000 NI 0b0001 NO_TLBIS 0b0010 TLBIS EndEnum -Enum 27:24 CCIDX +UnsignedEnum 27:24 CCIDX 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 LSM +UnsignedEnum 23:20 LSM 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 HPDS +UnsignedEnum 19:16 HPDS 0b0000 NI 0b0001 AA32HPD 0b0010 HPDS2 EndEnum -Enum 15:12 CnP +UnsignedEnum 15:12 CnP 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 XNX +UnsignedEnum 11:8 XNX 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AC2 +UnsignedEnum 7:4 AC2 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 SpecSEI +UnsignedEnum 3:0 SpecSEI 0b0000 NI 0b0001 IMP EndEnum @@ -666,32 +666,32 @@ EndSysreg Sysreg MVFR0_EL1 3 0 0 3 0 Res0 63:32 -Enum 31:28 FPRound +UnsignedEnum 31:28 FPRound 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 FPShVec +UnsignedEnum 27:24 FPShVec 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 FPSqrt +UnsignedEnum 23:20 FPSqrt 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 FPDivide +UnsignedEnum 19:16 FPDivide 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 FPTrap +UnsignedEnum 15:12 FPTrap 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 FPDP +UnsignedEnum 11:8 FPDP 0b0000 NI 0b0001 VFPv2 0b0010 VFPv3 EndEnum -Enum 7:4 FPSP +UnsignedEnum 7:4 FPSP 0b0000 NI 0b0001 VFPv2 0b0010 VFPv3 @@ -705,38 +705,38 @@ EndSysreg Sysreg MVFR1_EL1 3 0 0 3 1 Res0 63:32 -Enum 31:28 SIMDFMAC +UnsignedEnum 31:28 SIMDFMAC 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 FPHP +UnsignedEnum 27:24 FPHP 0b0000 NI 0b0001 FPHP 0b0010 FPHP_CONV 0b0011 FP16 EndEnum -Enum 23:20 SIMDHP +UnsignedEnum 23:20 SIMDHP 0b0000 NI 0b0001 SIMDHP 0b0010 SIMDHP_FLOAT EndEnum -Enum 19:16 SIMDSP +UnsignedEnum 19:16 SIMDSP 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SIMDInt +UnsignedEnum 15:12 SIMDInt 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 SIMDLS +UnsignedEnum 11:8 SIMDLS 0b0000 NI 0b0001 IMP EndEnum -Enum 
7:4 FPDNaN +UnsignedEnum 7:4 FPDNaN 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 FPFtZ +UnsignedEnum 3:0 FPFtZ 0b0000 NI 0b0001 IMP EndEnum @@ -761,15 +761,15 @@ EndSysreg Sysreg ID_PFR2_EL1 3 0 0 3 4 Res0 63:12 -Enum 11:8 RAS_frac +UnsignedEnum 11:8 RAS_frac 0b0000 NI 0b0001 RASv1p1 EndEnum -Enum 7:4 SSBS +UnsignedEnum 7:4 SSBS 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 CSV3 +UnsignedEnum 3:0 CSV3 0b0000 NI 0b0001 IMP EndEnum @@ -777,7 +777,7 @@ EndSysreg Sysreg ID_DFR1_EL1 3 0 0 3 5 Res0 63:8 -Enum 7:4 HPMN0 +UnsignedEnum 7:4 HPMN0 0b0000 NI 0b0001 IMP EndEnum @@ -790,58 +790,58 @@ EndSysreg Sysreg ID_MMFR5_EL1 3 0 0 3 6 Res0 63:8 -Enum 7:4 nTLBPA +UnsignedEnum 7:4 nTLBPA 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 ETS +UnsignedEnum 3:0 ETS 0b0000 NI 0b0001 IMP EndEnum EndSysreg Sysreg ID_AA64PFR0_EL1 3 0 0 4 0 -Enum 63:60 CSV3 +UnsignedEnum 63:60 CSV3 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 CSV2 +UnsignedEnum 59:56 CSV2 0b0000 NI 0b0001 IMP 0b0010 CSV2_2 0b0011 CSV2_3 EndEnum -Enum 55:52 RME +UnsignedEnum 55:52 RME 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 DIT +UnsignedEnum 51:48 DIT 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 AMU +UnsignedEnum 47:44 AMU 0b0000 NI 0b0001 IMP 0b0010 V1P1 EndEnum -Enum 43:40 MPAM +UnsignedEnum 43:40 MPAM 0b0000 0 0b0001 1 EndEnum -Enum 39:36 SEL2 +UnsignedEnum 39:36 SEL2 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 SVE +UnsignedEnum 35:32 SVE 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 RAS +UnsignedEnum 31:28 RAS 0b0000 NI 0b0001 IMP 0b0010 V1P1 EndEnum -Enum 27:24 GIC +UnsignedEnum 27:24 GIC 0b0000 NI 0b0001 IMP 0b0010 V4P1 @@ -856,21 +856,21 @@ SignedEnum 19:16 FP 0b0001 FP16 0b1111 NI EndEnum -Enum 15:12 EL3 +UnsignedEnum 15:12 EL3 0b0000 NI 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 11:8 EL2 +UnsignedEnum 11:8 EL2 0b0000 NI 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 7:4 EL1 +UnsignedEnum 7:4 EL1 0b0001 IMP 0b0010 AARCH32 EndEnum -Enum 3:0 EL0 +UnsignedEnum 3:0 EL0 0b0001 IMP 0b0010 AARCH32 EndEnum @@ -878,45 +878,45 @@ EndSysreg Sysreg ID_AA64PFR1_EL1 3 0 0 4 1 Res0 63:40 -Enum 39:36 NMI +UnsignedEnum 39:36 NMI 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 CSV2_frac +UnsignedEnum 35:32 CSV2_frac 0b0000 NI 0b0001 CSV2_1p1 0b0010 CSV2_1p2 EndEnum -Enum 31:28 RNDR_trap +UnsignedEnum 31:28 RNDR_trap 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 SME +UnsignedEnum 27:24 SME 0b0000 NI 0b0001 IMP 0b0010 SME2 EndEnum Res0 23:20 -Enum 19:16 MPAM_frac +UnsignedEnum 19:16 MPAM_frac 0b0000 MINOR_0 0b0001 MINOR_1 EndEnum -Enum 15:12 RAS_frac +UnsignedEnum 15:12 RAS_frac 0b0000 NI 0b0001 RASv1p1 EndEnum -Enum 11:8 MTE +UnsignedEnum 11:8 MTE 0b0000 NI 0b0001 IMP 0b0010 MTE2 0b0011 MTE3 EndEnum -Enum 7:4 SSBS +UnsignedEnum 7:4 SSBS 0b0000 NI 0b0001 IMP 0b0010 SSBS2 EndEnum -Enum 3:0 BT +UnsignedEnum 3:0 BT 0b0000 NI 0b0001 IMP EndEnum @@ -924,45 +924,45 @@ EndSysreg Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Res0 63:60 -Enum 59:56 F64MM +UnsignedEnum 59:56 F64MM 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 F32MM +UnsignedEnum 55:52 F32MM 0b0000 NI 0b0001 IMP EndEnum Res0 51:48 -Enum 47:44 I8MM +UnsignedEnum 47:44 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 SM4 +UnsignedEnum 43:40 SM4 0b0000 NI 0b0001 IMP EndEnum Res0 39:36 -Enum 35:32 SHA3 +UnsignedEnum 35:32 SHA3 0b0000 NI 0b0001 IMP EndEnum Res0 31:24 -Enum 23:20 BF16 +UnsignedEnum 23:20 BF16 0b0000 NI 0b0001 IMP 0b0010 EBF16 EndEnum -Enum 19:16 BitPerm +UnsignedEnum 19:16 BitPerm 0b0000 NI 0b0001 IMP EndEnum Res0 15:8 -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 IMP 0b0010 PMULL128 EndEnum -Enum 3:0 SVEver +UnsignedEnum 3:0 SVEver 0b0000 IMP 0b0001 SVE2 
0b0010 SVE2p1 @@ -970,55 +970,56 @@ EndEnum EndSysreg Sysreg ID_AA64SMFR0_EL1 3 0 0 4 5 -Enum 63 FA64 +UnsignedEnum 63 FA64 0b0 NI 0b1 IMP EndEnum Res0 62:60 -Enum 59:56 SMEver +UnsignedEnum 59:56 SMEver 0b0000 SME 0b0001 SME2 0b0010 SME2p1 + 0b0000 IMP EndEnum -Enum 55:52 I16I64 +UnsignedEnum 55:52 I16I64 0b0000 NI 0b1111 IMP EndEnum Res0 51:49 -Enum 48 F64F64 +UnsignedEnum 48 F64F64 0b0 NI 0b1 IMP EndEnum -Enum 47:44 I16I32 +UnsignedEnum 47:44 I16I32 0b0000 NI 0b0101 IMP EndEnum -Enum 43 B16B16 +UnsignedEnum 43 B16B16 0b0 NI 0b1 IMP EndEnum -Enum 42 F16F16 +UnsignedEnum 42 F16F16 0b0 NI 0b1 IMP EndEnum Res0 41:40 -Enum 39:36 I8I32 +UnsignedEnum 39:36 I8I32 0b0000 NI 0b1111 IMP EndEnum -Enum 35 F16F32 +UnsignedEnum 35 F16F32 0b0 NI 0b1 IMP EndEnum -Enum 34 B16F32 +UnsignedEnum 34 B16F32 0b0 NI 0b1 IMP EndEnum -Enum 33 BI32I32 +UnsignedEnum 33 BI32I32 0b0 NI 0b1 IMP EndEnum -Enum 32 F32F32 +UnsignedEnum 32 F32F32 0b0 NI 0b1 IMP EndEnum @@ -1031,7 +1032,7 @@ Enum 63:60 HPMN0 0b0001 DEF EndEnum Res0 59:56 -Enum 55:52 BRBE +UnsignedEnum 55:52 BRBE 0b0000 NI 0b0001 IMP 0b0010 BRBE_V1P1 @@ -1041,19 +1042,19 @@ Enum 51:48 MTPMU 0b0001 IMP 0b1111 NI EndEnum -Enum 47:44 TraceBuffer +UnsignedEnum 47:44 TraceBuffer 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 TraceFilt +UnsignedEnum 43:40 TraceFilt 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 DoubleLock +UnsignedEnum 39:36 DoubleLock 0b0000 IMP 0b1111 NI EndEnum -Enum 35:32 PMSVer +UnsignedEnum 35:32 PMSVer 0b0000 NI 0b0001 IMP 0b0010 V1P1 @@ -1065,7 +1066,7 @@ Res0 27:24 Field 23:20 WRPs Res0 19:16 Field 15:12 BRPs -Enum 11:8 PMUVer +UnsignedEnum 11:8 PMUVer 0b0000 NI 0b0001 IMP 0b0100 V3P1 @@ -1075,11 +1076,11 @@ Enum 11:8 PMUVer 0b1000 V3P8 0b1111 IMP_DEF EndEnum -Enum 7:4 TraceVer +UnsignedEnum 7:4 TraceVer 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 DebugVer +UnsignedEnum 3:0 DebugVer 0b0110 IMP 0b0111 VHE 0b1000 V8P2 @@ -1109,66 +1110,66 @@ Res0 63:0 EndSysreg Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 -Enum 63:60 RNDR +UnsignedEnum 63:60 RNDR 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 TLB +UnsignedEnum 59:56 TLB 0b0000 NI 0b0001 OS 0b0010 RANGE EndEnum -Enum 55:52 TS +UnsignedEnum 55:52 TS 0b0000 NI 0b0001 FLAGM 0b0010 FLAGM2 EndEnum -Enum 51:48 FHM +UnsignedEnum 51:48 FHM 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 DP +UnsignedEnum 47:44 DP 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 SM4 +UnsignedEnum 43:40 SM4 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 SM3 +UnsignedEnum 39:36 SM3 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 SHA3 +UnsignedEnum 35:32 SHA3 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 RDM +UnsignedEnum 31:28 RDM 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 TME +UnsignedEnum 27:24 TME 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 ATOMIC +UnsignedEnum 23:20 ATOMIC 0b0000 NI 0b0010 IMP EndEnum -Enum 19:16 CRC32 +UnsignedEnum 19:16 CRC32 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SHA2 +UnsignedEnum 15:12 SHA2 0b0000 NI 0b0001 SHA256 0b0010 SHA512 EndEnum -Enum 11:8 SHA1 +UnsignedEnum 11:8 SHA1 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 AES +UnsignedEnum 7:4 AES 0b0000 NI 0b0001 AES 0b0010 PMULL @@ -1177,63 +1178,63 @@ Res0 3:0 EndSysreg Sysreg ID_AA64ISAR1_EL1 3 0 0 6 1 -Enum 63:60 LS64 +UnsignedEnum 63:60 LS64 0b0000 NI 0b0001 LS64 0b0010 LS64_V 0b0011 LS64_ACCDATA EndEnum -Enum 59:56 XS +UnsignedEnum 59:56 XS 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 I8MM +UnsignedEnum 55:52 I8MM 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 DGH +UnsignedEnum 51:48 DGH 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 BF16 +UnsignedEnum 47:44 BF16 0b0000 NI 0b0001 IMP 0b0010 EBF16 EndEnum -Enum 43:40 SPECRES 
+UnsignedEnum 43:40 SPECRES 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 SB +UnsignedEnum 39:36 SB 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 FRINTTS +UnsignedEnum 35:32 FRINTTS 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 GPI +UnsignedEnum 31:28 GPI 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 GPA +UnsignedEnum 27:24 GPA 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 LRCPC +UnsignedEnum 23:20 LRCPC 0b0000 NI 0b0001 IMP 0b0010 LRCPC2 EndEnum -Enum 19:16 FCMA +UnsignedEnum 19:16 FCMA 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 JSCVT +UnsignedEnum 15:12 JSCVT 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 API +UnsignedEnum 11:8 API 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1241,7 +1242,7 @@ Enum 11:8 API 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 7:4 APA +UnsignedEnum 7:4 APA 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1249,7 +1250,7 @@ Enum 7:4 APA 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 3:0 DPB +UnsignedEnum 3:0 DPB 0b0000 NI 0b0001 IMP 0b0010 DPB2 @@ -1258,28 +1259,28 @@ EndSysreg Sysreg ID_AA64ISAR2_EL1 3 0 0 6 2 Res0 63:56 -Enum 55:52 CSSC +UnsignedEnum 55:52 CSSC 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 RPRFM +UnsignedEnum 51:48 RPRFM 0b0000 NI 0b0001 IMP EndEnum Res0 47:28 -Enum 27:24 PAC_frac +UnsignedEnum 27:24 PAC_frac 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 BC +UnsignedEnum 23:20 BC 0b0000 NI 0b0001 IMP EndEnum -Enum 19:16 MOPS +UnsignedEnum 19:16 MOPS 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 APA3 +UnsignedEnum 15:12 APA3 0b0000 NI 0b0001 PAuth 0b0010 EPAC @@ -1287,32 +1288,32 @@ Enum 15:12 APA3 0b0100 FPAC 0b0101 FPACCOMBINE EndEnum -Enum 11:8 GPA3 +UnsignedEnum 11:8 GPA3 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 RPRES +UnsignedEnum 7:4 RPRES 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 WFxT +UnsignedEnum 3:0 WFxT 0b0000 NI 0b0010 IMP EndEnum EndSysreg Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0 -Enum 63:60 ECV +UnsignedEnum 63:60 ECV 0b0000 NI 0b0001 IMP 0b0010 CNTPOFF EndEnum -Enum 59:56 FGT +UnsignedEnum 59:56 FGT 0b0000 NI 0b0001 IMP EndEnum Res0 55:48 -Enum 47:44 EXS +UnsignedEnum 47:44 EXS 0b0000 NI 0b0001 IMP EndEnum @@ -1347,15 +1348,15 @@ Enum 23:20 TGRAN16 0b0001 IMP 0b0010 52_BIT EndEnum -Enum 19:16 BIGENDEL0 +UnsignedEnum 19:16 BIGENDEL0 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 SNSMEM +UnsignedEnum 15:12 SNSMEM 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 BIGEND +UnsignedEnum 11:8 BIGEND 0b0000 NI 0b0001 IMP EndEnum @@ -1375,62 +1376,62 @@ EndEnum EndSysreg Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1 -Enum 63:60 ECBHB +UnsignedEnum 63:60 ECBHB 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 CMOW +UnsignedEnum 59:56 CMOW 0b0000 NI 0b0001 IMP EndEnum -Enum 55:52 TIDCP1 +UnsignedEnum 55:52 TIDCP1 0b0000 NI 0b0001 IMP EndEnum -Enum 51:48 nTLBPA +UnsignedEnum 51:48 nTLBPA 0b0000 NI 0b0001 IMP EndEnum -Enum 47:44 AFP +UnsignedEnum 47:44 AFP 0b0000 NI 0b0001 IMP EndEnum -Enum 43:40 HCX +UnsignedEnum 43:40 HCX 0b0000 NI 0b0001 IMP EndEnum -Enum 39:36 ETS +UnsignedEnum 39:36 ETS 0b0000 NI 0b0001 IMP EndEnum -Enum 35:32 TWED +UnsignedEnum 35:32 TWED 0b0000 NI 0b0001 IMP EndEnum -Enum 31:28 XNX +UnsignedEnum 31:28 XNX 0b0000 NI 0b0001 IMP EndEnum -Enum 27:24 SpecSEI +UnsignedEnum 27:24 SpecSEI 0b0000 NI 0b0001 IMP EndEnum -Enum 23:20 PAN +UnsignedEnum 23:20 PAN 0b0000 NI 0b0001 IMP 0b0010 PAN2 0b0011 PAN3 EndEnum -Enum 19:16 LO +UnsignedEnum 19:16 LO 0b0000 NI 0b0001 IMP EndEnum -Enum 15:12 HPDS +UnsignedEnum 15:12 HPDS 0b0000 NI 0b0001 IMP 0b0010 HPDS2 EndEnum -Enum 11:8 VH +UnsignedEnum 11:8 VH 0b0000 NI 0b0001 IMP EndEnum @@ -1438,7 +1439,7 @@ Enum 7:4 VMIDBits 0b0000 8 0b0010 16 EndEnum -Enum 3:0 HAFDBS +UnsignedEnum 3:0 HAFDBS 0b0000 NI 0b0001 
AF 0b0010 DBM @@ -1446,26 +1447,26 @@ EndEnum EndSysreg Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2 -Enum 63:60 E0PD +UnsignedEnum 63:60 E0PD 0b0000 NI 0b0001 IMP EndEnum -Enum 59:56 EVT +UnsignedEnum 59:56 EVT 0b0000 NI 0b0001 IMP 0b0010 TTLBxS EndEnum -Enum 55:52 BBM +UnsignedEnum 55:52 BBM 0b0000 0 0b0001 1 0b0010 2 EndEnum -Enum 51:48 TTL +UnsignedEnum 51:48 TTL 0b0000 NI 0b0001 IMP EndEnum Res0 47:44 -Enum 43:40 FWB +UnsignedEnum 43:40 FWB 0b0000 NI 0b0001 IMP EndEnum @@ -1473,7 +1474,7 @@ Enum 39:36 IDS 0b0000 0x0 0b0001 0x18 EndEnum -Enum 35:32 AT +UnsignedEnum 35:32 AT 0b0000 NI 0b0001 IMP EndEnum @@ -1481,7 +1482,7 @@ Enum 31:28 ST 0b0000 39 0b0001 48_47 EndEnum -Enum 27:24 NV +UnsignedEnum 27:24 NV 0b0000 NI 0b0001 IMP 0b0010 NV2 @@ -1494,19 +1495,19 @@ Enum 19:16 VARange 0b0000 48 0b0001 52 EndEnum -Enum 15:12 IESB +UnsignedEnum 15:12 IESB 0b0000 NI 0b0001 IMP EndEnum -Enum 11:8 LSM +UnsignedEnum 11:8 LSM 0b0000 NI 0b0001 IMP EndEnum -Enum 7:4 UAO +UnsignedEnum 7:4 UAO 0b0000 NI 0b0001 IMP EndEnum -Enum 3:0 CnP +UnsignedEnum 3:0 CnP 0b0000 NI 0b0001 IMP EndEnum From 82c5acefc9cb8478413610acf5904cec73ddd343 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 00:18:46 +0000 Subject: [PATCH 113/130] arm64/cpufeature: Always use symbolic name for feature value in hwcaps Our table of hwcaps sometimes uses the defined constant to specify the enumeration value they are attempting to match but in some cases an unadorned number is used. In preparation for using helper macros to specify the hwcaps less verbosely replace the magic numbers with their constants, this will hopefully make the conversion to helper macros easier to review. There should be no functional effect from this change, a check of the generated .rodata showed no differences. Acked-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v4-4-25b6b3fb9d18@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 106 ++++++++++++++++----------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2d061e998a69..bb7423c6e6fe 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2779,40 +2779,40 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { #endif static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED,
1, CAP_HWCAP, KERNEL_HWCAP_SM3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_AES_AES, CAP_HWCAP, KERNEL_HWCAP_AES), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA1_IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA2_SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA2_SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 
ID_AA64ISAR0_EL1_CRC32_IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_ATOMIC_IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_RDM_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SM3_IMP, CAP_HWCAP, KERNEL_HWCAP_SM3), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SM4), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_DP_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_FHM_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_TS_FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_TS_FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_RNDR_IMP, CAP_HWCAP, KERNEL_HWCAP_RNG), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_FP_IMP, CAP_HWCAP, KERNEL_HWCAP_FP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_FP_FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_AdvSIMD_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_AdvSIMD_FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_DIT_IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DPB_IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DPB_DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_JSCVT_IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_FCMA_IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_LRCPC_IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_LRCPC_LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_FRINTTS_IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_SB_IMP, CAP_HWCAP, KERNEL_HWCAP_SB), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_BF16), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, 
ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DGH_IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR2_EL1_AT_IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1), @@ -2840,11 +2840,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ - HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), - HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR0_EL1_ECV_IMP, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR1_EL1_AFP_IMP, CAP_HWCAP, KERNEL_HWCAP_AFP), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_CSSC_IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRFM_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRES_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME), @@ -2890,23 +2890,23 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { #ifdef CONFIG_COMPAT HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_SIMDFMAC_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
So, piggy back on this for the presence of VFP support */ - HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), - HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, 3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, MVFR0_EL1_FPDP_VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, MVFR0_EL1_FPDP_VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_FPHP_FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_SIMDHP_SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_AES_VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_AES_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_SHA1_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_SHA2_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), + HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_CRC32_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_DP_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_FHM_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_SB_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_BF16_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), + HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 
ID_ISAR6_EL1_I8MM_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), + HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_PFR2_EL1_SSBS_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), #endif {}, }; From bfffd469e5296b86418aacf6a0142bdef860b22d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 00:18:47 +0000 Subject: [PATCH 114/130] arm64/cpufeature: Use helper macros to specify hwcaps At present the hwcaps are hard to read and a bit error prone since the macros used to specify matches require us to write out the register name multiple times and explicitly specify the width of the field, hopefully using the correct constant. Now that all the ID registers are generated we can improve this somewhat by redoing the macros so that we specify the register, field and minimum value symbolically and use token pasting to initialise the capability struct with the appropriate values. We move from specifying like this: HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), to this: HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), which is shorter due to having less duplicate information and makes it much harder to make an error like specifying the wrong field width or an invalid enumeration value since everything must be a constant defined for the sysreg and names are only typed once. There should be no functional effect from this change, a check of the generated .rodata showed no differences. Acked-by: Mark Rutland Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221207-arm64-sysreg-helpers-v4-5-25b6b3fb9d18@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 209 ++++++++++++++++----------------- 1 file changed, 101 insertions(+), 108 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index bb7423c6e6fe..a2725ab4a99a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2710,13 +2710,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { {}, }; -#define HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \ +#define HWCAP_CPUID_MATCH(reg, field, min_value) \ .matches = has_user_cpuid_feature, \ - .sys_reg = reg, \ - .field_pos = field, \ - .field_width = width, \ - .sign = s, \ - .min_field_value = min_value, + .sys_reg = SYS_##reg, \ + .field_pos = reg##_##field##_SHIFT, \ + .field_width = reg##_##field##_WIDTH, \ + .sign = reg##_##field##_SIGNED, \ + .min_field_value = reg##_##field##_##min_value, #define __HWCAP_CAP(name, cap_type, cap) \ .desc = name, \ @@ -2724,10 +2724,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .hwcap_type = cap_type, \ .hwcap = cap, \ -#define HWCAP_CAP(reg, field, width, s, min_value, cap_type, cap) \ +#define HWCAP_CAP(reg, field, min_value, cap_type, cap) \ { \ __HWCAP_CAP(#cap, cap_type, cap) \ - HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \ + HWCAP_CPUID_MATCH(reg, field, min_value) \ } #define HWCAP_MULTI_CAP(list, cap_type, cap) \ @@ -2746,121 +2746,114 @@ static const struct arm64_cpu_capabilities arm64_features[] = { #ifdef CONFIG_ARM64_PTR_AUTH static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_APA_SHIFT, - 4, FTR_UNSIGNED, - ID_AA64ISAR1_EL1_APA_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT, - 4, FTR_UNSIGNED, 
ID_AA64ISAR2_EL1_APA3_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_API_PAuth) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth) }, {}, }; static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPA_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP) }, { - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT, - 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPI_IMP) + HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP) }, {}, }; #endif static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_AES_AES, CAP_HWCAP, KERNEL_HWCAP_AES), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA1_IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA2_SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA2_SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_CRC32_IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_ATOMIC_IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_RDM_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SM3_IMP, CAP_HWCAP, KERNEL_HWCAP_SM3), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SM4), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_DP_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_FHM_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_TS_FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_TS_FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR0_EL1_RNDR_IMP, CAP_HWCAP, KERNEL_HWCAP_RNG), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_FP_IMP, CAP_HWCAP, KERNEL_HWCAP_FP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_FP_FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_AdvSIMD_IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), - 
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, ID_AA64PFR0_EL1_AdvSIMD_FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_DIT_IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DPB_IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DPB_DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_JSCVT_IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_FCMA_IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_LRCPC_IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_LRCPC_LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_FRINTTS_IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_SB_IMP, CAP_HWCAP, KERNEL_HWCAP_SB), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_BF16), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_DGH_IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR2_EL1_AT_IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), + HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), + HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512), + HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32), + HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), + HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), + HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3), + HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3), + HWCAP_CAP(ID_AA64ISAR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SM4), + HWCAP_CAP(ID_AA64ISAR0_EL1, DP, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), + HWCAP_CAP(ID_AA64ISAR0_EL1, FHM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), + HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM), + HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), + HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG), + HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP), + HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP), + HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), + HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), + 
HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), + HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), + HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), + HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT), + HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA), + HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC), + HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), + HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT), + HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB), + HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16), + HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), + HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), + HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), + HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), #ifdef CONFIG_ARM64_SVE - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), - HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), + HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), + HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1), + HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), + HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), + HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), + HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), + HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), + HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16), + HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), + 
HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4), + HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), + HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), + HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), #endif - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), + HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), #ifdef CONFIG_ARM64_BTI - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), + HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), #endif #ifdef CONFIG_ARM64_PTR_AUTH HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), #endif #ifdef CONFIG_ARM64_MTE - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), + HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), + HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), #endif /* CONFIG_ARM64_MTE */ - HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR0_EL1_ECV_IMP, CAP_HWCAP, KERNEL_HWCAP_ECV), - HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, ID_AA64MMFR1_EL1_AFP_IMP, CAP_HWCAP, KERNEL_HWCAP_AFP), - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_CSSC_IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC), - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRFM_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_RPRES_IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), + HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC), + HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), + HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), + HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), #ifdef CONFIG_ARM64_SME - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), - 
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16B16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_BI32I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), - HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), + HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), + HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), + HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), + HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), + HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), + HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), + HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), #endif /* CONFIG_ARM64_SME */ {}, }; @@ -2890,23 +2883,23 @@ static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { #ifdef CONFIG_COMPAT HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_SIMDFMAC_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + HWCAP_CAP(MVFR1_EL1, SIMDFMAC, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
So, piggy back on this for the presence of VFP support */ - HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, MVFR0_EL1_FPDP_VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), - HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, MVFR0_EL1_FPDP_VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_FPHP_FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), - HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, MVFR1_EL1_SIMDHP_SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_AES_VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_AES_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_SHA1_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_SHA2_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), - HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, ID_ISAR5_EL1_CRC32_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_DP_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_FHM_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_SB_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_BF16_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), - HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_ISAR6_EL1_I8MM_IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), - HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_PFR2_EL1_SSBS_IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), + HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), + HWCAP_CAP(MVFR1_EL1, FPHP, FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), + HWCAP_CAP(MVFR1_EL1, SIMDHP, SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), + HWCAP_CAP(ID_ISAR5_EL1, AES, VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), + HWCAP_CAP(ID_ISAR5_EL1, AES, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), + HWCAP_CAP(ID_ISAR5_EL1, SHA1, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), + HWCAP_CAP(ID_ISAR5_EL1, SHA2, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), + HWCAP_CAP(ID_ISAR5_EL1, CRC32, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), + HWCAP_CAP(ID_ISAR6_EL1, DP, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), + HWCAP_CAP(ID_ISAR6_EL1, FHM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), + HWCAP_CAP(ID_ISAR6_EL1, SB, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), + HWCAP_CAP(ID_ISAR6_EL1, BF16, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), + HWCAP_CAP(ID_ISAR6_EL1, I8MM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), + HWCAP_CAP(ID_PFR2_EL1, SSBS, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), #endif {}, }; From 92f14518cc43dcaebfb281dfff2fe14b87633cf4 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:39 +0000 Subject: [PATCH 115/130] arm64/signal: Don't redundantly verify FPSIMD magic We validate that the magic in the struct fpsimd_context is correct in restore_fpsimd_context() but this is redundant since 
parse_user_sigframe() uses this magic to decide to call the function in the first place. Remove the extra validation. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-1-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index ed692284f199..882f6d913508 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -191,15 +191,14 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) static int restore_fpsimd_context(struct fpsimd_context __user *ctx) { struct user_fpsimd_state fpsimd; - __u32 magic, size; + __u32 size; int err = 0; - /* check the magic/size information */ - __get_user_error(magic, &ctx->head.magic, err); + /* check the size information */ __get_user_error(size, &ctx->head.size, err); if (err) return -EFAULT; - if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context)) + if (size != sizeof(struct fpsimd_context)) return -EINVAL; /* copy the FP and status/control registers */ From 0eb23720f29e4e7010c4b5195cf0d6500921a146 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:40 +0000 Subject: [PATCH 116/130] arm64/signal: Remove redundant size validation from parse_user_sigframe() There is some minimal size validation in parse_user_sigframe() however all of the individual parsing functions perform frame specific validation of the sizing information, remove the frame specific size checks in the core so that there isn't any confusion about what we validate for size. Since the checks in the SVE and ZA parsing are after we have read the relevant context and since they won't report an error if the frame is undersized they are adjusted to check for this before doing anything else. 
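As an illustration (paraphrasing the hunks below rather than quoting them exactly), the SVE path now rejects an undersized frame straight after copying it in and before acting on any of its fields:

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	/* Validate the header before using any other field */
	if (sve.head.size < sizeof(*user->sve))
		return -EINVAL;

The ZA path gains the equivalent check against sizeof(*user->za).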
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-2-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 882f6d913508..3228b5a1dfe3 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -278,6 +278,9 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (__copy_from_user(&sve, user->sve, sizeof(sve))) return -EFAULT; + if (sve.head.size < sizeof(*user->sve)) + return -EINVAL; + if (sve.flags & SVE_SIG_FLAG_SM) { if (!system_supports_sme()) return -EINVAL; @@ -293,7 +296,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (sve.vl != vl) return -EINVAL; - if (sve.head.size <= sizeof(*user->sve)) { + if (sve.head.size == sizeof(*user->sve)) { clear_thread_flag(TIF_SVE); current->thread.svcr &= ~SVCR_SM_MASK; current->thread.fp_type = FP_STATE_FPSIMD; @@ -434,10 +437,13 @@ static int restore_za_context(struct user_ctxs *user) if (__copy_from_user(&za, user->za, sizeof(za))) return -EFAULT; + if (za.head.size < sizeof(*user->za)) + return -EINVAL; + if (za.vl != task_get_sme_vl(current)) return -EINVAL; - if (za.head.size <= sizeof(*user->za)) { + if (za.head.size == sizeof(*user->za)) { current->thread.svcr &= ~SVCR_ZA_MASK; return 0; } @@ -614,9 +620,6 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->fpsimd) goto invalid; - if (size < sizeof(*user->fpsimd)) - goto invalid; - user->fpsimd = (struct fpsimd_context __user *)head; break; @@ -631,9 +634,6 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->sve) goto invalid; - if (size < sizeof(*user->sve)) - goto invalid; - user->sve = (struct sve_context __user *)head; break; @@ -657,9 +657,6 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->za) goto invalid; - if (size < sizeof(*user->za)) - goto invalid; - user->za = (struct za_context __user *)head; break; From 4e4e93045fe1ad83dce7448690458b7f73669044 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:41 +0000 Subject: [PATCH 117/130] arm64/signal: Make interface for restore_fpsimd_context() consistent Instead of taking a pointer to struct user_ctxs like the other two restore_blah_context() functions the FPSIMD function takes a pointer to the user struct it should read. Change it to be consistent with the rest, both for consistency and to prepare for changes which avoid rereading data that has already been read by the core parsing code. There should be no functional change from this patch. 
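Concretely, the prototype changes from

	static int restore_fpsimd_context(struct fpsimd_context __user *ctx)

to

	static int restore_fpsimd_context(struct user_ctxs *user)

with the body reading the FPSIMD frame through user->fpsimd instead of ctx.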
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-3-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 3228b5a1dfe3..49321871783d 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -170,6 +170,14 @@ static void __user *apply_user_offset( return base + offset; } +struct user_ctxs { + struct fpsimd_context __user *fpsimd; + struct sve_context __user *sve; + struct tpidr2_context __user *tpidr2; + struct za_context __user *za; + struct zt_context __user *zt; +}; + static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) { struct user_fpsimd_state const *fpsimd = @@ -188,24 +196,24 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) return err ? -EFAULT : 0; } -static int restore_fpsimd_context(struct fpsimd_context __user *ctx) +static int restore_fpsimd_context(struct user_ctxs *user) { struct user_fpsimd_state fpsimd; __u32 size; int err = 0; /* check the size information */ - __get_user_error(size, &ctx->head.size, err); + __get_user_error(size, &user->fpsimd->head.size, err); if (err) return -EFAULT; if (size != sizeof(struct fpsimd_context)) return -EINVAL; /* copy the FP and status/control registers */ - err = __copy_from_user(fpsimd.vregs, ctx->vregs, + err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs), sizeof(fpsimd.vregs)); - __get_user_error(fpsimd.fpsr, &ctx->fpsr, err); - __get_user_error(fpsimd.fpcr, &ctx->fpcr, err); + __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err); + __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err); clear_thread_flag(TIF_SVE); current->thread.fp_type = FP_STATE_FPSIMD; @@ -218,14 +226,6 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) } -struct user_ctxs { - struct fpsimd_context __user *fpsimd; - struct sve_context __user *sve; - struct tpidr2_context __user *tpidr2; - struct za_context __user *za; - struct zt_context __user *zt; -}; - #ifdef CONFIG_ARM64_SVE static int preserve_sve_context(struct sve_context __user *ctx) @@ -789,7 +789,7 @@ static int restore_sigframe(struct pt_regs *regs, if (user.sve) err = restore_sve_fpsimd_context(&user); else - err = restore_fpsimd_context(user.fpsimd); + err = restore_fpsimd_context(&user); } if (err == 0 && system_supports_sme() && user.tpidr2) From b57682b315588aab496439e317c0f433f28600ae Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:42 +0000 Subject: [PATCH 118/130] arm64/signal: Avoid rereading context frame sizes We need to read the sizes of the signal context frames as part of parsing the overall signal context in parse_user_sigframe(). In the cases where we defer frame specific parsing to other functions those functions (other than the recently added TPIDR2 parser) reread the size and validate the version they read, opening the possibility that the value may change. Avoid this possibility by passing the size read in parse_user_sigframe() through user_ctxs and referring to that. For consistency we move the size check for the TPIDR2 context into the TPIDR2 parsing function. Note that for SVE, ZA and ZT contexts we still read the size again but after this change we no longer use the value, further changes will avoid the read. 
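After this change struct user_ctxs pairs each frame pointer with the size captured by the parser, along the lines of:

	struct user_ctxs {
		struct fpsimd_context __user *fpsimd;
		u32 fpsimd_size;
		struct sve_context __user *sve;
		u32 sve_size;
		/* ...tpidr2, za and zt gain matching _size fields */
	};

so the restore functions can test e.g. user->fpsimd_size instead of rereading head.size from the user frame.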
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-4-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 52 ++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 49321871783d..567e8e5b6998 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -172,10 +172,15 @@ static void __user *apply_user_offset( struct user_ctxs { struct fpsimd_context __user *fpsimd; + u32 fpsimd_size; struct sve_context __user *sve; + u32 sve_size; struct tpidr2_context __user *tpidr2; + u32 tpidr2_size; struct za_context __user *za; + u32 za_size; struct zt_context __user *zt; + u32 zt_size; }; static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) @@ -199,14 +204,10 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) static int restore_fpsimd_context(struct user_ctxs *user) { struct user_fpsimd_state fpsimd; - __u32 size; int err = 0; /* check the size information */ - __get_user_error(size, &user->fpsimd->head.size, err); - if (err) - return -EFAULT; - if (size != sizeof(struct fpsimd_context)) + if (user->fpsimd_size != sizeof(struct fpsimd_context)) return -EINVAL; /* copy the FP and status/control registers */ @@ -275,12 +276,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) struct user_fpsimd_state fpsimd; struct sve_context sve; + if (user->sve_size < sizeof(*user->sve)) + return -EINVAL; + if (__copy_from_user(&sve, user->sve, sizeof(sve))) return -EFAULT; - if (sve.head.size < sizeof(*user->sve)) - return -EINVAL; - if (sve.flags & SVE_SIG_FLAG_SM) { if (!system_supports_sme()) return -EINVAL; @@ -296,7 +297,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (sve.vl != vl) return -EINVAL; - if (sve.head.size == sizeof(*user->sve)) { + if (user->sve_size == sizeof(*user->sve)) { clear_thread_flag(TIF_SVE); current->thread.svcr &= ~SVCR_SM_MASK; current->thread.fp_type = FP_STATE_FPSIMD; @@ -305,7 +306,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) vq = sve_vq_from_vl(sve.vl); - if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq)) + if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) return -EINVAL; /* @@ -385,7 +386,9 @@ static int restore_tpidr2_context(struct user_ctxs *user) u64 tpidr2_el0; int err = 0; - /* Magic and size were validated deciding to call this function */ + if (user->tpidr2_size != sizeof(*user->tpidr2)) + return -EINVAL; + __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err); if (!err) current->thread.tpidr2_el0 = tpidr2_el0; @@ -434,23 +437,23 @@ static int restore_za_context(struct user_ctxs *user) unsigned int vq; struct za_context za; + if (user->za_size < sizeof(*user->za)) + return -EINVAL; + if (__copy_from_user(&za, user->za, sizeof(za))) return -EFAULT; - if (za.head.size < sizeof(*user->za)) - return -EINVAL; - if (za.vl != task_get_sme_vl(current)) return -EINVAL; - if (za.head.size == sizeof(*user->za)) { + if (user->za_size == sizeof(*user->za)) { current->thread.svcr &= ~SVCR_ZA_MASK; return 0; } vq = sve_vq_from_vl(za.vl); - if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq)) + if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq)) return -EINVAL; /* @@ -521,15 +524,15 @@ static int restore_zt_context(struct user_ctxs *user) if (!thread_za_enabled(¤t->thread)) return -EINVAL; + if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1)) + return -EINVAL; + if (__copy_from_user(&zt, user->zt, sizeof(zt))) return 
-EFAULT; if (zt.nregs != 1) return -EINVAL; - if (zt.head.size != ZT_SIG_CONTEXT_SIZE(zt.nregs)) - return -EINVAL; - /* * Careful: we are about __copy_from_user() directly into * thread.zt_state with preemption enabled, so protection is @@ -621,6 +624,7 @@ static int parse_user_sigframe(struct user_ctxs *user, goto invalid; user->fpsimd = (struct fpsimd_context __user *)head; + user->fpsimd_size = size; break; case ESR_MAGIC: @@ -635,6 +639,7 @@ static int parse_user_sigframe(struct user_ctxs *user, goto invalid; user->sve = (struct sve_context __user *)head; + user->sve_size = size; break; case TPIDR2_MAGIC: @@ -644,10 +649,8 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->tpidr2) goto invalid; - if (size != sizeof(*user->tpidr2)) - goto invalid; - user->tpidr2 = (struct tpidr2_context __user *)head; + user->tpidr2_size = size; break; case ZA_MAGIC: @@ -658,6 +661,7 @@ static int parse_user_sigframe(struct user_ctxs *user, goto invalid; user->za = (struct za_context __user *)head; + user->za_size = size; break; case ZT_MAGIC: @@ -667,10 +671,8 @@ static int parse_user_sigframe(struct user_ctxs *user, if (user->zt) goto invalid; - if (size < sizeof(*user->zt)) - goto invalid; - user->zt = (struct zt_context __user *)head; + user->zt_size = size; break; case EXTRA_MAGIC: From f3ac48aa3a58b0d0b1104416a652dc7da9c03b4a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:43 +0000 Subject: [PATCH 119/130] arm64/signal: Only read new data when parsing the SVE context When we parse the SVE signal context we read the entire context from userspace, including the generic signal context header which was already read by parse_user_sigframe() and padding bytes that we ignore. Avoid the possibility of relying on the second read of the data read twice by only reading the data which we are actually going to use. 
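Roughly, instead of pulling in the whole struct sve_context with __copy_from_user() the parser now fetches just the two fields it consumes:

	u16 user_vl, flags;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;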
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-5-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 567e8e5b6998..27a1fa37f926 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -271,18 +271,20 @@ static int preserve_sve_context(struct sve_context __user *ctx) static int restore_sve_fpsimd_context(struct user_ctxs *user) { - int err; + int err = 0; unsigned int vl, vq; struct user_fpsimd_state fpsimd; - struct sve_context sve; + u16 user_vl, flags; if (user->sve_size < sizeof(*user->sve)) return -EINVAL; - if (__copy_from_user(&sve, user->sve, sizeof(sve))) - return -EFAULT; + __get_user_error(user_vl, &(user->sve->vl), err); + __get_user_error(flags, &(user->sve->flags), err); + if (err) + return err; - if (sve.flags & SVE_SIG_FLAG_SM) { + if (flags & SVE_SIG_FLAG_SM) { if (!system_supports_sme()) return -EINVAL; @@ -294,7 +296,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) vl = task_get_sve_vl(current); } - if (sve.vl != vl) + if (user_vl != vl) return -EINVAL; if (user->sve_size == sizeof(*user->sve)) { @@ -304,7 +306,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) goto fpsimd_only; } - vq = sve_vq_from_vl(sve.vl); + vq = sve_vq_from_vl(vl); if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) return -EINVAL; @@ -332,7 +334,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (err) return -EFAULT; - if (sve.flags & SVE_SIG_FLAG_SM) + if (flags & SVE_SIG_FLAG_SM) current->thread.svcr |= SVCR_SM_MASK; else set_thread_flag(TIF_SVE); From 24d68345a02aee155b22deb26fde3e08332d8f88 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:44 +0000 Subject: [PATCH 120/130] arm64/signal: Only read new data when parsing the ZA context When we parse the ZA signal context we read the entire context from userspace, including the generic signal context header which was already read by parse_user_sigframe() and padding bytes that we ignore. Avoid any reliance on data that is read twice, and which userspace could change between the two reads, by only reading the data which we are actually going to use.
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-6-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 27a1fa37f926..7810d090c025 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -435,17 +435,18 @@ static int preserve_za_context(struct za_context __user *ctx) static int restore_za_context(struct user_ctxs *user) { - int err; + int err = 0; unsigned int vq; - struct za_context za; + u16 user_vl; if (user->za_size < sizeof(*user->za)) return -EINVAL; - if (__copy_from_user(&za, user->za, sizeof(za))) - return -EFAULT; + __get_user_error(user_vl, &(user->za->vl), err); + if (err) + return err; - if (za.vl != task_get_sme_vl(current)) + if (user_vl != task_get_sme_vl(current)) return -EINVAL; if (user->za_size == sizeof(*user->za)) { @@ -453,7 +454,7 @@ static int restore_za_context(struct user_ctxs *user) return 0; } - vq = sve_vq_from_vl(za.vl); + vq = sve_vq_from_vl(user_vl); if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq)) return -EINVAL; From ad678be4238720384fa9e21b9b08b5540ac7ca5d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 31 Jan 2023 22:20:45 +0000 Subject: [PATCH 121/130] arm64/signal: Only read new data when parsing the ZT context When we parse the ZT signal context we read the entire context from userspace, including the generic signal context header which was already read by parse_user_sigframe() and padding bytes that we ignore. Avoid any reliance on data that is read twice, and which userspace could change between the two reads, by only reading the data which we are actually going to use. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20221212-arm64-signal-cleanup-v3-7-4545c94b20ff@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 7810d090c025..d7b5ed8a9b7f 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -521,7 +521,7 @@ static int preserve_zt_context(struct zt_context __user *ctx) static int restore_zt_context(struct user_ctxs *user) { int err; - struct zt_context zt; + u16 nregs; /* ZA must be restored first for this check to be valid */ if (!thread_za_enabled(&current->thread)) return -EINVAL; @@ -530,10 +530,10 @@ static int restore_zt_context(struct user_ctxs *user) if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1)) return -EINVAL; - if (__copy_from_user(&zt, user->zt, sizeof(zt))) + if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs))) return -EFAULT; - if (zt.nregs != 1) + if (nregs != 1) return -EINVAL; /* From 7f49b037397631dc5ec8f6eed67d218edf094fa2 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Fri, 3 Feb 2023 13:15:09 +0100 Subject: [PATCH 122/130] drivers/perf: fsl_imx8_ddr_perf: Remove set-but-not-used variable active_events is set but not used, remove it.
Signed-off-by: Sascha Hauer Link: https://lore.kernel.org/r/20230203121509.3580245-1-s.hauer@pengutronix.de Signed-off-by: Will Deacon --- drivers/perf/fsl_imx8_ddr_perf.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 8e058e08fe81..5222ba1e79d0 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -97,7 +97,6 @@ struct ddr_pmu { struct hlist_node node; struct device *dev; struct perf_event *events[NUM_COUNTERS]; - int active_events; enum cpuhp_state cpuhp_state; const struct fsl_ddr_devtype_data *devtype_data; int irq; @@ -530,7 +529,6 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) } pmu->events[counter] = event; - pmu->active_events++; hwc->idx = counter; hwc->state |= PERF_HES_STOPPED; @@ -562,7 +560,6 @@ static void ddr_perf_event_del(struct perf_event *event, int flags) ddr_perf_event_stop(event, PERF_EF_UPDATE); ddr_perf_free_counter(pmu, counter); - pmu->active_events--; hwc->idx = -1; } From 9442d05bba6c12749fdc4039eddcf801398ec82b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 6 Feb 2023 09:24:40 +0000 Subject: [PATCH 123/130] arm64/sme: Fix __finalise_el2 SMEver check When checking for ID_AA64SMFR0_EL1.SMEver, __check_override assumes that the ID_AA64SMFR0_EL1 value is in x1, and the intent of the code is to reuse the value read a few lines above. However, as the comment at the beginning of the macro says, x1 will be clobbered, and the check always fails. The easiest fix is just to reload the ID register before checking it. Fixes: f122576f3533 ("arm64/sme: Enable host kernel to access ZT0") Signed-off-by: Marc Zyngier Reviewed-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/hyp-stub.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index d31d1acb170d..111ff33d93ee 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -133,6 +133,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) .Lskip_sme_fa64: // ZT0 available? + mrs_s x1, SYS_ID_AA64SMFR0_EL1 __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0 .Linit_sme_zt0: orr x0, x0, SMCR_ELx_EZT0_MASK From 09519ec3b19e4144b5f6e269c54fbb9c294a9fcb Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:23 -0600 Subject: [PATCH 124/130] perf: Add perf_event_attr::config3 Arm SPEv1.2 adds another 64 bits of event filtering control. As the existing perf_event_attr::configN fields are all used up for the SPE PMU, an additional field is needed. Add a new 'config3' field. Tested-by: James Clark Signed-off-by: Rob Herring Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-7-327f860daf28@kernel.org Signed-off-by: Will Deacon --- include/uapi/linux/perf_event.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index ccb7f5dad59b..37675437b768 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -374,6 +374,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ #define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -515,6 +516,8 @@ struct perf_event_attr { * truncated accordingly on 32 bit architectures.
*/ __u64 sig_data; + + __u64 config3; /* extension of config2 */ }; /* From 8d9190f00a9753ff51f18319d928dedd9a272057 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 9 Jan 2023 13:26:24 -0600 Subject: [PATCH 125/130] perf: arm_spe: Add support for SPEv1.2 inverted event filtering Arm SPEv1.2 (Arm v8.7/v9.2) adds a new feature called Inverted Event Filter which excludes samples matching the event filter. The feature mirrors the existing event filter in PMSEVFR_EL1 adding a new register, PMSNEVFR_EL1, which has the same event bit assignments. Tested-by: James Clark Signed-off-by: Rob Herring Link: https://lore.kernel.org/r/20220825-arm-spe-v8-7-v4-8-327f860daf28@kernel.org Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 45 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 82f67e941bc4..573db4211acd 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -85,6 +85,7 @@ struct arm_spe_pmu { #define SPE_PMU_FEAT_ARCH_INST (1UL << 3) #define SPE_PMU_FEAT_LDS (1UL << 4) #define SPE_PMU_FEAT_ERND (1UL << 5) +#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6) #define SPE_PMU_FEAT_DEV_PROBED (1UL << 63) u64 features; @@ -202,6 +203,10 @@ static const struct attribute_group arm_spe_pmu_cap_group = { #define ATTR_CFG_FLD_min_latency_LO 0 #define ATTR_CFG_FLD_min_latency_HI 11 +#define ATTR_CFG_FLD_inv_event_filter_CFG config3 /* PMSNEVFR_EL1 */ +#define ATTR_CFG_FLD_inv_event_filter_LO 0 +#define ATTR_CFG_FLD_inv_event_filter_HI 63 + /* Why does everything I do descend into this? */ #define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi @@ -232,6 +237,7 @@ GEN_PMU_FORMAT_ATTR(branch_filter); GEN_PMU_FORMAT_ATTR(load_filter); GEN_PMU_FORMAT_ATTR(store_filter); GEN_PMU_FORMAT_ATTR(event_filter); +GEN_PMU_FORMAT_ATTR(inv_event_filter); GEN_PMU_FORMAT_ATTR(min_latency); static struct attribute *arm_spe_pmu_formats_attr[] = { @@ -243,12 +249,27 @@ static struct attribute *arm_spe_pmu_formats_attr[] = { &format_attr_load_filter.attr, &format_attr_store_filter.attr, &format_attr_event_filter.attr, + &format_attr_inv_event_filter.attr, &format_attr_min_latency.attr, NULL, }; +static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int unused) + { + struct device *dev = kobj_to_dev(kobj); + struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); + + if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT)) + return 0; + + return attr->mode; +} + static const struct attribute_group arm_spe_pmu_format_group = { .name = "format", + .is_visible = arm_spe_pmu_format_attr_is_visible, .attrs = arm_spe_pmu_formats_attr, }; @@ -343,6 +364,9 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) if (ATTR_CFG_GET_FLD(attr, event_filter)) reg |= PMSFCR_EL1_FE; + if (ATTR_CFG_GET_FLD(attr, inv_event_filter)) + reg |= PMSFCR_EL1_FnE; + if (ATTR_CFG_GET_FLD(attr, min_latency)) reg |= PMSFCR_EL1_FL; @@ -355,6 +379,12 @@ static u64 arm_spe_event_to_pmsevfr(struct perf_event *event) return ATTR_CFG_GET_FLD(attr, event_filter); } +static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + return ATTR_CFG_GET_FLD(attr, inv_event_filter); +} + static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; @@ -703,6 +733,9 @@ static int arm_spe_pmu_event_init(struct 
perf_event *event) if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver)) return -EOPNOTSUPP; + if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver)) + return -EOPNOTSUPP; + if (attr->exclude_idle) return -EOPNOTSUPP; @@ -721,6 +754,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event) !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) return -EOPNOTSUPP; + if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) && + !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT)) + return -EOPNOTSUPP; + if ((FIELD_GET(PMSFCR_EL1_FT, reg)) && !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) return -EOPNOTSUPP; @@ -756,6 +793,11 @@ static void arm_spe_pmu_start(struct perf_event *event, int flags) reg = arm_spe_event_to_pmsevfr(event); write_sysreg_s(reg, SYS_PMSEVFR_EL1); + if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) { + reg = arm_spe_event_to_pmsnevfr(event); + write_sysreg_s(reg, SYS_PMSNEVFR_EL1); + } + reg = arm_spe_event_to_pmslatfr(event); write_sysreg_s(reg, SYS_PMSLATFR_EL1); @@ -990,6 +1032,9 @@ static void __arm_spe_pmu_dev_probe(void *info) if (FIELD_GET(PMSIDR_EL1_FE, reg)) spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; + if (FIELD_GET(PMSIDR_EL1_FnE, reg)) + spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT; + if (FIELD_GET(PMSIDR_EL1_FT, reg)) spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; From e8a709dc2a9156f223ec953ae70a919e87ad7e9a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 6 Feb 2023 14:47:46 -0600 Subject: [PATCH 126/130] perf: arm_spe: Print the version of SPE detected There are up to 4 versions of SPE now. Let's add the version that's been detected to the driver's informational printout. Signed-off-by: Rob Herring Link: https://lore.kernel.org/r/20230206204746.1452942-1-robh@kernel.org Signed-off-by: Will Deacon --- drivers/perf/arm_spe_pmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 573db4211acd..b9ba4c4fe5a2 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1105,8 +1105,8 @@ static void __arm_spe_pmu_dev_probe(void *info) } dev_info(dev, - "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n", - cpumask_pr_args(&spe_pmu->supported_cpus), + "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n", + spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus), spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; From a088cf8eee1263462131fe8364e0fa962e17412b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 4 Feb 2023 11:18:07 +0100 Subject: [PATCH 127/130] arm64: kprobes: Drop ID map text from kprobes blacklist The ID mapped text region is never accessed via the normal kernel mapping of text, and so it was moved into .rodata instead. This means it is no longer considered a suitable place for kprobes by default, so the explicit blacklist is unnecessary and actually results in an error message at boot: kprobes: Failed to populate blacklist (error -22), kprobes not restricted, be careful using them! So stop blacklisting the ID map text explicitly.
Fixes: af7249b317e4 ("arm64: kernel: move identity map out of .text mapping") Reported-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel Tested-by: Nathan Chancellor Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20230204101807.2862321-1-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f35d059a9a36..70b91a8c6bb3 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -387,10 +387,6 @@ int __init arch_populate_kprobe_blacklist(void) (unsigned long)__irqentry_text_end); if (ret) return ret; - ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start, - (unsigned long)__idmap_text_end); - if (ret) - return ret; ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start, (unsigned long)__hyp_text_end); if (ret || is_kernel_in_hyp_mode()) From 6012b8202022d9eec0c09cbb3212e49ccd273438 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 2 Feb 2023 17:30:44 +0000 Subject: [PATCH 128/130] kselftest/arm64: Copy whole EXTRA context When copying the EXTRA context our calculation of the amount of data we need to copy is incorrect: we only calculate the amount of data needed within uc_mcontext.__reserved, not taking account of the fixed portion of the context. Add in the offset of the reserved data so that we copy everything we should. This will only cause test failures in cases where the last context in the EXTRA context is smaller than the missing data, since we don't currently validate any of the register data, and all the buffers we copy into are statically allocated and so default to zero; if we walk beyond the end of what we copied we'll encounter what looks like a context with magic and length both 0, which is a valid terminator record. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230201-arm64-kselftest-full-extra-v1-1-93741f32dd29@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/signal/test_signals_utils.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c index 308e229e58ab..746a4f70f082 100644 --- a/tools/testing/selftests/arm64/signal/test_signals_utils.c +++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c @@ -192,8 +192,10 @@ static bool handle_signal_copyctx(struct tdescr *td, * in the copy, this was previously validated in * ASSERT_GOOD_CONTEXT(). */ - to_copy = offset + sizeof(struct extra_context) + 16 + - extra->size; + to_copy = __builtin_offsetof(ucontext_t, + uc_mcontext.__reserved); + to_copy += offset + sizeof(struct extra_context) + 16; + to_copy += extra->size; copied_extra = (struct extra_context *)&(td->live_uc->uc_mcontext.__reserved[offset]); } else { copied_extra = NULL; From 2c4192c0a7f2d628b5c1667577316ee9e7471e20 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 2 Feb 2023 17:31:25 +0000 Subject: [PATCH 129/130] kselftest/arm64: Don't require FA64 for streaming SVE+ZA tests During early development a dependency was added on having FA64 available so we could use the full FPSIMD register set in the signal handler, and this got copied over into the SSVE+ZA registers test case. Subsequently the ABI was finalised so that the handler runs with streaming mode disabled, meaning the dependency is redundant, but it was never removed; do so now.
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230202-arm64-kselftest-sve-za-fa64-v1-1-5c5f3dabe441@kernel.org Signed-off-by: Catalin Marinas --- .../selftests/arm64/signal/testcases/ssve_za_regs.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c index 1f62621794d5..9dc5f128bbc0 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c @@ -154,12 +154,7 @@ static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc) struct tdescr tde = { .name = "Streaming SVE registers", .descr = "Check that we get the right Streaming SVE registers reported", - /* - * We shouldn't require FA64 but things like memset() used in the - * helpers might use unsupported instructions so for now disable - * the test unless we've got the full instruction set. - */ - .feats_required = FEAT_SME | FEAT_SME_FA64, + .feats_required = FEAT_SME, .timeout = 3, .init = sme_get_vls, .run = sme_regs, From d54170812ef1c80e0fa3ed3e554a0bbfc2920d9d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 20 Feb 2023 16:23:17 +0000 Subject: [PATCH 130/130] arm64: fix .idmap.text assertion for large kernels When building a kernel with many debug options enabled (which happens in test configurations used by myself and syzbot), the kernel can become large enough that portions of .text can be more than 128M away from .idmap.text (which is placed inside the .rodata section). Where idmap code branches into .text, the linker will place veneers in the .idmap.text section to make those branches possible. Unfortunately, as Ard reports, GNU LD has been observed to add 4K of padding when adding such veneers, e.g. | .idmap.text 0xffffffc01e48e5c0 0x32c arch/arm64/mm/proc.o | 0xffffffc01e48e5c0 idmap_cpu_replace_ttbr1 | 0xffffffc01e48e600 idmap_kpti_install_ng_mappings | 0xffffffc01e48e800 __cpu_setup | *fill* 0xffffffc01e48e8ec 0x4 | .idmap.text.stub | 0xffffffc01e48e8f0 0x18 linker stubs | 0xffffffc01e48f8f0 __idmap_text_end = . | 0xffffffc01e48f000 . = ALIGN (0x1000) | *fill* 0xffffffc01e48f8f0 0x710 | 0xffffffc01e490000 idmap_pg_dir = . This makes the __idmap_text_start .. __idmap_text_end region bigger than the 4K we require it to fit within, and triggers an assertion in arm64's vmlinux.lds.S, which breaks the build: | LD .tmp_vmlinux.kallsyms1 | aarch64-linux-gnu-ld: ID map text too big or misaligned | make[1]: *** [scripts/Makefile.vmlinux:35: vmlinux] Error 1 | make: *** [Makefile:1264: vmlinux] Error 2 Avoid this by using an `ADRP+ADD+BLR` sequence for branches out of .idmap.text, which avoids the need for veneers. These branches are only executed once per boot, and only when the MMU is on, so there should be no noticeable performance penalty in replacing `BL` with `ADRP+ADD+BLR`. At the same time, remove the "x" and "w" attributes when placing code in .idmap.text, as these are not necessary, and this will prevent the linker from assuming that it is safe to place PLTs into .idmap.text, causing it to warn if and when there are out-of-range branches within .idmap.text, e.g.
| LD .tmp_vmlinux.kallsyms1 | arch/arm64/kernel/head.o: in function `primary_entry': | (.idmap.text+0x1c): relocation truncated to fit: R_AARCH64_CALL26 against symbol `dcache_clean_poc' defined in .text section in arch/arm64/mm/cache.o | arch/arm64/kernel/head.o: in function `init_el2': | (.idmap.text+0x88): relocation truncated to fit: R_AARCH64_CALL26 against symbol `dcache_clean_poc' defined in .text section in arch/arm64/mm/cache.o | make[1]: *** [scripts/Makefile.vmlinux:34: vmlinux] Error 1 | make: *** [Makefile:1252: vmlinux] Error 2 Thus, if future changes add out-of-range branches in .idmap.text, it should be easy enough to identify those from the resulting linker errors. Reported-by: syzbot+f8ac312e31226e23302b@syzkaller.appspotmail.com Link: https://lore.kernel.org/linux-arm-kernel/00000000000028ea4105f4e2ef54@google.com/ Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Will Deacon Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230220162317.1581208-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 212d93aca5e6..b98970907226 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -70,7 +70,7 @@ __EFI_PE_HEADER - .section ".idmap.text","awx" + .section ".idmap.text","a" /* * The following callee saved general purpose registers are used on the @@ -99,7 +99,8 @@ SYM_CODE_START(primary_entry) cbz x19, 0f adrp x0, __idmap_text_start adr_l x1, __idmap_text_end - bl dcache_clean_poc + adr_l x2, dcache_clean_poc + blr x2 0: mov x0, x19 bl init_kernel_el // w0=cpu_boot_mode mov x20, x0 @@ -527,7 +528,7 @@ SYM_FUNC_END(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","awx" + .section ".idmap.text","a" /* * Starting from EL2 or EL1, configure the CPU to execute at the highest @@ -566,7 +567,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) cbz x0, 0f adrp x0, __hyp_idmap_text_start adr_l x1, __hyp_text_end - bl dcache_clean_poc + adr_l x2, dcache_clean_poc + blr x2 0: mov_q x0, HCR_HOST_NVHE_FLAGS msr hcr_el2, x0 @@ -728,7 +730,7 @@ SYM_FUNC_END(set_cpu_boot_mode_flag) * Checks if the selected granule size is supported by the CPU. * If it isn't, park the CPU */ - .section ".idmap.text","awx" + .section ".idmap.text","a" SYM_FUNC_START(__enable_mmu) mrs x3, ID_AA64MMFR0_EL1 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
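To make the two call sequences concrete, here is a hand-written sketch (illustrative only; far_away_func is a hypothetical symbol, and the adr_l macro used in the diff above expands to the same ADRP+ADD pair):

	// Direct call: BL carries a 26-bit signed immediate, giving a
	// +/-128M range; beyond that the linker must insert a veneer.
	bl	far_away_func

	// Long call: ADRP+ADD materialise the full address (+/-4G reach)
	// and BLR calls through the register, so no veneer is needed.
	adrp	x2, far_away_func
	add	x2, x2, :lo12:far_away_func
	blr	x2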