Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[muen/linux.git] / arch / arm64 / kernel / cpu_errata.c
index 7369ad5..9262ec5 100644 (file)
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	const struct arm64_midr_revidr *fix;
+	u32 midr = read_cpuid_id(), revidr;
+
+	/* Per-CPU check: must not be preempted while reading this CPU's regs. */
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	if (!is_midr_in_range(midr, &entry->midr_range))
+		return false;
+
+	/*
+	 * An erratum can be fixed in a later revision without a MIDR bump,
+	 * advertised via REVIDR_EL1 bits. Treat the CPU as unaffected when
+	 * its variant/revision matches a fixed_revs entry and the
+	 * corresponding REVIDR mask bit(s) are set.
+	 */
+	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
+	revidr = read_cpuid(REVIDR_EL1);
+	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
+		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
+			return false;
+
+	return true;
+}
+
+/* Match the local CPU's MIDR against a NULL-terminated list of ranges. */
+static bool __maybe_unused
+is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
+                           int scope)
 {
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
-                                      entry->midr_range_min,
-                                      entry->midr_range_max);
+       return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
 }
 
 static bool __maybe_unused
@@ -41,7 +59,7 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;
 
-       return model == entry->midr_model;
+       return model == entry->midr_range.model;
 }
 
 static bool
@@ -53,11 +71,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
                (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
+/* cpu_enable callback for ARM64_MISMATCHED_CACHE_LINE_SIZE. */
-static int cpu_enable_trap_ctr_access(void *__unused)
+static void
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 {
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
-       return 0;
 }
 
 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
@@ -160,25 +178,25 @@ static void call_hvc_arch_workaround_1(void)
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
+/*
+ * Probe the firmware for ARM_SMCCC_ARCH_WORKAROUND_1 over the active PSCI
+ * conduit (HVC or SMC) and, if available, install the matching branch
+ * predictor hardening callback for the local CPU.
+ */
-static int enable_smccc_arch_workaround_1(void *data)
+static void
+enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 {
-       const struct arm64_cpu_capabilities *entry = data;
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
 
        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return 0;
+               return;
 
+       /* SMCCC 1.0 has no ARCH_FEATURES discovery call; nothing we can do. */
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-               return 0;
+               return;
 
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if (res.a0)
-                       return 0;
+               /* Bail out unless a0 is non-negative (workaround supported). */
+               if ((int)res.a0 < 0)
+                       return;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
@@ -187,20 +205,20 @@ static int enable_smccc_arch_workaround_1(void *data)
        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-               if (res.a0)
-                       return 0;
+               if ((int)res.a0 < 0)
+                       return;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;
 
        default:
-               return 0;
+               return;
        }
 
        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
 
-       return 0;
+       return;
 }
 
 static void qcom_link_stack_sanitization(void)
@@ -215,31 +233,119 @@ static void qcom_link_stack_sanitization(void)
                     : "=&r" (tmp));
 }
 
+/* Install the Falkor link-stack sanitization sequence as the BP hardening cb. */
-static int qcom_enable_link_stack_sanitization(void *data)
+static void
+qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
 {
-       const struct arm64_cpu_capabilities *entry = data;
-
        install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
                                __qcom_hyp_sanitize_link_stack_start,
                                __qcom_hyp_sanitize_link_stack_end);
-
-       return 0;
 }
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#define MIDR_RANGE(model, min, max) \
-       .def_scope = SCOPE_LOCAL_CPU, \
-       .matches = is_affected_midr_range, \
-       .midr_model = model, \
-       .midr_range_min = min, \
-       .midr_range_max = max
+/* Fill in the .matches/.midr_range fields for a (variant,revision) range. */
+#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)      \
+       .matches = is_affected_midr_range,                      \
+       .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+
+#define CAP_MIDR_ALL_VERSIONS(model)                                   \
+       .matches = is_affected_midr_range,                              \
+       .midr_range = MIDR_ALL_VERSIONS(model)
+
+/* Single-entry fixed_revs list ({rev, mask}), terminated by an empty entry. */
+#define MIDR_FIXED(rev, revidr_mask) \
+       .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
+
+#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)           \
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
+       CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+
+#define CAP_MIDR_RANGE_LIST(list)                              \
+       .matches = is_affected_midr_range_list,                 \
+       .midr_range_list = list
+
+/* Errata affecting a range of revisions of a given model variant */
+#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)     \
+       ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
+
+/* Errata affecting a single variant/revision of a model */
+#define ERRATA_MIDR_REV(model, var, rev)       \
+       ERRATA_MIDR_RANGE(model, var, rev, var, rev)
+
+/* Errata affecting all variants/revisions of a given model */
+#define ERRATA_MIDR_ALL_VERSIONS(model)                                \
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_ALL_VERSIONS(model)
+
+/* Errata affecting a list of midr ranges, with same work around */
+#define ERRATA_MIDR_RANGE_LIST(midr_list)                      \
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_RANGE_LIST(midr_list)
+
+/*
+ * Generic helper for handling capabilities with multiple (match,enable) pairs
+ * of callbacks, sharing the same capability bit.
+ * Iterate over each entry to see if at least one matches.
+ */
+static bool __maybe_unused
+multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       const struct arm64_cpu_capabilities *caps;
+
+       /* match_list is terminated by an entry with a NULL .matches method. */
+       for (caps = entry->match_list; caps->matches; caps++)
+               if (caps->matches(caps, scope))
+                       return true;
+
+       return false;
+}
+
+/*
+ * Take appropriate action for all matching entries in the shared capability
+ * entry.
+ */
+static void __maybe_unused
+multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
+{
+       const struct arm64_cpu_capabilities *caps;
 
+       /* Re-check each sub-entry on this CPU; enable only those that match. */
+       for (caps = entry->match_list; caps->matches; caps++)
+               if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
+                   caps->cpu_enable)
+                       caps->cpu_enable(caps);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+
+/*
+ * List of CPUs where we need to issue a psci call to
+ * harden the branch predictor.
+ */
+static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+       {},
+};
+
+/* CPUs hardened via link-stack sanitization rather than the SMCCC call. */
+static const struct midr_range qcom_bp_harden_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+       {},
+};
+
+/* Shared ARM64_HARDEN_BRANCH_PREDICTOR cap: one enable method per CPU list. */
+static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
+       {
+               CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+               .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {},
+};
+
+#endif
 
 #ifndef ERRATA_MIDR_ALL_VERSIONS
 #define        ERRATA_MIDR_ALL_VERSIONS(x)     MIDR_ALL_VERSIONS(x)
@@ -253,8 +359,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
-               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
-               .enable = cpu_enable_cache_maint_trap,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
+               .cpu_enable = cpu_enable_cache_maint_trap,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -262,8 +368,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
-               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
-               .enable = cpu_enable_cache_maint_trap,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+               .cpu_enable = cpu_enable_cache_maint_trap,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
@@ -271,9 +377,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
-               MIDR_RANGE(MIDR_CORTEX_A57,
-                          MIDR_CPU_VAR_REV(0, 0),
-                          MIDR_CPU_VAR_REV(1, 2)),
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
+                                 0, 0,
+                                 1, 2),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_834220
@@ -281,9 +387,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
-               MIDR_RANGE(MIDR_CORTEX_A57,
-                          MIDR_CPU_VAR_REV(0, 0),
-                          MIDR_CPU_VAR_REV(1, 2)),
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
+                                 0, 0,
+                                 1, 2),
+       },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_843419
+       {
+       /* Cortex-A53 r0p[01234] */
+               .desc = "ARM erratum 843419",
+               .capability = ARM64_WORKAROUND_843419,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               MIDR_FIXED(0x4, BIT(8)),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -291,7 +406,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
-               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -299,7 +414,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
-               MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
+               ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
@@ -307,15 +422,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
-               MIDR_RANGE(MIDR_THUNDERX,
-                          MIDR_CPU_VAR_REV(0, 0),
-                          MIDR_CPU_VAR_REV(1, 1)),
+               ERRATA_MIDR_RANGE(MIDR_THUNDERX,
+                                 0, 0,
+                                 1, 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
-               MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+               ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_30115
@@ -323,42 +438,41 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
-               MIDR_RANGE(MIDR_THUNDERX, 0x00,
-                          (1 << MIDR_VARIANT_SHIFT) | 2),
+               ERRATA_MIDR_RANGE(MIDR_THUNDERX,
+                                     0, 0,
+                                     1, 2),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
-               MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
+               ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        },
        {
        /* Cavium ThunderX, T83 pass 1.0 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
-               MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
+               ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        },
 #endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_line_size,
-               .def_scope = SCOPE_LOCAL_CPU,
-               .enable = cpu_enable_trap_ctr_access,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .cpu_enable = cpu_enable_trap_ctr_access,
        },
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
-               MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
-                          MIDR_CPU_VAR_REV(0, 0),
-                          MIDR_CPU_VAR_REV(0, 0)),
+               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .desc = "Qualcomm Technologies Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
-               .def_scope = SCOPE_LOCAL_CPU,
-               .midr_model = MIDR_QCOM_KRYO,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
 #endif
@@ -366,9 +480,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm Technologies Falkor erratum 1009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-               MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
-                          MIDR_CPU_VAR_REV(0, 0),
-                          MIDR_CPU_VAR_REV(0, 0)),
+               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
@@ -376,57 +488,20 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
-               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
 #endif
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-               .enable = enable_smccc_arch_workaround_1,
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-               .enable = enable_smccc_arch_workaround_1,
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-               .enable = enable_smccc_arch_workaround_1,
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-               .enable = enable_smccc_arch_workaround_1,
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-               .enable = qcom_enable_link_stack_sanitization,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = multi_entry_cap_matches,
+               .cpu_enable = multi_entry_cap_cpu_enable,
+               .match_list = arm64_bp_harden_list,
        },
        {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
-               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-               .enable = qcom_enable_link_stack_sanitization,
-       },
-       {
-               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
-               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-               .enable = enable_smccc_arch_workaround_1,
-       },
-       {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-               .enable = enable_smccc_arch_workaround_1,
+               ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
        },
 #endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -444,36 +519,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
        }
 };
-
-/*
- * The CPU Errata work arounds are detected and applied at boot time
- * and the related information is freed soon after. If the new CPU requires
- * an errata not detected at boot, fail this CPU.
- */
-void verify_local_cpu_errata_workarounds(void)
-{
-       const struct arm64_cpu_capabilities *caps = arm64_errata;
-
-       for (; caps->matches; caps++) {
-               if (cpus_have_cap(caps->capability)) {
-                       if (caps->enable)
-                               caps->enable((void *)caps);
-               } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
-                       pr_crit("CPU%d: Requires work around for %s, not detected"
-                                       " at boot time\n",
-                               smp_processor_id(),
-                               caps->desc ? : "an erratum");
-                       cpu_die_early();
-               }
-       }
-}
-
-void update_cpu_errata_workarounds(void)
-{
-       update_cpu_capabilities(arm64_errata, "enabling workaround for");
-}
-
-void __init enable_errata_workarounds(void)
-{
-       enable_cpu_capabilities(arm64_errata);
-}