arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
arch/arm64/kernel/cpu_errata.c (muen/linux.git)
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

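/*
 * Match when the local CPU's MIDR falls within the entry's
 * [midr_range_min, midr_range_max] variant/revision window for the
 * given model. Only meaningful for SCOPE_LOCAL_CPU with preemption
 * disabled, since read_cpuid_id() must report the CPU being checked.
 */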
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
                                       entry->midr_range_min,
                                       entry->midr_range_max);
}

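/*
 * Qualcomm Kryo parts are not covered by a simple variant/revision
 * range, so compare only the implementer, architecture and the top
 * nibble of the part number; any Kryo variant then matches.
 */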
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_model;
}

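/*
 * Flag CPUs whose strict CTR_EL0 fields differ from the system-wide
 * safe value established at boot; such CPUs get EL0 accesses to
 * CTR_EL0 trapped via cpu_enable_trap_ctr_access() below.
 */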
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
                (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

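/*
 * With SCTLR_EL1.UCT clear, EL0 reads of CTR_EL0 trap to EL1, where
 * the trap handler (implemented elsewhere) can return the system-wide
 * safe cache type value instead of the mismatched hardware one.
 */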
static int cpu_enable_trap_ctr_access(void *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
        return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

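/*
 * Copy the hardening sequence into each of the 0x80-byte vector
 * entries of the given 2K hyp vector slot. The copy goes through the
 * linear-map alias, since the vectors are presumably not writable via
 * their executable mapping, and the I-cache is flushed so EL2 observes
 * the new code.
 */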
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

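/*
 * Point this CPU's bp_hardening_data at @fn. If another CPU already
 * uses the same callback, reuse its hyp vector slot; otherwise claim
 * the next free slot (BUG() if none is left) and populate it. bp_lock
 * serialises slot allocation across CPUs.
 */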
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        static int last_slot = -1;
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        spin_unlock(&bp_lock);
}
#else
#define __psci_hyp_bp_inval_start               NULL
#define __psci_hyp_bp_inval_end                 NULL
#define __qcom_hyp_sanitize_link_stack_start    NULL
#define __qcom_hyp_sanitize_link_stack_end      NULL
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL
#define __smccc_workaround_1_hvc_start          NULL
#define __smccc_workaround_1_hvc_end            NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM */

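/*
 * Install @fn as this CPU's hardening callback, unless the entry does
 * not match this CPU or ID_AA64PFR0_EL1.CSV2 reports that the branch
 * predictor is already safe, in which case no workaround is needed.
 */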
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

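/*
 * Invoke the ARM_SMCCC_ARCH_WORKAROUND_1 firmware service, one helper
 * per conduit. The result pointer is NULL because the call is made
 * purely for its branch-predictor-invalidation side effect.
 */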
static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

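/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1. SMCCC 1.1 is
 * required, as the ARM_SMCCC_ARCH_FEATURES_FUNC_ID discovery call only
 * exists from 1.1 onwards. On success, install the callback and hyp
 * vector sequence matching the PSCI conduit (HVC or SMC).
 */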
static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return false;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return false;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if (res.a0)
                        return false;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if (res.a0)
                        return false;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return false;
        }

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

        return true;
}

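/*
 * Prefer the architected SMCCC workaround. Failing that, fall back to
 * invoking PSCI_VERSION through psci_ops.get_version, which relies on
 * (suitably patched) firmware invalidating the branch predictor as a
 * side effect of the firmware call.
 */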
static int enable_psci_bp_hardening(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;

        if (psci_ops.get_version) {
                if (check_smccc_arch_workaround_1(entry))
                        return 0;

                install_bp_hardening_cb(entry,
                                       (bp_hardening_cb_t)psci_ops.get_version,
                                       __psci_hyp_bp_inval_start,
                                       __psci_hyp_bp_inval_end);
        }

        return 0;
}

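/*
 * Execute sixteen branch-and-link instructions, each targeting the
 * immediately following instruction, so the return-address predictor
 * stack is overwritten with harmless entries; the real link register
 * (x30) is preserved in a temporary around the sequence.
 */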
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}

static int qcom_enable_link_stack_sanitization(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;

        install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
                                __qcom_hyp_sanitize_link_stack_start,
                                __qcom_hyp_sanitize_link_stack_end);

        return 0;
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

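/*
 * Initialisers for arm64_cpu_capabilities entries: MIDR_RANGE()
 * matches a variant/revision window of a single CPU model, while
 * MIDR_ALL_VERSIONS() matches every variant and revision of it.
 */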
#define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = min, \
        .midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = 0, \
        .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

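/*
 * Table of known errata and their workarounds. All entries are
 * matched per-CPU (SCOPE_LOCAL_CPU) and the list is terminated by an
 * empty sentinel entry.
 */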
const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                MIDR_RANGE(MIDR_CORTEX_A57,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 2)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                MIDR_RANGE(MIDR_CORTEX_A57,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 2)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 1)),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
        },
        {
        /* Cavium ThunderX, T83 pass 1.0 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_line_size,
                .def_scope = SCOPE_LOCAL_CPU,
                .enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(0, 0)),
        },
        {
                .desc = "Qualcomm Technologies Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .def_scope = SCOPE_LOCAL_CPU,
                .midr_model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                .desc = "Qualcomm Technologies Falkor erratum 1009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(0, 0)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
        /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                .enable = enable_psci_bp_hardening,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                .enable = enable_psci_bp_hardening,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                .enable = enable_psci_bp_hardening,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                .enable = enable_psci_bp_hardening,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
                .enable = qcom_enable_link_stack_sanitization,
        },
        {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
                .enable = enable_psci_bp_hardening,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                .enable = enable_psci_bp_hardening,
        },
#endif
        {
        }
};

/*
 * The CPU errata workarounds are detected and applied at boot time and
 * the related information is freed soon after. If a late-onlined CPU
 * requires a workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
        const struct arm64_cpu_capabilities *caps = arm64_errata;

        for (; caps->matches; caps++) {
                if (cpus_have_cap(caps->capability)) {
                        if (caps->enable)
                                caps->enable((void *)caps);
                } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
                        pr_crit("CPU%d: Requires work around for %s, not detected"
                                        " at boot time\n",
                                smp_processor_id(),
                                caps->desc ? : "an erratum");
                        cpu_die_early();
                }
        }
}

void update_cpu_errata_workarounds(void)
{
        update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
        enable_cpu_capabilities(arm64_errata);
}