x86/speculation/l1tf: Drop the swap storage limit restriction when l1tf=off
arch/x86/kernel/cpu/bugs.c [muen/linux.git]
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology_early();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed-size MTRRs in there, and overlapping
         * MTRRs with large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems to
         * be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

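/*
 * x86_virt_spec_ctrl - update speculation control MSRs around guest
 * entry/exit.
 * @guest_spec_ctrl:      the guest's SPEC_CTRL value
 * @guest_virt_spec_ctrl: the guest's emulated VIRT_SPEC_CTRL value
 * @setguest:             true on guest entry (load guest values),
 *                        false on guest exit (restore host values)
 *
 * MSRs are only written when the sanitized guest value differs from the
 * effective host value, avoiding needless MSR traffic on the entry/exit path.
 */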
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_AMD64_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

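/*
 * Engage SSBD on AMD CPUs which do not implement it in SPEC_CTRL: either
 * via the virtualized VIRT_SPEC_CTRL MSR or via the family-dependent bit
 * in the LS_CFG MSR cached by the early AMD init code.
 */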
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
        SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

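/*
 * Module loader hook: a module built without retpolines loaded into a
 * retpoline-mitigated kernel re-opens the Spectre v2 attack surface, so
 * warn and remember it for the sysfs report.
 */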
bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]          = "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]        = "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_PRCTL]         = "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP]       = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
} v2_user_options[] __initdata = {
        { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
        { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
        { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
        { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
        { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
        { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
        { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
};

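/*
 * Print the spectre_v2_user= selection when it is noteworthy: a "secure"
 * option given on an unaffected CPU, or a less secure one given on an
 * affected CPU.
 */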
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}

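/*
 * Select the user space mitigation mode (STIBP and IBPB usage) based on
 * the spectre_v2_user= selection and the main spectre_v2 command.
 */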
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /* If enhanced IBRS is enabled no STIBP required */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        /*
         * If SMT is not possible or STIBP is not available clear the STIBP
         * mode.
         */
        if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
                mode = SPECTRE_V2_USER_NONE;
set_mode:
        spectre_v2_user = mode;
        /* Only print the STIBP mode when SMT possible */
        if (smt_possible)
                pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
};

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] __initdata = {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}

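/*
 * Select the kernel-side Spectre v2 mitigation: enhanced IBRS when the
 * CPU provides it, otherwise retpolines as selected on the command line.
 */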
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable
         * restricted speculation around firmware calls only when Enhanced
         * IBRS isn't supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(),
         * because the user might select retpoline on the kernel command
         * line and if the CPU supports Enhanced IBRS, the kernel might
         * unintentionally not enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);

        /* Enable STIBP if appropriate */
        arch_smt_update();
}

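/* IPI callback to propagate the updated x86_spec_ctrl_base to a CPU. */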
static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}

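/*
 * Re-evaluate the STIBP setup whenever the SMT state of the system
 * changes, e.g. on CPU hotplug.
 */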
void arch_smt_update(void)
{
        /* Enhanced IBRS implies STIBP. No update required. */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        mutex_lock(&spec_ctrl_mutex);

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[]  __initdata = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

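/*
 * Map the spec_store_bypass_disable= selection to a mitigation mode and,
 * for the full disable mode, engage the matching Intel or AMD SSBD
 * control.
 */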
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return 0;
                /*
                 * Indirect branch speculation is always disabled in strict
                 * mode.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
                        return -EPERM;
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return -EPERM;
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
                        return 0;
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_STRICT:
                return PR_SPEC_DISABLE;
        default:
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
}

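/*
 * Replicate the boot CPU's speculation control MSR state on a secondary
 * CPU during bringup.
 */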
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache but CPUID can report a smaller number of physical address
 * bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed, then
 * the mitigation range check in l1tf_select_mitigation() triggers. This
 * is a false positive because the mitigation is still possible due to the
 * fact that the cache uses 44 bits internally. Use the cache bits instead
 * of the reported physical bits and adjust them on the affected machines
 * to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

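/*
 * Select the L1TF mitigation: apply the l1tf= SMT policy, warn when PAE
 * is missing or when installed memory exceeds the range coverable by PTE
 * inversion, and otherwise enable PTE inversion.
 */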
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
                        e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                                half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

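/*
 * Parse the l1tf= command line option. Valid arguments: off, flush,
 * flush,nowarn, flush,nosmt, full, full,force. Anything else keeps the
 * default.
 */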
static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]                = "auto",
        [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}

static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}

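/*
 * Common backend for the per-vulnerability sysfs files in
 * /sys/devices/system/cpu/vulnerabilities/.
 */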
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;
        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif