buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
+#define acpi_unlazy_tlb(x)
+
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
	for_each_cpu((cpu), &early_cpu_possible_map)
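
(The hunk above is from the ia64 <asm/acpi.h>. The shared ACPI idle code calls acpi_unlazy_tlb() unconditionally, so an architecture with no lazy-TLB state to drop supplies an empty stub. The next hunk is the x86 counterpart, arch/x86/include/asm/acpi.h, where the macro does real work.)
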
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
+#define acpi_unlazy_tlb(x) leave_mm(x)
+
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
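
On x86 the macro is not a stub: it expands to leave_mm(), which detaches a lazy-TLB CPU from its user mm so that later TLB-flush IPIs for that mm can skip it. A minimal sketch of what the call amounts to, pieced together from the tlb.c context quoted further down (leave_mm_sketch() is an illustrative name, not the kernel's exact code):

    void leave_mm_sketch(int cpu)
    {
            struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

            /* Already running on the kernel page tables: nothing to drop. */
            if (loaded_mm == &init_mm)
                    return;

            /* Detach from the user mm; see the tlb.c hunk below. */
            switch_mm(NULL, &init_mm, NULL);
    }

The next hunk moves to apply_relocate_add() in arch/x86/kernel/module.c:
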
case R_X86_64_NONE:
break;
case R_X86_64_64:
+ if (*(u64 *)loc != 0)
+ goto invalid_relocation;
*(u64 *)loc = val;
break;
case R_X86_64_32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
*(u32 *)loc = val;
if (val != *(u32 *)loc)
goto overflow;
break;
case R_X86_64_32S:
+ if (*(s32 *)loc != 0)
+ goto invalid_relocation;
*(s32 *)loc = val;
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
case R_X86_64_PC32:
+ if (*(u32 *)loc != 0)
+ goto invalid_relocation;
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
if ((s64)val != *(s32 *)loc)
goto overflow;
#endif
break;
}
return 0;
+invalid_relocation:
+ pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+ (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+ return -ENOEXEC;
+
overflow:
pr_err("overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
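
Each writable relocation type now checks that the target bytes are still zero before patching them; a nonzero value means the same relocation was already applied, or that it points at the wrong offset in a corrupt module image. The pattern, isolated (write_reloc_checked() is a made-up name for illustration):

    static int write_reloc_checked(u32 *loc, u32 val)
    {
            /* A relocation target must start out zeroed; anything
             * else indicates a duplicated or misdirected relocation. */
            if (*loc != 0)
                    return -ENOEXEC;        /* the invalid_relocation path */
            *loc = val;
            return 0;
    }

The remaining hunks return to the TLB side, in arch/x86/mm/tlb.c, starting at the tail of leave_mm() itself:
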
switch_mm(NULL, &init_mm, NULL);
}
+EXPORT_SYMBOL_GPL(leave_mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
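
leave_mm() gets its export back, presumably because the ACPI processor idle code can be built modular and now reaches it through acpi_unlazy_tlb(). The next hunk is further down in the same file, inside switch_mm_irqs_off():
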
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
write_cr3(build_cr3(next, new_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
- TLB_FLUSH_ALL);
+
+ /*
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
+ */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
write_cr3(build_cr3_noflush(next, new_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
this_cpu_write(cpu_tlbstate.loaded_mm, next);
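
Why _rcuidle: tracepoint probes run under RCU read-side protection, but once the idle loop has called rcu_idle_enter() the CPU is no longer watched by RCU, and firing a plain tracepoint there triggers "RCU used illegally from idle CPU" splats. The _rcuidle variant momentarily marks the CPU non-idle around the probe. The open-coded equivalent, shown for illustration only (the patch uses the generated trace_tlb_flush_rcuidle() wrapper):

    RCU_NONIDLE(trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL));

The last two hunks restore the leave_mm() calls at the idle-entry sites, first in drivers/acpi/processor_idle.c:
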
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
{
+ acpi_unlazy_tlb(smp_processor_id());
+
/*
* Must be done before busmaster disable as we might need to
* access HPET!
*/
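
The payoff of calling leave_mm() before a deep, bus-master-disabling C-state: remote TLB flushes only interrupt CPUs still attached to the mm, so a CPU that has left the mm sleeps through them instead of taking an expensive wakeup. A simplified sketch of the sender side (do_flush_one_cpu() is a hypothetical callback, not the kernel's actual flush function):

    static void flush_user_tlbs_sketch(struct mm_struct *mm)
    {
            /* CPUs that ran leave_mm() have dropped out of
             * mm_cpumask(mm) and receive no interrupt here. */
            smp_call_function_many(mm_cpumask(mm), do_flush_one_cpu, mm, true);
    }

The final hunk does the same in drivers/idle/intel_idle.c, keyed off the state flags:
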
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
+ int cpu = smp_processor_id();
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
/*
- * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
- * will probably flush the TLB. It's not guaranteed to flush
- * the TLB, though, so it's not clear that we can do anything
- * useful with this knowledge.
+ * Call leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLBs associated with the active mm.
*/
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
tick_broadcast_enter();
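
intel_idle only bothers when the target C-state is deep enough to lose the TLB contents anyway, which its state tables mark with CPUIDLE_FLAG_TLB_FLUSHED; at that point leave_mm() costs essentially nothing extra. An illustrative table entry (abbreviated, not copied from the driver):

    static struct cpuidle_state example_deep_cstate = {
            .name   = "C6",
            .desc   = "MWAIT 0x20",
            /* This state flushes the TLB as a side effect, so
             * leaving the mm beforehand is effectively free. */
            .flags  = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
    };
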