1 // SPDX-License-Identifier: GPL-2.0
3 * Performance events core code:
5 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/idr.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/tick.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/rculist.h>
32 #include <linux/uaccess.h>
33 #include <linux/syscalls.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/cgroup.h>
37 #include <linux/perf_event.h>
38 #include <linux/trace_events.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/mm_types.h>
41 #include <linux/module.h>
42 #include <linux/mman.h>
43 #include <linux/compat.h>
44 #include <linux/bpf.h>
45 #include <linux/filter.h>
46 #include <linux/namei.h>
47 #include <linux/parser.h>
48 #include <linux/sched/clock.h>
49 #include <linux/sched/mm.h>
50 #include <linux/proc_ns.h>
51 #include <linux/mount.h>
55 #include <asm/irq_regs.h>
57 typedef int (*remote_function_f)(void *);
59 struct remote_function_call {
60 struct task_struct *p;
61 remote_function_f func;
66 static void remote_function(void *data)
68 struct remote_function_call *tfc = data;
69 struct task_struct *p = tfc->p;
73 if (task_cpu(p) != smp_processor_id())
77	 * Now that we're on the right CPU with IRQs disabled, we can test
78	 * whether we hit the right task without races.
81 tfc->ret = -ESRCH; /* No such (running) process */
86 tfc->ret = tfc->func(tfc->info);
90 * task_function_call - call a function on the cpu on which a task runs
91 * @p: the task to evaluate
92 * @func: the function to be called
93 * @info: the function call argument
95 * Calls the function @func when the task is currently running. This might
96 * be on the current CPU, which just calls the function directly
98 * returns: @func return value, or
99 * -ESRCH - when the process isn't running
100 * -EAGAIN - when the process moved away
103 task_function_call(struct task_struct *p, remote_function_f func, void *info)
105 struct remote_function_call data = {
114 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
117 } while (ret == -EAGAIN);
123 * cpu_function_call - call a function on the cpu
124 * @func: the function to be called
125 * @info: the function call argument
127 * Calls the function @func on the remote cpu.
129 * returns: @func return value or -ENXIO when the cpu is offline
131 static int cpu_function_call(int cpu, remote_function_f func, void *info)
133 struct remote_function_call data = {
137 .ret = -ENXIO, /* No such CPU */
140 smp_call_function_single(cpu, remote_function, &data, 1);
145 static inline struct perf_cpu_context *
146 __get_cpu_context(struct perf_event_context *ctx)
148 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
151 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
152 struct perf_event_context *ctx)
154 raw_spin_lock(&cpuctx->ctx.lock);
156 raw_spin_lock(&ctx->lock);
159 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
160 struct perf_event_context *ctx)
163 raw_spin_unlock(&ctx->lock);
164 raw_spin_unlock(&cpuctx->ctx.lock);
167 #define TASK_TOMBSTONE ((void *)-1L)
169 static bool is_kernel_event(struct perf_event *event)
171 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
175 * On task ctx scheduling...
177 * When !ctx->nr_events a task context will not be scheduled. This means
178 * we can disable the scheduler hooks (for performance) without leaving
179 * pending task ctx state.
181 * This however results in two special cases:
183 * - removing the last event from a task ctx; this is relatively straight
184 * forward and is done in __perf_remove_from_context.
186 * - adding the first event to a task ctx; this is tricky because we cannot
187 * rely on ctx->is_active and therefore cannot use event_function_call().
188 * See perf_install_in_context().
190 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
193 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
194 struct perf_event_context *, void *);
196 struct event_function_struct {
197 struct perf_event *event;
202 static int event_function(void *info)
204 struct event_function_struct *efs = info;
205 struct perf_event *event = efs->event;
206 struct perf_event_context *ctx = event->ctx;
207 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
208 struct perf_event_context *task_ctx = cpuctx->task_ctx;
211 lockdep_assert_irqs_disabled();
213 perf_ctx_lock(cpuctx, task_ctx);
215	 * Since we do the IPI call without holding ctx->lock, things can have
216	 * changed; double-check that we hit the task we set out to hit.
219 if (ctx->task != current) {
225 * We only use event_function_call() on established contexts,
226 * and event_function() is only ever called when active (or
227 * rather, we'll have bailed in task_function_call() or the
228 * above ctx->task != current test), therefore we must have
229 * ctx->is_active here.
231 WARN_ON_ONCE(!ctx->is_active);
233 * And since we have ctx->is_active, cpuctx->task_ctx must
236 WARN_ON_ONCE(task_ctx != ctx);
238 WARN_ON_ONCE(&cpuctx->ctx != ctx);
241 efs->func(event, cpuctx, ctx, efs->data);
243 perf_ctx_unlock(cpuctx, task_ctx);
248 static void event_function_call(struct perf_event *event, event_f func, void *data)
250 struct perf_event_context *ctx = event->ctx;
251 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
252 struct event_function_struct efs = {
258 if (!event->parent) {
260	 * If this is a !child event, we must hold ctx::mutex to
261	 * stabilize the event->ctx relation. See
262	 * perf_event_ctx_lock().
264 lockdep_assert_held(&ctx->mutex);
268 cpu_function_call(event->cpu, event_function, &efs);
272 if (task == TASK_TOMBSTONE)
276 if (!task_function_call(task, event_function, &efs))
279 raw_spin_lock_irq(&ctx->lock);
281 * Reload the task pointer, it might have been changed by
282 * a concurrent perf_event_context_sched_out().
285 if (task == TASK_TOMBSTONE) {
286 raw_spin_unlock_irq(&ctx->lock);
289 if (ctx->is_active) {
290 raw_spin_unlock_irq(&ctx->lock);
293 func(event, NULL, ctx, data);
294 raw_spin_unlock_irq(&ctx->lock);
298 * Similar to event_function_call() + event_function(), but hard assumes IRQs
299 * are already disabled and we're on the right CPU.
301 static void event_function_local(struct perf_event *event, event_f func, void *data)
303 struct perf_event_context *ctx = event->ctx;
304 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
305 struct task_struct *task = READ_ONCE(ctx->task);
306 struct perf_event_context *task_ctx = NULL;
308 lockdep_assert_irqs_disabled();
311 if (task == TASK_TOMBSTONE)
317 perf_ctx_lock(cpuctx, task_ctx);
320 if (task == TASK_TOMBSTONE)
325 * We must be either inactive or active and the right task,
326 * otherwise we're screwed, since we cannot IPI to somewhere
329 if (ctx->is_active) {
330 if (WARN_ON_ONCE(task != current))
333 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
337 WARN_ON_ONCE(&cpuctx->ctx != ctx);
340 func(event, cpuctx, ctx, data);
342 perf_ctx_unlock(cpuctx, task_ctx);
345 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
346 PERF_FLAG_FD_OUTPUT |\
347 PERF_FLAG_PID_CGROUP |\
348 PERF_FLAG_FD_CLOEXEC)
351 * branch priv levels that need permission checks
353 #define PERF_SAMPLE_BRANCH_PERM_PLM \
354 (PERF_SAMPLE_BRANCH_KERNEL |\
355 PERF_SAMPLE_BRANCH_HV)
358 EVENT_FLEXIBLE = 0x1,
361 /* see ctx_resched() for details */
363 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
367 * perf_sched_events : >0 events exist
368 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
371 static void perf_sched_delayed(struct work_struct *work);
372 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
373 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
374 static DEFINE_MUTEX(perf_sched_mutex);
375 static atomic_t perf_sched_count;
377 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
378 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
379 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
381 static atomic_t nr_mmap_events __read_mostly;
382 static atomic_t nr_comm_events __read_mostly;
383 static atomic_t nr_namespaces_events __read_mostly;
384 static atomic_t nr_task_events __read_mostly;
385 static atomic_t nr_freq_events __read_mostly;
386 static atomic_t nr_switch_events __read_mostly;
387 static atomic_t nr_ksymbol_events __read_mostly;
388 static atomic_t nr_bpf_events __read_mostly;
390 static LIST_HEAD(pmus);
391 static DEFINE_MUTEX(pmus_lock);
392 static struct srcu_struct pmus_srcu;
393 static cpumask_var_t perf_online_mask;
396 * perf event paranoia level:
397 * -1 - not paranoid at all
398 * 0 - disallow raw tracepoint access for unpriv
399 * 1 - disallow cpu events for unpriv
400 * 2 - disallow kernel profiling for unpriv
402 int sysctl_perf_event_paranoid __read_mostly = 2;
404 /* Minimum for 512 kiB + 1 user control page */
405 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
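/*
 * Worked example (illustrative, assuming 4 KiB pages): PAGE_SIZE / 1024 = 4,
 * so the default perf_event_mlock_kb limit works out to 512 + 4 = 516 KiB of
 * lockable buffer space per unprivileged user.
 */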
408 * max perf event sample rate
410 #define DEFAULT_MAX_SAMPLE_RATE 100000
411 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
412 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
414 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
416 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
417 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
419 static int perf_sample_allowed_ns __read_mostly =
420 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
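/*
 * Worked example (illustrative, using the defaults above): a max sample rate
 * of 100000 Hz gives a sample period of NSEC_PER_SEC / 100000 = 10000 ns, so
 * with DEFAULT_CPU_TIME_MAX_PERCENT = 25 the initial perf_sample_allowed_ns
 * budget is 10000 * 25 / 100 = 2500 ns per sample. Likewise, with HZ = 1000,
 * max_samples_per_tick starts out as DIV_ROUND_UP(100000, 1000) = 100.
 */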
422 static void update_perf_cpu_limits(void)
424 u64 tmp = perf_sample_period_ns;
426 tmp *= sysctl_perf_cpu_time_max_percent;
427 tmp = div_u64(tmp, 100);
431 WRITE_ONCE(perf_sample_allowed_ns, tmp);
434 static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
436 int perf_proc_update_handler(struct ctl_table *table, int write,
437 void __user *buffer, size_t *lenp,
441 int perf_cpu = sysctl_perf_cpu_time_max_percent;
443	 * If throttling is disabled, don't allow the write:
445 if (write && (perf_cpu == 100 || perf_cpu == 0))
448 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
452 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
453 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
454 update_perf_cpu_limits();
459 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
461 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
462 void __user *buffer, size_t *lenp,
465 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
470 if (sysctl_perf_cpu_time_max_percent == 100 ||
471 sysctl_perf_cpu_time_max_percent == 0) {
473 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
474 WRITE_ONCE(perf_sample_allowed_ns, 0);
476 update_perf_cpu_limits();
483 * perf samples are done in some very critical code paths (NMIs).
484 * If they take too much CPU time, the system can lock up and not
485 * get any real work done. This will drop the sample rate when
486 * we detect that events are taking too long.
488 #define NR_ACCUMULATED_SAMPLES 128
489 static DEFINE_PER_CPU(u64, running_sample_length);
491 static u64 __report_avg;
492 static u64 __report_allowed;
494 static void perf_duration_warn(struct irq_work *w)
496 printk_ratelimited(KERN_INFO
497 "perf: interrupt took too long (%lld > %lld), lowering "
498 "kernel.perf_event_max_sample_rate to %d\n",
499 __report_avg, __report_allowed,
500 sysctl_perf_event_sample_rate);
503 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
505 void perf_sample_event_took(u64 sample_len_ns)
507 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
515 /* Decay the counter by 1 average sample. */
516 running_len = __this_cpu_read(running_sample_length);
517 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
518 running_len += sample_len_ns;
519 __this_cpu_write(running_sample_length, running_len);
522	 * Note: this will be biased artificially low until we have
523 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
524 * from having to maintain a count.
526 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
527 if (avg_len <= max_len)
530 __report_avg = avg_len;
531 __report_allowed = max_len;
534	 * Compute a new throttle threshold 25% above the observed average duration.
536 avg_len += avg_len / 4;
537 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
543 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
544 WRITE_ONCE(max_samples_per_tick, max);
546 sysctl_perf_event_sample_rate = max * HZ;
547 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
549 if (!irq_work_queue(&perf_duration_work)) {
550 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
551 "kernel.perf_event_max_sample_rate to %d\n",
552 __report_avg, __report_allowed,
553 sysctl_perf_event_sample_rate);
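/*
 * Illustrative sketch of the throttling arithmetic above (assuming HZ = 1000,
 * i.e. TICK_NSEC of roughly 1000000 ns, and the default 25% CPU budget): the
 * per-tick budget is (1000000 / 100) * 25 = 250000 ns. If the decayed average
 * sample cost climbs to 5000 ns (above the 2500 ns default), the new threshold
 * becomes 5000 + 5000/4 = 6250 ns; the (elided) remainder of the function then
 * divides the per-tick budget by that threshold, so max_samples_per_tick drops
 * to roughly 250000 / 6250 = 40 and the sample rate is lowered to 40 * HZ =
 * 40000 Hz.
 */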
557 static atomic64_t perf_event_id;
559 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
560 enum event_type_t event_type);
562 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
563 enum event_type_t event_type,
564 struct task_struct *task);
566 static void update_context_time(struct perf_event_context *ctx);
567 static u64 perf_event_time(struct perf_event *event);
569 void __weak perf_event_print_debug(void) { }
571 extern __weak const char *perf_pmu_name(void)
576 static inline u64 perf_clock(void)
578 return local_clock();
581 static inline u64 perf_event_clock(struct perf_event *event)
583 return event->clock();
587 * State based event timekeeping...
589 * The basic idea is to use event->state to determine which (if any) time
590 * fields to increment with the current delta. This means we only need to
591 * update timestamps when we change state or when they are explicitly requested
594 * Event groups make things a little more complicated, but not terribly so. The
595 * rules for a group are that if the group leader is OFF the entire group is
596	 * OFF, irrespective of what the group member states are. This results in
597 * __perf_effective_state().
599	 * A further ramification is that when a group leader flips between OFF and
600 * !OFF, we need to update all group member times.
603 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
604 * need to make sure the relevant context time is updated before we try and
605 * update our timestamps.
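/*
 * Worked example (illustrative): per __perf_update_times() below, the delta
 * since event->tstamp is added to total_time_enabled while the effective
 * state is >= INACTIVE, and to total_time_running only while it is ACTIVE.
 * So an event that sits INACTIVE for 10 ms and then runs ACTIVE for 5 ms
 * reports enabled = 15 ms and running = 5 ms; while OFF, neither advances.
 */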
608 static __always_inline enum perf_event_state
609 __perf_effective_state(struct perf_event *event)
611 struct perf_event *leader = event->group_leader;
613 if (leader->state <= PERF_EVENT_STATE_OFF)
614 return leader->state;
619 static __always_inline void
620 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
622 enum perf_event_state state = __perf_effective_state(event);
623 u64 delta = now - event->tstamp;
625 *enabled = event->total_time_enabled;
626 if (state >= PERF_EVENT_STATE_INACTIVE)
629 *running = event->total_time_running;
630 if (state >= PERF_EVENT_STATE_ACTIVE)
634 static void perf_event_update_time(struct perf_event *event)
636 u64 now = perf_event_time(event);
638 __perf_update_times(event, now, &event->total_time_enabled,
639 &event->total_time_running);
643 static void perf_event_update_sibling_time(struct perf_event *leader)
645 struct perf_event *sibling;
647 for_each_sibling_event(sibling, leader)
648 perf_event_update_time(sibling);
652 perf_event_set_state(struct perf_event *event, enum perf_event_state state)
654 if (event->state == state)
657 perf_event_update_time(event);
659 * If a group leader gets enabled/disabled all its siblings
662 if ((event->state < 0) ^ (state < 0))
663 perf_event_update_sibling_time(event);
665 WRITE_ONCE(event->state, state);
668 #ifdef CONFIG_CGROUP_PERF
671 perf_cgroup_match(struct perf_event *event)
673 struct perf_event_context *ctx = event->ctx;
674 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
676 /* @event doesn't care about cgroup */
680 /* wants specific cgroup scope but @cpuctx isn't associated with any */
685 * Cgroup scoping is recursive. An event enabled for a cgroup is
686 * also enabled for all its descendant cgroups. If @cpuctx's
687 * cgroup is a descendant of @event's (the test covers identity
688 * case), it's a match.
690 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
691 event->cgrp->css.cgroup);
694 static inline void perf_detach_cgroup(struct perf_event *event)
696 css_put(&event->cgrp->css);
700 static inline int is_cgroup_event(struct perf_event *event)
702 return event->cgrp != NULL;
705 static inline u64 perf_cgroup_event_time(struct perf_event *event)
707 struct perf_cgroup_info *t;
709 t = per_cpu_ptr(event->cgrp->info, event->cpu);
713 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
715 struct perf_cgroup_info *info;
720 info = this_cpu_ptr(cgrp->info);
722 info->time += now - info->timestamp;
723 info->timestamp = now;
726 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
728 struct perf_cgroup *cgrp = cpuctx->cgrp;
729 struct cgroup_subsys_state *css;
732 for (css = &cgrp->css; css; css = css->parent) {
733 cgrp = container_of(css, struct perf_cgroup, css);
734 __update_cgrp_time(cgrp);
739 static inline void update_cgrp_time_from_event(struct perf_event *event)
741 struct perf_cgroup *cgrp;
744 * ensure we access cgroup data only when needed and
745 * when we know the cgroup is pinned (css_get)
747 if (!is_cgroup_event(event))
750 cgrp = perf_cgroup_from_task(current, event->ctx);
752 * Do not update time when cgroup is not active
754 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
755 __update_cgrp_time(event->cgrp);
759 perf_cgroup_set_timestamp(struct task_struct *task,
760 struct perf_event_context *ctx)
762 struct perf_cgroup *cgrp;
763 struct perf_cgroup_info *info;
764 struct cgroup_subsys_state *css;
767 * ctx->lock held by caller
768 * ensure we do not access cgroup data
769 * unless we have the cgroup pinned (css_get)
771 if (!task || !ctx->nr_cgroups)
774 cgrp = perf_cgroup_from_task(task, ctx);
776 for (css = &cgrp->css; css; css = css->parent) {
777 cgrp = container_of(css, struct perf_cgroup, css);
778 info = this_cpu_ptr(cgrp->info);
779 info->timestamp = ctx->timestamp;
783 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
785 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
786 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
789 * reschedule events based on the cgroup constraint of task.
791 * mode SWOUT : schedule out everything
792 * mode SWIN : schedule in based on cgroup for next
794 static void perf_cgroup_switch(struct task_struct *task, int mode)
796 struct perf_cpu_context *cpuctx;
797 struct list_head *list;
801	 * Disable interrupts and preemption to keep this CPU's
802	 * cgrp_cpuctx_entry from changing under us.
804 local_irq_save(flags);
806 list = this_cpu_ptr(&cgrp_cpuctx_list);
807 list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
808 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
810 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
811 perf_pmu_disable(cpuctx->ctx.pmu);
813 if (mode & PERF_CGROUP_SWOUT) {
814 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
816 * must not be done before ctxswout due
817 * to event_filter_match() in event_sched_out()
822 if (mode & PERF_CGROUP_SWIN) {
823 WARN_ON_ONCE(cpuctx->cgrp);
825 * set cgrp before ctxsw in to allow
826 * event_filter_match() to not have to pass
828 * we pass the cpuctx->ctx to perf_cgroup_from_task()
829	 * because cgroup events are only per-cpu
831 cpuctx->cgrp = perf_cgroup_from_task(task,
833 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
835 perf_pmu_enable(cpuctx->ctx.pmu);
836 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
839 local_irq_restore(flags);
842 static inline void perf_cgroup_sched_out(struct task_struct *task,
843 struct task_struct *next)
845 struct perf_cgroup *cgrp1;
846 struct perf_cgroup *cgrp2 = NULL;
850 * we come here when we know perf_cgroup_events > 0
851 * we do not need to pass the ctx here because we know
852 * we are holding the rcu lock
854 cgrp1 = perf_cgroup_from_task(task, NULL);
855 cgrp2 = perf_cgroup_from_task(next, NULL);
858 * only schedule out current cgroup events if we know
859 * that we are switching to a different cgroup. Otherwise,
860	 * do not touch the cgroup events.
863 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
868 static inline void perf_cgroup_sched_in(struct task_struct *prev,
869 struct task_struct *task)
871 struct perf_cgroup *cgrp1;
872 struct perf_cgroup *cgrp2 = NULL;
876 * we come here when we know perf_cgroup_events > 0
877 * we do not need to pass the ctx here because we know
878 * we are holding the rcu lock
880 cgrp1 = perf_cgroup_from_task(task, NULL);
881 cgrp2 = perf_cgroup_from_task(prev, NULL);
884 * only need to schedule in cgroup events if we are changing
885 * cgroup during ctxsw. Cgroup events were not scheduled
886	 * out during ctxsw if that was not the case.
889 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
894 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
895 struct perf_event_attr *attr,
896 struct perf_event *group_leader)
898 struct perf_cgroup *cgrp;
899 struct cgroup_subsys_state *css;
900 struct fd f = fdget(fd);
906 css = css_tryget_online_from_dir(f.file->f_path.dentry,
907 &perf_event_cgrp_subsys);
913 cgrp = container_of(css, struct perf_cgroup, css);
917 * all events in a group must monitor
918 * the same cgroup because a task belongs
919 * to only one perf cgroup at a time
921 if (group_leader && group_leader->cgrp != cgrp) {
922 perf_detach_cgroup(event);
931 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
933 struct perf_cgroup_info *t;
934 t = per_cpu_ptr(event->cgrp->info, event->cpu);
935 event->shadow_ctx_time = now - t->timestamp;
939	 * Update cpuctx->cgrp so that it is set when the first cgroup event is added and
940	 * cleared when the last cgroup event is removed.
943 list_update_cgroup_event(struct perf_event *event,
944 struct perf_event_context *ctx, bool add)
946 struct perf_cpu_context *cpuctx;
947 struct list_head *cpuctx_entry;
949 if (!is_cgroup_event(event))
953 * Because cgroup events are always per-cpu events,
954 * this will always be called from the right CPU.
956 cpuctx = __get_cpu_context(ctx);
959 * Since setting cpuctx->cgrp is conditional on the current @cgrp
960 * matching the event's cgroup, we must do this for every new event,
961 * because if the first would mismatch, the second would not try again
962 * and we would leave cpuctx->cgrp unset.
964 if (add && !cpuctx->cgrp) {
965 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
967 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
971 if (add && ctx->nr_cgroups++)
973 else if (!add && --ctx->nr_cgroups)
976 /* no cgroup running */
980 cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
982 list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
984 list_del(cpuctx_entry);
987 #else /* !CONFIG_CGROUP_PERF */
990 perf_cgroup_match(struct perf_event *event)
995 static inline void perf_detach_cgroup(struct perf_event *event)
998 static inline int is_cgroup_event(struct perf_event *event)
1003 static inline void update_cgrp_time_from_event(struct perf_event *event)
1007 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
1011 static inline void perf_cgroup_sched_out(struct task_struct *task,
1012 struct task_struct *next)
1016 static inline void perf_cgroup_sched_in(struct task_struct *prev,
1017 struct task_struct *task)
1021 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1022 struct perf_event_attr *attr,
1023 struct perf_event *group_leader)
1029 perf_cgroup_set_timestamp(struct task_struct *task,
1030 struct perf_event_context *ctx)
1035 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
1040 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
1044 static inline u64 perf_cgroup_event_time(struct perf_event *event)
1050 list_update_cgroup_event(struct perf_event *event,
1051 struct perf_event_context *ctx, bool add)
1058 * set default to be dependent on timer tick just
1059 * like original code
1061 #define PERF_CPU_HRTIMER (1000 / HZ)
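/*
 * Worked example (illustrative): PERF_CPU_HRTIMER is in milliseconds, so the
 * default multiplexing interval is one tick: 1000 / HZ = 4 ms with HZ = 250,
 * or 10 ms with HZ = 100.
 */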
1063 * function must be called with interrupts disabled
1065 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1067 struct perf_cpu_context *cpuctx;
1070 lockdep_assert_irqs_disabled();
1072 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
1073 rotations = perf_rotate_context(cpuctx);
1075 raw_spin_lock(&cpuctx->hrtimer_lock);
1077 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
1079 cpuctx->hrtimer_active = 0;
1080 raw_spin_unlock(&cpuctx->hrtimer_lock);
1082 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1085 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
1087 struct hrtimer *timer = &cpuctx->hrtimer;
1088 struct pmu *pmu = cpuctx->ctx.pmu;
1091 /* no multiplexing needed for SW PMU */
1092 if (pmu->task_ctx_nr == perf_sw_context)
1096 * check default is sane, if not set then force to
1097 * default interval (1/tick)
1099 interval = pmu->hrtimer_interval_ms;
1101 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1103 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1105 raw_spin_lock_init(&cpuctx->hrtimer_lock);
1106 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1107 timer->function = perf_mux_hrtimer_handler;
1110 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
1112 struct hrtimer *timer = &cpuctx->hrtimer;
1113 struct pmu *pmu = cpuctx->ctx.pmu;
1114 unsigned long flags;
1116 /* not for SW PMU */
1117 if (pmu->task_ctx_nr == perf_sw_context)
1120 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1121 if (!cpuctx->hrtimer_active) {
1122 cpuctx->hrtimer_active = 1;
1123 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
1124 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
1126 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
1131 void perf_pmu_disable(struct pmu *pmu)
1133 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1135 pmu->pmu_disable(pmu);
1138 void perf_pmu_enable(struct pmu *pmu)
1140 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1142 pmu->pmu_enable(pmu);
1145 static DEFINE_PER_CPU(struct list_head, active_ctx_list);
1148 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1149 * perf_event_task_tick() are fully serialized because they're strictly cpu
1150 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1151 * disabled, while perf_event_task_tick is called from IRQ context.
1153 static void perf_event_ctx_activate(struct perf_event_context *ctx)
1155 struct list_head *head = this_cpu_ptr(&active_ctx_list);
1157 lockdep_assert_irqs_disabled();
1159 WARN_ON(!list_empty(&ctx->active_ctx_list));
1161 list_add(&ctx->active_ctx_list, head);
1164 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1166 lockdep_assert_irqs_disabled();
1168 WARN_ON(list_empty(&ctx->active_ctx_list));
1170 list_del_init(&ctx->active_ctx_list);
1173 static void get_ctx(struct perf_event_context *ctx)
1175 refcount_inc(&ctx->refcount);
1178 static void free_ctx(struct rcu_head *head)
1180 struct perf_event_context *ctx;
1182 ctx = container_of(head, struct perf_event_context, rcu_head);
1183 kfree(ctx->task_ctx_data);
1187 static void put_ctx(struct perf_event_context *ctx)
1189 if (refcount_dec_and_test(&ctx->refcount)) {
1190 if (ctx->parent_ctx)
1191 put_ctx(ctx->parent_ctx);
1192 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1193 put_task_struct(ctx->task);
1194 call_rcu(&ctx->rcu_head, free_ctx);
1199 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1200 * perf_pmu_migrate_context() we need some magic.
1202 * Those places that change perf_event::ctx will hold both
1203 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1205 * Lock ordering is by mutex address. There are two other sites where
1206 * perf_event_context::mutex nests and those are:
1208 * - perf_event_exit_task_context() [ child , 0 ]
1209 * perf_event_exit_event()
1210 * put_event() [ parent, 1 ]
1212 * - perf_event_init_context() [ parent, 0 ]
1213 * inherit_task_group()
1216 * perf_event_alloc()
1218 * perf_try_init_event() [ child , 1 ]
1220	 * While it appears there is an obvious deadlock here -- the parent and child
1221	 * nesting levels are inverted between the two -- this is in fact safe because
1222	 * life-time rules separate them. That is, an exiting task cannot fork, and a
1223	 * spawning task cannot (yet) exit.
1225	 * But remember that these are parent<->child context relations, and
1226 * migration does not affect children, therefore these two orderings should not
1229 * The change in perf_event::ctx does not affect children (as claimed above)
1230 * because the sys_perf_event_open() case will install a new event and break
1231 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1232 * concerned with cpuctx and that doesn't have children.
1234 * The places that change perf_event::ctx will issue:
1236 * perf_remove_from_context();
1237 * synchronize_rcu();
1238 * perf_install_in_context();
1240	 * to effect the change. The remove_from_context() + synchronize_rcu() should
1241 * quiesce the event, after which we can install it in the new location. This
1242 * means that only external vectors (perf_fops, prctl) can perturb the event
1243 * while in transit. Therefore all such accessors should also acquire
1244 * perf_event_context::mutex to serialize against this.
1246	 * However, because event->ctx can change while we're waiting to acquire
1247	 * ctx->mutex, we must be careful and use the below perf_event_ctx_lock()
1252 * task_struct::perf_event_mutex
1253 * perf_event_context::mutex
1254 * perf_event::child_mutex;
1255 * perf_event_context::lock
1256 * perf_event::mmap_mutex
1258 * perf_addr_filters_head::lock
1262 * cpuctx->mutex / perf_event_context::mutex
1264 static struct perf_event_context *
1265 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1267 struct perf_event_context *ctx;
1271 ctx = READ_ONCE(event->ctx);
1272 if (!refcount_inc_not_zero(&ctx->refcount)) {
1278 mutex_lock_nested(&ctx->mutex, nesting);
1279 if (event->ctx != ctx) {
1280 mutex_unlock(&ctx->mutex);
1288 static inline struct perf_event_context *
1289 perf_event_ctx_lock(struct perf_event *event)
1291 return perf_event_ctx_lock_nested(event, 0);
1294 static void perf_event_ctx_unlock(struct perf_event *event,
1295 struct perf_event_context *ctx)
1297 mutex_unlock(&ctx->mutex);
1302	 * This must be done under the ctx->lock, so as to serialize against
1303	 * context_equiv(); therefore we cannot call put_ctx() since that might end up
1304 * calling scheduler related locks and ctx->lock nests inside those.
1306 static __must_check struct perf_event_context *
1307 unclone_ctx(struct perf_event_context *ctx)
1309 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1311 lockdep_assert_held(&ctx->lock);
1314 ctx->parent_ctx = NULL;
1320 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1325 * only top level events have the pid namespace they were created in
1328 event = event->parent;
1330 nr = __task_pid_nr_ns(p, type, event->ns);
1331	/* avoid -1 if it is the idle thread or runs in another ns */
1332 if (!nr && !pid_alive(p))
1337 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1339 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1342 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1344 return perf_event_pid_type(event, p, PIDTYPE_PID);
1348 * If we inherit events we want to return the parent event id
1351 static u64 primary_event_id(struct perf_event *event)
1356 id = event->parent->id;
1362 * Get the perf_event_context for a task and lock it.
1364	 * This has to cope with the fact that until it is locked,
1365 * the context could get moved to another task.
1367 static struct perf_event_context *
1368 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
1370 struct perf_event_context *ctx;
1374 * One of the few rules of preemptible RCU is that one cannot do
1375 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1376 * part of the read side critical section was irqs-enabled -- see
1377 * rcu_read_unlock_special().
1379 * Since ctx->lock nests under rq->lock we must ensure the entire read
1380 * side critical section has interrupts disabled.
1382 local_irq_save(*flags);
1384 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
1387 * If this context is a clone of another, it might
1388 * get swapped for another underneath us by
1389 * perf_event_task_sched_out, though the
1390 * rcu_read_lock() protects us from any context
1391 * getting freed. Lock the context and check if it
1392 * got swapped before we could get the lock, and retry
1393 * if so. If we locked the right context, then it
1394 * can't get swapped on us any more.
1396 raw_spin_lock(&ctx->lock);
1397 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
1398 raw_spin_unlock(&ctx->lock);
1400 local_irq_restore(*flags);
1404 if (ctx->task == TASK_TOMBSTONE ||
1405 !refcount_inc_not_zero(&ctx->refcount)) {
1406 raw_spin_unlock(&ctx->lock);
1409 WARN_ON_ONCE(ctx->task != task);
1414 local_irq_restore(*flags);
1419 * Get the context for a task and increment its pin_count so it
1420 * can't get swapped to another task. This also increments its
1421 * reference count so that the context can't get freed.
1423 static struct perf_event_context *
1424 perf_pin_task_context(struct task_struct *task, int ctxn)
1426 struct perf_event_context *ctx;
1427 unsigned long flags;
1429 ctx = perf_lock_task_context(task, ctxn, &flags);
1432 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1437 static void perf_unpin_context(struct perf_event_context *ctx)
1439 unsigned long flags;
1441 raw_spin_lock_irqsave(&ctx->lock, flags);
1443 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1447 * Update the record of the current time in a context.
1449 static void update_context_time(struct perf_event_context *ctx)
1451 u64 now = perf_clock();
1453 ctx->time += now - ctx->timestamp;
1454 ctx->timestamp = now;
1457 static u64 perf_event_time(struct perf_event *event)
1459 struct perf_event_context *ctx = event->ctx;
1461 if (is_cgroup_event(event))
1462 return perf_cgroup_event_time(event);
1464 return ctx ? ctx->time : 0;
1467 static enum event_type_t get_event_type(struct perf_event *event)
1469 struct perf_event_context *ctx = event->ctx;
1470 enum event_type_t event_type;
1472 lockdep_assert_held(&ctx->lock);
1475 * It's 'group type', really, because if our group leader is
1476 * pinned, so are we.
1478 if (event->group_leader != event)
1479 event = event->group_leader;
1481 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1483 event_type |= EVENT_CPU;
1489 * Helper function to initialize event group nodes.
1491 static void init_event_group(struct perf_event *event)
1493 RB_CLEAR_NODE(&event->group_node);
1494 event->group_index = 0;
1498 * Extract pinned or flexible groups from the context
1499 * based on event attrs bits.
1501 static struct perf_event_groups *
1502 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1504 if (event->attr.pinned)
1505 return &ctx->pinned_groups;
1507 return &ctx->flexible_groups;
1511	 * Helper function to initialize perf_event_groups trees.
1513 static void perf_event_groups_init(struct perf_event_groups *groups)
1515 groups->tree = RB_ROOT;
1520	 * Compare function for event groups.
1522	 * Implements a complex key that first sorts by CPU and then by a virtual index,
1523	 * which provides ordering when rotating groups for the same CPU.
1526 perf_event_groups_less(struct perf_event *left, struct perf_event *right)
1528 if (left->cpu < right->cpu)
1530 if (left->cpu > right->cpu)
1533 if (left->group_index < right->group_index)
1535 if (left->group_index > right->group_index)
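/*
 * Illustrative ordering example: with the {cpu, group_index} key above,
 * events keyed {0,1} < {0,5} < {1,2} < {1,8}. New insertions take
 * ++groups->index, so they always land rightmost within their CPU subtree,
 * and perf_event_groups_first() below finds the leftmost entry for a CPU.
 */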
1542	 * Insert @event into @groups' tree, using {@event->cpu, ++@groups->index} for the
1543	 * key (see perf_event_groups_less). This places it last inside the CPU subtree.
1547 perf_event_groups_insert(struct perf_event_groups *groups,
1548 struct perf_event *event)
1550 struct perf_event *node_event;
1551 struct rb_node *parent;
1552 struct rb_node **node;
1554 event->group_index = ++groups->index;
1556 node = &groups->tree.rb_node;
1561 node_event = container_of(*node, struct perf_event, group_node);
1563 if (perf_event_groups_less(event, node_event))
1564 node = &parent->rb_left;
1566 node = &parent->rb_right;
1569 rb_link_node(&event->group_node, parent, node);
1570 rb_insert_color(&event->group_node, &groups->tree);
1574 * Helper function to insert event into the pinned or flexible groups.
1577 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1579 struct perf_event_groups *groups;
1581 groups = get_event_groups(event, ctx);
1582 perf_event_groups_insert(groups, event);
1586 * Delete a group from a tree.
1589 perf_event_groups_delete(struct perf_event_groups *groups,
1590 struct perf_event *event)
1592 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1593 RB_EMPTY_ROOT(&groups->tree));
1595 rb_erase(&event->group_node, &groups->tree);
1596 init_event_group(event);
1600 * Helper function to delete event from its groups.
1603 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1605 struct perf_event_groups *groups;
1607 groups = get_event_groups(event, ctx);
1608 perf_event_groups_delete(groups, event);
1612 * Get the leftmost event in the @cpu subtree.
1614 static struct perf_event *
1615 perf_event_groups_first(struct perf_event_groups *groups, int cpu)
1617 struct perf_event *node_event = NULL, *match = NULL;
1618 struct rb_node *node = groups->tree.rb_node;
1621 node_event = container_of(node, struct perf_event, group_node);
1623 if (cpu < node_event->cpu) {
1624 node = node->rb_left;
1625 } else if (cpu > node_event->cpu) {
1626 node = node->rb_right;
1629 node = node->rb_left;
1637 * Like rb_entry_next_safe() for the @cpu subtree.
1639 static struct perf_event *
1640 perf_event_groups_next(struct perf_event *event)
1642 struct perf_event *next;
1644 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
1645 if (next && next->cpu == event->cpu)
1652 * Iterate through the whole groups tree.
1654 #define perf_event_groups_for_each(event, groups) \
1655 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1656 typeof(*event), group_node); event; \
1657 event = rb_entry_safe(rb_next(&event->group_node), \
1658 typeof(*event), group_node))
1661	 * Add an event to the lists for its context.
1662 * Must be called with ctx->mutex and ctx->lock held.
1665 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1667 lockdep_assert_held(&ctx->lock);
1669 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1670 event->attach_state |= PERF_ATTACH_CONTEXT;
1672 event->tstamp = perf_event_time(event);
1675	 * If we're a stand-alone event or group leader, we go to the context
1676	 * list; group events are kept attached to the group so that
1677 * perf_group_detach can, at all times, locate all siblings.
1679 if (event->group_leader == event) {
1680 event->group_caps = event->event_caps;
1681 add_event_to_groups(event, ctx);
1684 list_update_cgroup_event(event, ctx, true);
1686 list_add_rcu(&event->event_entry, &ctx->event_list);
1688 if (event->attr.inherit_stat)
1695 * Initialize event state based on the perf_event_attr::disabled.
1697 static inline void perf_event__state_init(struct perf_event *event)
1699 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1700 PERF_EVENT_STATE_INACTIVE;
1703 static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
1705 int entry = sizeof(u64); /* value */
1709 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1710 size += sizeof(u64);
1712 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1713 size += sizeof(u64);
1715 if (event->attr.read_format & PERF_FORMAT_ID)
1716 entry += sizeof(u64);
1718 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1720 size += sizeof(u64);
1724 event->read_size = size;
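/*
 * Illustrative layout (assuming a non-group event with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID): the read() buffer sized
 * above looks roughly like:
 *
 *	struct {
 *		u64 value;		// 8 bytes, counter value
 *		u64 time_enabled;	// 8 bytes, PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 id;			// 8 bytes, PERF_FORMAT_ID
 *	};				// read_size = 3 * 8 = 24 bytes
 */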
1727 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1729 struct perf_sample_data *data;
1732 if (sample_type & PERF_SAMPLE_IP)
1733 size += sizeof(data->ip);
1735 if (sample_type & PERF_SAMPLE_ADDR)
1736 size += sizeof(data->addr);
1738 if (sample_type & PERF_SAMPLE_PERIOD)
1739 size += sizeof(data->period);
1741 if (sample_type & PERF_SAMPLE_WEIGHT)
1742 size += sizeof(data->weight);
1744 if (sample_type & PERF_SAMPLE_READ)
1745 size += event->read_size;
1747 if (sample_type & PERF_SAMPLE_DATA_SRC)
1748 size += sizeof(data->data_src.val);
1750 if (sample_type & PERF_SAMPLE_TRANSACTION)
1751 size += sizeof(data->txn);
1753 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1754 size += sizeof(data->phys_addr);
1756 event->header_size = size;
1760 * Called at perf_event creation and when events are attached/detached from a
1763 static void perf_event__header_size(struct perf_event *event)
1765 __perf_event_read_size(event,
1766 event->group_leader->nr_siblings);
1767 __perf_event_header_size(event, event->attr.sample_type);
1770 static void perf_event__id_header_size(struct perf_event *event)
1772 struct perf_sample_data *data;
1773 u64 sample_type = event->attr.sample_type;
1776 if (sample_type & PERF_SAMPLE_TID)
1777 size += sizeof(data->tid_entry);
1779 if (sample_type & PERF_SAMPLE_TIME)
1780 size += sizeof(data->time);
1782 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1783 size += sizeof(data->id);
1785 if (sample_type & PERF_SAMPLE_ID)
1786 size += sizeof(data->id);
1788 if (sample_type & PERF_SAMPLE_STREAM_ID)
1789 size += sizeof(data->stream_id);
1791 if (sample_type & PERF_SAMPLE_CPU)
1792 size += sizeof(data->cpu_entry);
1794 event->id_header_size = size;
1797 static bool perf_event_validate_size(struct perf_event *event)
1800 * The values computed here will be over-written when we actually
1803 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1804 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1805 perf_event__id_header_size(event);
1808 * Sum the lot; should not exceed the 64k limit we have on records.
1809 * Conservative limit to allow for callchains and other variable fields.
1811 if (event->read_size + event->header_size +
1812 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1818 static void perf_group_attach(struct perf_event *event)
1820 struct perf_event *group_leader = event->group_leader, *pos;
1822 lockdep_assert_held(&event->ctx->lock);
1825 * We can have double attach due to group movement in perf_event_open.
1827 if (event->attach_state & PERF_ATTACH_GROUP)
1830 event->attach_state |= PERF_ATTACH_GROUP;
1832 if (group_leader == event)
1835 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1837 group_leader->group_caps &= event->event_caps;
1839 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
1840 group_leader->nr_siblings++;
1842 perf_event__header_size(group_leader);
1844 for_each_sibling_event(pos, group_leader)
1845 perf_event__header_size(pos);
1849 * Remove an event from the lists for its context.
1850 * Must be called with ctx->mutex and ctx->lock held.
1853 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1855 WARN_ON_ONCE(event->ctx != ctx);
1856 lockdep_assert_held(&ctx->lock);
1859 * We can have double detach due to exit/hot-unplug + close.
1861 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1864 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1866 list_update_cgroup_event(event, ctx, false);
1869 if (event->attr.inherit_stat)
1872 list_del_rcu(&event->event_entry);
1874 if (event->group_leader == event)
1875 del_event_from_groups(event, ctx);
1878	 * If the event was in error state, then keep it
1879	 * that way; otherwise bogus counts will be
1880	 * returned on read(). The only way to get out
1881	 * of error state is by explicit re-enabling
1884 if (event->state > PERF_EVENT_STATE_OFF)
1885 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
1890 static void perf_group_detach(struct perf_event *event)
1892 struct perf_event *sibling, *tmp;
1893 struct perf_event_context *ctx = event->ctx;
1895 lockdep_assert_held(&ctx->lock);
1898 * We can have double detach due to exit/hot-unplug + close.
1900 if (!(event->attach_state & PERF_ATTACH_GROUP))
1903 event->attach_state &= ~PERF_ATTACH_GROUP;
1906 * If this is a sibling, remove it from its group.
1908 if (event->group_leader != event) {
1909 list_del_init(&event->sibling_list);
1910 event->group_leader->nr_siblings--;
1915 * If this was a group event with sibling events then
1916 * upgrade the siblings to singleton events by adding them
1917 * to whatever list we are on.
1919 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
1921 sibling->group_leader = sibling;
1922 list_del_init(&sibling->sibling_list);
1924 /* Inherit group flags from the previous leader */
1925 sibling->group_caps = event->group_caps;
1927 if (!RB_EMPTY_NODE(&event->group_node)) {
1928 add_event_to_groups(sibling, event->ctx);
1930 if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
1931 struct list_head *list = sibling->attr.pinned ?
1932 &ctx->pinned_active : &ctx->flexible_active;
1934 list_add_tail(&sibling->active_list, list);
1938 WARN_ON_ONCE(sibling->ctx != event->ctx);
1942 perf_event__header_size(event->group_leader);
1944 for_each_sibling_event(tmp, event->group_leader)
1945 perf_event__header_size(tmp);
1948 static bool is_orphaned_event(struct perf_event *event)
1950 return event->state == PERF_EVENT_STATE_DEAD;
1953 static inline int __pmu_filter_match(struct perf_event *event)
1955 struct pmu *pmu = event->pmu;
1956 return pmu->filter_match ? pmu->filter_match(event) : 1;
1960 * Check whether we should attempt to schedule an event group based on
1961 * PMU-specific filtering. An event group can consist of HW and SW events,
1962 * potentially with a SW leader, so we must check all the filters, to
1963 * determine whether a group is schedulable:
1965 static inline int pmu_filter_match(struct perf_event *event)
1967 struct perf_event *sibling;
1969 if (!__pmu_filter_match(event))
1972 for_each_sibling_event(sibling, event) {
1973 if (!__pmu_filter_match(sibling))
1981 event_filter_match(struct perf_event *event)
1983 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1984 perf_cgroup_match(event) && pmu_filter_match(event);
1988 event_sched_out(struct perf_event *event,
1989 struct perf_cpu_context *cpuctx,
1990 struct perf_event_context *ctx)
1992 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
1994 WARN_ON_ONCE(event->ctx != ctx);
1995 lockdep_assert_held(&ctx->lock);
1997 if (event->state != PERF_EVENT_STATE_ACTIVE)
2001 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2002 * we can schedule events _OUT_ individually through things like
2003 * __perf_remove_from_context().
2005 list_del_init(&event->active_list);
2007 perf_pmu_disable(event->pmu);
2009 event->pmu->del(event, 0);
2012 if (event->pending_disable) {
2013 event->pending_disable = 0;
2014 state = PERF_EVENT_STATE_OFF;
2016 perf_event_set_state(event, state);
2018 if (!is_software_event(event))
2019 cpuctx->active_oncpu--;
2020 if (!--ctx->nr_active)
2021 perf_event_ctx_deactivate(ctx);
2022 if (event->attr.freq && event->attr.sample_freq)
2024 if (event->attr.exclusive || !cpuctx->active_oncpu)
2025 cpuctx->exclusive = 0;
2027 perf_pmu_enable(event->pmu);
2031 group_sched_out(struct perf_event *group_event,
2032 struct perf_cpu_context *cpuctx,
2033 struct perf_event_context *ctx)
2035 struct perf_event *event;
2037 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2040 perf_pmu_disable(ctx->pmu);
2042 event_sched_out(group_event, cpuctx, ctx);
2045 * Schedule out siblings (if any):
2047 for_each_sibling_event(event, group_event)
2048 event_sched_out(event, cpuctx, ctx);
2050 perf_pmu_enable(ctx->pmu);
2052 if (group_event->attr.exclusive)
2053 cpuctx->exclusive = 0;
2056 #define DETACH_GROUP 0x01UL
2059 * Cross CPU call to remove a performance event
2061 * We disable the event on the hardware level first. After that we
2062 * remove it from the context list.
2065 __perf_remove_from_context(struct perf_event *event,
2066 struct perf_cpu_context *cpuctx,
2067 struct perf_event_context *ctx,
2070 unsigned long flags = (unsigned long)info;
2072 if (ctx->is_active & EVENT_TIME) {
2073 update_context_time(ctx);
2074 update_cgrp_time_from_cpuctx(cpuctx);
2077 event_sched_out(event, cpuctx, ctx);
2078 if (flags & DETACH_GROUP)
2079 perf_group_detach(event);
2080 list_del_event(event, ctx);
2082 if (!ctx->nr_events && ctx->is_active) {
2085 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2086 cpuctx->task_ctx = NULL;
2092 * Remove the event from a task's (or a CPU's) list of events.
2094 * If event->ctx is a cloned context, callers must make sure that
2095 * every task struct that event->ctx->task could possibly point to
2096 * remains valid. This is OK when called from perf_release since
2097 * that only calls us on the top-level context, which can't be a clone.
2098 * When called from perf_event_exit_task, it's OK because the
2099 * context has been detached from its task.
2101 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2103 struct perf_event_context *ctx = event->ctx;
2105 lockdep_assert_held(&ctx->mutex);
2107 event_function_call(event, __perf_remove_from_context, (void *)flags);
2110 * The above event_function_call() can NO-OP when it hits
2111 * TASK_TOMBSTONE. In that case we must already have been detached
2112 * from the context (by perf_event_exit_event()) but the grouping
2113	 * might still be intact.
2115 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
2116 if ((flags & DETACH_GROUP) &&
2117 (event->attach_state & PERF_ATTACH_GROUP)) {
2119 * Since in that case we cannot possibly be scheduled, simply
2122 raw_spin_lock_irq(&ctx->lock);
2123 perf_group_detach(event);
2124 raw_spin_unlock_irq(&ctx->lock);
2129 * Cross CPU call to disable a performance event
2131 static void __perf_event_disable(struct perf_event *event,
2132 struct perf_cpu_context *cpuctx,
2133 struct perf_event_context *ctx,
2136 if (event->state < PERF_EVENT_STATE_INACTIVE)
2139 if (ctx->is_active & EVENT_TIME) {
2140 update_context_time(ctx);
2141 update_cgrp_time_from_event(event);
2144 if (event == event->group_leader)
2145 group_sched_out(event, cpuctx, ctx);
2147 event_sched_out(event, cpuctx, ctx);
2149 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
2155 * If event->ctx is a cloned context, callers must make sure that
2156 * every task struct that event->ctx->task could possibly point to
2157	 * remains valid. This condition is satisfied when called through
2158 * perf_event_for_each_child or perf_event_for_each because they
2159 * hold the top-level event's child_mutex, so any descendant that
2160 * goes to exit will block in perf_event_exit_event().
2162 * When called from perf_pending_event it's OK because event->ctx
2163 * is the current context on this CPU and preemption is disabled,
2164 * hence we can't get into perf_event_task_sched_out for this context.
2166 static void _perf_event_disable(struct perf_event *event)
2168 struct perf_event_context *ctx = event->ctx;
2170 raw_spin_lock_irq(&ctx->lock);
2171 if (event->state <= PERF_EVENT_STATE_OFF) {
2172 raw_spin_unlock_irq(&ctx->lock);
2175 raw_spin_unlock_irq(&ctx->lock);
2177 event_function_call(event, __perf_event_disable, NULL);
2180 void perf_event_disable_local(struct perf_event *event)
2182 event_function_local(event, __perf_event_disable, NULL);
2186 * Strictly speaking kernel users cannot create groups and therefore this
2187 * interface does not need the perf_event_ctx_lock() magic.
2189 void perf_event_disable(struct perf_event *event)
2191 struct perf_event_context *ctx;
2193 ctx = perf_event_ctx_lock(event);
2194 _perf_event_disable(event);
2195 perf_event_ctx_unlock(event, ctx);
2197 EXPORT_SYMBOL_GPL(perf_event_disable);
2199 void perf_event_disable_inatomic(struct perf_event *event)
2201 event->pending_disable = 1;
2202 irq_work_queue(&event->pending);
2205 static void perf_set_shadow_time(struct perf_event *event,
2206 struct perf_event_context *ctx)
2209 * use the correct time source for the time snapshot
2211 * We could get by without this by leveraging the
2212 * fact that to get to this function, the caller
2213 * has most likely already called update_context_time()
2214	 * and update_cgrp_time_xx() and thus both timestamps
2215	 * are identical (or very close). Given that tstamp is
2216	 * already adjusted for cgroup, we could say that:
2217 * tstamp - ctx->timestamp
2219 * tstamp - cgrp->timestamp.
2221 * Then, in perf_output_read(), the calculation would
2222 * work with no changes because:
2223 * - event is guaranteed scheduled in
2224 * - no scheduled out in between
2225 * - thus the timestamp would be the same
2227 * But this is a bit hairy.
2229 * So instead, we have an explicit cgroup call to remain
2230	 * within the time source all along. We believe it
2231 * is cleaner and simpler to understand.
2233 if (is_cgroup_event(event))
2234 perf_cgroup_set_shadow_time(event, event->tstamp);
2236 event->shadow_ctx_time = event->tstamp - ctx->timestamp;
2239 #define MAX_INTERRUPTS (~0ULL)
2241 static void perf_log_throttle(struct perf_event *event, int enable);
2242 static void perf_log_itrace_start(struct perf_event *event);
2245 event_sched_in(struct perf_event *event,
2246 struct perf_cpu_context *cpuctx,
2247 struct perf_event_context *ctx)
2251 lockdep_assert_held(&ctx->lock);
2253 if (event->state <= PERF_EVENT_STATE_OFF)
2256 WRITE_ONCE(event->oncpu, smp_processor_id());
2258 * Order event::oncpu write to happen before the ACTIVE state is
2259 * visible. This allows perf_event_{stop,read}() to observe the correct
2260 * ->oncpu if it sees ACTIVE.
2263 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2266	 * Unthrottle events: since we just scheduled, we might have missed several
2267	 * ticks already, and for a heavily scheduling task there is little
2268	 * guarantee it'll get a tick in a timely manner.
2270 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2271 perf_log_throttle(event, 1);
2272 event->hw.interrupts = 0;
2275 perf_pmu_disable(event->pmu);
2277 perf_set_shadow_time(event, ctx);
2279 perf_log_itrace_start(event);
2281 if (event->pmu->add(event, PERF_EF_START)) {
2282 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2288 if (!is_software_event(event))
2289 cpuctx->active_oncpu++;
2290 if (!ctx->nr_active++)
2291 perf_event_ctx_activate(ctx);
2292 if (event->attr.freq && event->attr.sample_freq)
2295 if (event->attr.exclusive)
2296 cpuctx->exclusive = 1;
2299 perf_pmu_enable(event->pmu);
2305 group_sched_in(struct perf_event *group_event,
2306 struct perf_cpu_context *cpuctx,
2307 struct perf_event_context *ctx)
2309 struct perf_event *event, *partial_group = NULL;
2310 struct pmu *pmu = ctx->pmu;
2312 if (group_event->state == PERF_EVENT_STATE_OFF)
2315 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2317 if (event_sched_in(group_event, cpuctx, ctx)) {
2318 pmu->cancel_txn(pmu);
2319 perf_mux_hrtimer_restart(cpuctx);
2324 * Schedule in siblings as one group (if any):
2326 for_each_sibling_event(event, group_event) {
2327 if (event_sched_in(event, cpuctx, ctx)) {
2328 partial_group = event;
2333 if (!pmu->commit_txn(pmu))
2338 * Groups can be scheduled in as one unit only, so undo any
2339 * partial group before returning:
2340 * The events up to the failed event are scheduled out normally.
2342 for_each_sibling_event(event, group_event) {
2343 if (event == partial_group)
2346 event_sched_out(event, cpuctx, ctx);
2348 event_sched_out(group_event, cpuctx, ctx);
2350 pmu->cancel_txn(pmu);
2352 perf_mux_hrtimer_restart(cpuctx);
2358 * Work out whether we can put this event group on the CPU now.
2360 static int group_can_go_on(struct perf_event *event,
2361 struct perf_cpu_context *cpuctx,
2365 * Groups consisting entirely of software events can always go on.
2367 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
	 * If an exclusive group is already on, no other hardware
	 * groups can go on.
	 */
2373 if (cpuctx->exclusive)
2376 * If this group is exclusive and there are already
2377 * events on the CPU, it can't go on.
2379 if (event->attr.exclusive && cpuctx->active_oncpu)
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
2388 static void add_event_to_ctx(struct perf_event *event,
2389 struct perf_event_context *ctx)
2391 list_add_event(event, ctx);
2392 perf_group_attach(event);
2395 static void ctx_sched_out(struct perf_event_context *ctx,
2396 struct perf_cpu_context *cpuctx,
2397 enum event_type_t event_type);
2399 ctx_sched_in(struct perf_event_context *ctx,
2400 struct perf_cpu_context *cpuctx,
2401 enum event_type_t event_type,
2402 struct task_struct *task);
2404 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2405 struct perf_event_context *ctx,
2406 enum event_type_t event_type)
2408 if (!cpuctx->task_ctx)
2411 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2414 ctx_sched_out(ctx, cpuctx, event_type);
2417 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2418 struct perf_event_context *ctx,
2419 struct task_struct *task)
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2430 * We want to maintain the following priority of scheduling:
2431 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2432 * - task pinned (EVENT_PINNED)
2433 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2434 * - task flexible (EVENT_FLEXIBLE).
2436 * In order to avoid unscheduling and scheduling back in everything every
 * time an event is added, only do it for the groups of equal priority and
 * below.
 *
2440 * This can be called after a batch operation on task events, in which case
2441 * event_type is a bit mask of the types of events involved. For CPU events,
2442 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2444 static void ctx_resched(struct perf_cpu_context *cpuctx,
2445 struct perf_event_context *task_ctx,
2446 enum event_type_t event_type)
2448 enum event_type_t ctx_event_type;
2449 bool cpu_event = !!(event_type & EVENT_CPU);
2452 * If pinned groups are involved, flexible groups also need to be
2455 if (event_type & EVENT_PINNED)
2456 event_type |= EVENT_FLEXIBLE;
2458 ctx_event_type = event_type & EVENT_ALL;
	perf_pmu_disable(cpuctx->ctx.pmu);
	if (task_ctx)
		task_ctx_sched_out(cpuctx, task_ctx, event_type);
2465 * Decide which cpu ctx groups to schedule out based on the types
2466 * of events that caused rescheduling:
2467 * - EVENT_CPU: schedule out corresponding groups;
2468 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2469 * - otherwise, do nothing more.
	if (cpu_event)
		cpu_ctx_sched_out(cpuctx, ctx_event_type);
2473 else if (ctx_event_type & EVENT_PINNED)
2474 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2476 perf_event_sched_in(cpuctx, task_ctx, current);
2477 perf_pmu_enable(cpuctx->ctx.pmu);
2481 * Cross CPU call to install and enable a performance event
2483 * Very similar to remote_function() + event_function() but cannot assume that
2484 * things like ctx->is_active and cpuctx->task_ctx are set.
2486 static int __perf_install_in_context(void *info)
2488 struct perf_event *event = info;
2489 struct perf_event_context *ctx = event->ctx;
2490 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2491 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2492 bool reprogram = true;
2495 raw_spin_lock(&cpuctx->ctx.lock);
2497 raw_spin_lock(&ctx->lock);
2500 reprogram = (ctx->task == current);
2503 * If the task is running, it must be running on this CPU,
2504 * otherwise we cannot reprogram things.
		 * If it's not running, we don't care; ctx->lock will
		 * serialize against it becoming runnable.
		 */
2509 if (task_curr(ctx->task) && !reprogram) {
2514 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2515 } else if (task_ctx) {
2516 raw_spin_lock(&task_ctx->lock);
2519 #ifdef CONFIG_CGROUP_PERF
2520 if (is_cgroup_event(event)) {
2522 * If the current cgroup doesn't match the event's
2523 * cgroup, we should not try to schedule it.
2525 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2526 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2527 event->cgrp->css.cgroup);
	if (reprogram) {
		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
		add_event_to_ctx(event, ctx);
		ctx_resched(cpuctx, task_ctx, get_event_type(event));
	} else {
		add_event_to_ctx(event, ctx);
	}
2540 perf_ctx_unlock(cpuctx, task_ctx);
2546 * Attach a performance event to a context.
2548 * Very similar to event_function_call, see comment there.
2551 perf_install_in_context(struct perf_event_context *ctx,
2552 struct perf_event *event,
2555 struct task_struct *task = READ_ONCE(ctx->task);
2557 lockdep_assert_held(&ctx->mutex);
2559 if (event->cpu != -1)
2563 * Ensures that if we can observe event->ctx, both the event and ctx
2564 * will be 'complete'. See perf_iterate_sb_cpu().
2566 smp_store_release(&event->ctx, ctx);
2569 cpu_function_call(cpu, __perf_install_in_context, event);
2574 * Should not happen, we validate the ctx is still alive before calling.
2576 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2580 * Installing events is tricky because we cannot rely on ctx->is_active
2581 * to be set in case this is the nr_events 0 -> 1 transition.
2583 * Instead we use task_curr(), which tells us if the task is running.
2584 * However, since we use task_curr() outside of rq::lock, we can race
2585 * against the actual state. This means the result can be wrong.
2587 * If we get a false positive, we retry, this is harmless.
2589 * If we get a false negative, things are complicated. If we are after
2590 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2591 * value must be correct. If we're before, it doesn't matter since
2592 * perf_event_context_sched_in() will program the counter.
2594 * However, this hinges on the remote context switch having observed
2595 * our task->perf_event_ctxp[] store, such that it will in fact take
2596 * ctx::lock in perf_event_context_sched_in().
2598 * We do this by task_function_call(), if the IPI fails to hit the task
2599 * we know any future context switch of task must see the
	 * perf_event_ctxp[] store.
	 * This smp_mb() orders the task->perf_event_ctxp[] store with the
	 * task_cpu() load, such that if the IPI then does not find the task
	 * running, a future context switch of that task must observe the
	 * ctx.
	 */
	smp_mb();
2611 if (!task_function_call(task, __perf_install_in_context, event))
2614 raw_spin_lock_irq(&ctx->lock);
2616 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2618 * Cannot happen because we already checked above (which also
2619 * cannot happen), and we hold ctx->mutex, which serializes us
2620 * against perf_event_exit_task_context().
2622 raw_spin_unlock_irq(&ctx->lock);
2626 * If the task is not running, ctx->lock will avoid it becoming so,
2627 * thus we can safely install the event.
2629 if (task_curr(task)) {
2630 raw_spin_unlock_irq(&ctx->lock);
2633 add_event_to_ctx(event, ctx);
2634 raw_spin_unlock_irq(&ctx->lock);
2638 * Cross CPU call to enable a performance event
2640 static void __perf_event_enable(struct perf_event *event,
2641 struct perf_cpu_context *cpuctx,
2642 struct perf_event_context *ctx,
2645 struct perf_event *leader = event->group_leader;
2646 struct perf_event_context *task_ctx;
2648 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2649 event->state <= PERF_EVENT_STATE_ERROR)
2653 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2655 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2657 if (!ctx->is_active)
2660 if (!event_filter_match(event)) {
2661 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2666 * If the event is in a group and isn't the group leader,
2667 * then don't put it on unless the group is on.
2669 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2670 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2674 task_ctx = cpuctx->task_ctx;
2676 WARN_ON_ONCE(task_ctx != ctx);
2678 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2684 * If event->ctx is a cloned context, callers must make sure that
2685 * every task struct that event->ctx->task could possibly point to
2686 * remains valid. This condition is satisfied when called through
2687 * perf_event_for_each_child or perf_event_for_each as described
2688 * for perf_event_disable.
2690 static void _perf_event_enable(struct perf_event *event)
2692 struct perf_event_context *ctx = event->ctx;
2694 raw_spin_lock_irq(&ctx->lock);
2695 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2696 event->state < PERF_EVENT_STATE_ERROR) {
2697 raw_spin_unlock_irq(&ctx->lock);
2702 * If the event is in error state, clear that first.
2704 * That way, if we see the event in error state below, we know that it
2705 * has gone back into error state, as distinct from the task having
2706 * been scheduled away before the cross-call arrived.
2708 if (event->state == PERF_EVENT_STATE_ERROR)
2709 event->state = PERF_EVENT_STATE_OFF;
2710 raw_spin_unlock_irq(&ctx->lock);
2712 event_function_call(event, __perf_event_enable, NULL);
2716 * See perf_event_disable();
2718 void perf_event_enable(struct perf_event *event)
2720 struct perf_event_context *ctx;
2722 ctx = perf_event_ctx_lock(event);
2723 _perf_event_enable(event);
2724 perf_event_ctx_unlock(event, ctx);
2726 EXPORT_SYMBOL_GPL(perf_event_enable);
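
#if 0	/* illustrative sketch only -- not part of the original file */
/*
 * A minimal sketch of how an in-kernel user might pair the exported
 * perf_event_enable()/perf_event_disable() calls around a region of
 * interest, assuming only the counter API declared in <linux/perf_event.h>.
 * The function name example_count_instructions() is made up for
 * illustration.
 */
static int example_count_instructions(struct task_struct *task)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.size		= sizeof(attr),
		.disabled	= 1,	/* created quiescent; enabled explicitly below */
	};
	struct perf_event *event;
	u64 count, enabled, running;

	/* Per-task counter, any CPU (cpu == -1), no overflow handler. */
	event = perf_event_create_kernel_counter(&attr, -1, task, NULL, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);

	perf_event_enable(event);	/* takes ctx->mutex, may IPI the target task */
	/* ... region of interest ... */
	perf_event_disable(event);

	count = perf_event_read_value(event, &enabled, &running);
	pr_debug("counted %llu instructions (enabled %llu ns, running %llu ns)\n",
		 count, enabled, running);

	perf_event_release_kernel(event);
	return 0;
}
#endif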
2728 struct stop_event_data {
2729 struct perf_event *event;
2730 unsigned int restart;
2733 static int __perf_event_stop(void *info)
2735 struct stop_event_data *sd = info;
2736 struct perf_event *event = sd->event;
2738 /* if it's already INACTIVE, do nothing */
2739 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
	/* matches smp_wmb() in event_sched_in() */
	smp_rmb();
2746 * There is a window with interrupts enabled before we get here,
2747 * so we need to check again lest we try to stop another CPU's event.
2749 if (READ_ONCE(event->oncpu) != smp_processor_id())
2752 event->pmu->stop(event, PERF_EF_UPDATE);
2755 * May race with the actual stop (through perf_pmu_output_stop()),
2756 * but it is only used for events with AUX ring buffer, and such
2757 * events will refuse to restart because of rb::aux_mmap_count==0,
2758 * see comments in perf_aux_output_begin().
	 * Since this is happening on an event-local CPU, no trace is lost
	 * while restarting.
	 */
	if (sd->restart)
		event->pmu->start(event, 0);
2769 static int perf_event_stop(struct perf_event *event, int restart)
2771 struct stop_event_data sd = {
2778 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
		/* matches smp_wmb() in event_sched_in() */
		smp_rmb();
2785 * We only want to restart ACTIVE events, so if the event goes
2786 * inactive here (event->oncpu==-1), there's nothing more to do;
2787 * fall through with ret==-ENXIO.
2789 ret = cpu_function_call(READ_ONCE(event->oncpu),
2790 __perf_event_stop, &sd);
2791 } while (ret == -EAGAIN);
 * In order to contain the amount of racy and tricky code in the address
 * filter configuration management, it is a two-part process:
2800 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2801 * we update the addresses of corresponding vmas in
2802 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
2803 * (p2) when an event is scheduled in (pmu::add), it calls
2804 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2805 * if the generation has changed since the previous call.
2807 * If (p1) happens while the event is active, we restart it to force (p2).
 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
 *     pre-existing mappings, called once when new filters arrive via
 *     SET_FILTER ioctl;
 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
 *     registered mapping, called for every new mmap(), with mm::mmap_sem down
 *     for reading;
 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
 *     of exec.
 */
2818 void perf_event_addr_filters_sync(struct perf_event *event)
2820 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2822 if (!has_addr_filter(event))
2825 raw_spin_lock(&ifh->lock);
2826 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2827 event->pmu->addr_filters_sync(event);
2828 event->hw.addr_filters_gen = event->addr_filters_gen;
2830 raw_spin_unlock(&ifh->lock);
2832 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
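
#if 0	/* illustrative sketch only -- not part of the original file */
/*
 * A hypothetical sketch of the driver side of step (p2) above: a PMU that
 * supports address filters calls perf_event_addr_filters_sync() from its
 * event start path and supplies an addr_filters_sync() callback that pushes
 * the synced ranges into hardware. example_pmu_write_range() is a made-up
 * stand-in for the hardware-specific programming.
 */
static void example_pmu_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
	struct perf_addr_filter *filter;
	int i = 0;

	/*
	 * The caller, perf_event_addr_filters_sync(), holds ifh->lock and
	 * only gets here when event->addr_filters_gen has moved on, so the
	 * ranges below are stable for the duration of this callback.
	 */
	list_for_each_entry(filter, &ifh->list, entry) {
		/* hypothetical helper: program one filter range into the PMU */
		example_pmu_write_range(event, i,
					event->addr_filter_ranges[i].start,
					event->addr_filter_ranges[i].size);
		i++;
	}
}
#endif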
2834 static int _perf_event_refresh(struct perf_event *event, int refresh)
2837 * not supported on inherited events
2839 if (event->attr.inherit || !is_sampling_event(event))
2842 atomic_add(refresh, &event->event_limit);
2843 _perf_event_enable(event);
2849 * See perf_event_disable()
2851 int perf_event_refresh(struct perf_event *event, int refresh)
2853 struct perf_event_context *ctx;
2856 ctx = perf_event_ctx_lock(event);
2857 ret = _perf_event_refresh(event, refresh);
2858 perf_event_ctx_unlock(event, ctx);
2862 EXPORT_SYMBOL_GPL(perf_event_refresh);
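
/*
 * Note: userspace reaches _perf_event_refresh() through the
 * PERF_EVENT_IOC_REFRESH ioctl on a perf_event file descriptor. A rough
 * sketch of the usual pattern (error handling omitted; the fd is assumed
 * to come from perf_event_open() with attr.disabled = 1 and a sampling
 * period set):
 *
 *	signal(SIGIO, overflow_handler);
 *	fcntl(fd, F_SETFL, O_ASYNC);
 *	fcntl(fd, F_SETOWN, getpid());
 *
 *	// Arm the event for the next 10 overflows; each overflow raises
 *	// SIGIO, and after the tenth the kernel disables the event again.
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 10);
 */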
2864 static int perf_event_modify_breakpoint(struct perf_event *bp,
2865 struct perf_event_attr *attr)
2869 _perf_event_disable(bp);
2871 err = modify_user_hw_breakpoint_check(bp, attr, true);
2873 if (!bp->attr.disabled)
2874 _perf_event_enable(bp);
2879 static int perf_event_modify_attr(struct perf_event *event,
2880 struct perf_event_attr *attr)
2882 if (event->attr.type != attr->type)
2885 switch (event->attr.type) {
2886 case PERF_TYPE_BREAKPOINT:
2887 return perf_event_modify_breakpoint(event, attr);
	/* Placeholder for future additions. */
2894 static void ctx_sched_out(struct perf_event_context *ctx,
2895 struct perf_cpu_context *cpuctx,
2896 enum event_type_t event_type)
2898 struct perf_event *event, *tmp;
2899 int is_active = ctx->is_active;
2901 lockdep_assert_held(&ctx->lock);
2903 if (likely(!ctx->nr_events)) {
2905 * See __perf_remove_from_context().
2907 WARN_ON_ONCE(ctx->is_active);
2909 WARN_ON_ONCE(cpuctx->task_ctx);
2913 ctx->is_active &= ~event_type;
	if (!(ctx->is_active & EVENT_ALL))
		ctx->is_active = 0;

	if (ctx->task) {
		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
		if (!ctx->is_active)
			cpuctx->task_ctx = NULL;
	}
2924 * Always update time if it was set; not only when it changes.
2925 * Otherwise we can 'forget' to update time for any but the last
2926 * context we sched out. For example:
2928 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2929 * ctx_sched_out(.event_type = EVENT_PINNED)
2931 * would only update time for the pinned events.
2933 if (is_active & EVENT_TIME) {
2934 /* update (and stop) ctx time */
2935 update_context_time(ctx);
2936 update_cgrp_time_from_cpuctx(cpuctx);
2939 is_active ^= ctx->is_active; /* changed bits */
2941 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2944 perf_pmu_disable(ctx->pmu);
2945 if (is_active & EVENT_PINNED) {
2946 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
2947 group_sched_out(event, cpuctx, ctx);
2950 if (is_active & EVENT_FLEXIBLE) {
2951 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
2952 group_sched_out(event, cpuctx, ctx);
2954 perf_pmu_enable(ctx->pmu);
2958 * Test whether two contexts are equivalent, i.e. whether they have both been
2959 * cloned from the same version of the same context.
2961 * Equivalence is measured using a generation number in the context that is
2962 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2963 * and list_del_event().
2965 static int context_equiv(struct perf_event_context *ctx1,
2966 struct perf_event_context *ctx2)
2968 lockdep_assert_held(&ctx1->lock);
2969 lockdep_assert_held(&ctx2->lock);
2971 /* Pinning disables the swap optimization */
2972 if (ctx1->pin_count || ctx2->pin_count)
2975 /* If ctx1 is the parent of ctx2 */
2976 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2979 /* If ctx2 is the parent of ctx1 */
2980 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2984 * If ctx1 and ctx2 have the same parent; we flatten the parent
2985 * hierarchy, see perf_event_init_context().
2987 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2988 ctx1->parent_gen == ctx2->parent_gen)
2995 static void __perf_event_sync_stat(struct perf_event *event,
2996 struct perf_event *next_event)
3000 if (!event->attr.inherit_stat)
3004 * Update the event value, we cannot use perf_event_read()
3005 * because we're in the middle of a context switch and have IRQs
3006 * disabled, which upsets smp_call_function_single(), however
3007 * we know the event must be on the current CPU, therefore we
3008 * don't need to use it.
3010 if (event->state == PERF_EVENT_STATE_ACTIVE)
3011 event->pmu->read(event);
3013 perf_event_update_time(event);
3016 * In order to keep per-task stats reliable we need to flip the event
3017 * values when we flip the contexts.
3019 value = local64_read(&next_event->count);
3020 value = local64_xchg(&event->count, value);
3021 local64_set(&next_event->count, value);
3023 swap(event->total_time_enabled, next_event->total_time_enabled);
3024 swap(event->total_time_running, next_event->total_time_running);
3027 * Since we swizzled the values, update the user visible data too.
3029 perf_event_update_userpage(event);
3030 perf_event_update_userpage(next_event);
3033 static void perf_event_sync_stat(struct perf_event_context *ctx,
3034 struct perf_event_context *next_ctx)
3036 struct perf_event *event, *next_event;
3041 update_context_time(ctx);
3043 event = list_first_entry(&ctx->event_list,
3044 struct perf_event, event_entry);
3046 next_event = list_first_entry(&next_ctx->event_list,
3047 struct perf_event, event_entry);
3049 while (&event->event_entry != &ctx->event_list &&
3050 &next_event->event_entry != &next_ctx->event_list) {
3052 __perf_event_sync_stat(event, next_event);
3054 event = list_next_entry(event, event_entry);
3055 next_event = list_next_entry(next_event, event_entry);
3059 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
3060 struct task_struct *next)
3062 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
3063 struct perf_event_context *next_ctx;
3064 struct perf_event_context *parent, *next_parent;
3065 struct perf_cpu_context *cpuctx;
3071 cpuctx = __get_cpu_context(ctx);
3072 if (!cpuctx->task_ctx)
3076 next_ctx = next->perf_event_ctxp[ctxn];
3080 parent = rcu_dereference(ctx->parent_ctx);
3081 next_parent = rcu_dereference(next_ctx->parent_ctx);
	/* If neither context has a parent context, they cannot be clones. */
3084 if (!parent && !next_parent)
3087 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3089 * Looks like the two contexts are clones, so we might be
3090 * able to optimize the context switch. We lock both
3091 * contexts and check that they are clones under the
3092 * lock (including re-checking that neither has been
3093 * uncloned in the meantime). It doesn't matter which
3094 * order we take the locks because no other cpu could
3095 * be trying to lock both of these tasks.
3097 raw_spin_lock(&ctx->lock);
3098 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
3099 if (context_equiv(ctx, next_ctx)) {
3100 WRITE_ONCE(ctx->task, next);
3101 WRITE_ONCE(next_ctx->task, task);
3103 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
3106 * RCU_INIT_POINTER here is safe because we've not
			 * modified the ctx and the above modifications of
			 * ctx->task and ctx->task_ctx_data are immaterial
3109 * since those values are always verified under
3110 * ctx->lock which we're now holding.
3112 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
3113 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
3117 perf_event_sync_stat(ctx, next_ctx);
3119 raw_spin_unlock(&next_ctx->lock);
3120 raw_spin_unlock(&ctx->lock);
3126 raw_spin_lock(&ctx->lock);
3127 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
3128 raw_spin_unlock(&ctx->lock);
3132 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3134 void perf_sched_cb_dec(struct pmu *pmu)
3136 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3138 this_cpu_dec(perf_sched_cb_usages);
3140 if (!--cpuctx->sched_cb_usage)
3141 list_del(&cpuctx->sched_cb_entry);
3145 void perf_sched_cb_inc(struct pmu *pmu)
3147 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3149 if (!cpuctx->sched_cb_usage++)
3150 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3152 this_cpu_inc(perf_sched_cb_usages);
3156 * This function provides the context switch callback to the lower code
3157 * layer. It is invoked ONLY when the context switch callback is enabled.
 * This callback is relevant even to per-cpu events; for example multi-event
3160 * PEBS requires this to provide PID/TID information. This requires we flush
3161 * all queued PEBS records before we context switch to a new task.
3163 static void perf_pmu_sched_task(struct task_struct *prev,
3164 struct task_struct *next,
3167 struct perf_cpu_context *cpuctx;
3173 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
3174 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
3176 if (WARN_ON_ONCE(!pmu->sched_task))
3179 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3180 perf_pmu_disable(pmu);
3182 pmu->sched_task(cpuctx->task_ctx, sched_in);
3184 perf_pmu_enable(pmu);
3185 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3189 static void perf_event_switch(struct task_struct *task,
3190 struct task_struct *next_prev, bool sched_in);
3192 #define for_each_task_context_nr(ctxn) \
3193 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
3196 * Called from scheduler to remove the events of the current task,
3197 * with interrupts disabled.
3199 * We stop each event and update the event value in event->count.
3201 * This does not protect us against NMI, but disable()
3202 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
3204 * not restart the event.
3206 void __perf_event_task_sched_out(struct task_struct *task,
3207 struct task_struct *next)
3211 if (__this_cpu_read(perf_sched_cb_usages))
3212 perf_pmu_sched_task(task, next, false);
3214 if (atomic_read(&nr_switch_events))
3215 perf_event_switch(task, next, false);
3217 for_each_task_context_nr(ctxn)
3218 perf_event_context_sched_out(task, ctxn, next);
	/*
	 * If cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state;
	 * cgroup events are system-wide mode only.
	 */
3225 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3226 perf_cgroup_sched_out(task, next);
3230 * Called with IRQs disabled
3232 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3233 enum event_type_t event_type)
3235 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
3238 static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
3239 int (*func)(struct perf_event *, void *), void *data)
3241 struct perf_event **evt, *evt1, *evt2;
3244 evt1 = perf_event_groups_first(groups, -1);
3245 evt2 = perf_event_groups_first(groups, cpu);
3247 while (evt1 || evt2) {
3249 if (evt1->group_index < evt2->group_index)
3259 ret = func(*evt, data);
3263 *evt = perf_event_groups_next(*evt);
3269 struct sched_in_data {
3270 struct perf_event_context *ctx;
3271 struct perf_cpu_context *cpuctx;
3275 static int pinned_sched_in(struct perf_event *event, void *data)
3277 struct sched_in_data *sid = data;
3279 if (event->state <= PERF_EVENT_STATE_OFF)
3282 if (!event_filter_match(event))
3285 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
3286 if (!group_sched_in(event, sid->cpuctx, sid->ctx))
3287 list_add_tail(&event->active_list, &sid->ctx->pinned_active);
3291 * If this pinned group hasn't been scheduled,
3292 * put it in error state.
3294 if (event->state == PERF_EVENT_STATE_INACTIVE)
3295 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
3300 static int flexible_sched_in(struct perf_event *event, void *data)
3302 struct sched_in_data *sid = data;
3304 if (event->state <= PERF_EVENT_STATE_OFF)
3307 if (!event_filter_match(event))
3310 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
3311 if (!group_sched_in(event, sid->cpuctx, sid->ctx))
			list_add_tail(&event->active_list, &sid->ctx->flexible_active);
		else
			sid->can_add_hw = 0;
3321 ctx_pinned_sched_in(struct perf_event_context *ctx,
3322 struct perf_cpu_context *cpuctx)
3324 struct sched_in_data sid = {
3330 visit_groups_merge(&ctx->pinned_groups,
3332 pinned_sched_in, &sid);
3336 ctx_flexible_sched_in(struct perf_event_context *ctx,
3337 struct perf_cpu_context *cpuctx)
3339 struct sched_in_data sid = {
3345 visit_groups_merge(&ctx->flexible_groups,
3347 flexible_sched_in, &sid);
3351 ctx_sched_in(struct perf_event_context *ctx,
3352 struct perf_cpu_context *cpuctx,
3353 enum event_type_t event_type,
3354 struct task_struct *task)
	int is_active = ctx->is_active;
	u64 now;
3359 lockdep_assert_held(&ctx->lock);
3361 if (likely(!ctx->nr_events))
3364 ctx->is_active |= (event_type | EVENT_TIME);
3367 cpuctx->task_ctx = ctx;
3369 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3372 is_active ^= ctx->is_active; /* changed bits */
3374 if (is_active & EVENT_TIME) {
		/* start ctx time */
		now = perf_clock();
		ctx->timestamp = now;
3378 perf_cgroup_set_timestamp(task, ctx);
3382 * First go through the list and put on any pinned groups
3383 * in order to give them the best chance of going on.
3385 if (is_active & EVENT_PINNED)
3386 ctx_pinned_sched_in(ctx, cpuctx);
3388 /* Then walk through the lower prio flexible groups */
3389 if (is_active & EVENT_FLEXIBLE)
3390 ctx_flexible_sched_in(ctx, cpuctx);
3393 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
3394 enum event_type_t event_type,
3395 struct task_struct *task)
3397 struct perf_event_context *ctx = &cpuctx->ctx;
3399 ctx_sched_in(ctx, cpuctx, event_type, task);
3402 static void perf_event_context_sched_in(struct perf_event_context *ctx,
3403 struct task_struct *task)
3405 struct perf_cpu_context *cpuctx;
3407 cpuctx = __get_cpu_context(ctx);
3408 if (cpuctx->task_ctx == ctx)
3411 perf_ctx_lock(cpuctx, ctx);
3413 * We must check ctx->nr_events while holding ctx->lock, such
3414 * that we serialize against perf_install_in_context().
3416 if (!ctx->nr_events)
3419 perf_pmu_disable(ctx->pmu);
3421 * We want to keep the following priority order:
3422 * cpu pinned (that don't need to move), task pinned,
3423 * cpu flexible, task flexible.
3425 * However, if task's ctx is not carrying any pinned
3426 * events, no need to flip the cpuctx's events around.
3428 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
3429 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3430 perf_event_sched_in(cpuctx, ctx, task);
3431 perf_pmu_enable(ctx->pmu);
3434 perf_ctx_unlock(cpuctx, ctx);
3438 * Called from scheduler to add the events of the current task
3439 * with interrupts disabled.
3441 * We restore the event value and then enable it.
3443 * This does not protect us against NMI, but enable()
3444 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
3446 * keep the event running.
3448 void __perf_event_task_sched_in(struct task_struct *prev,
3449 struct task_struct *task)
3451 struct perf_event_context *ctx;
3455 * If cgroup events exist on this CPU, then we need to check if we have
 * to switch in PMU state; cgroup events are system-wide mode only.
3458 * Since cgroup events are CPU events, we must schedule these in before
3459 * we schedule in the task events.
3461 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3462 perf_cgroup_sched_in(prev, task);
3464 for_each_task_context_nr(ctxn) {
3465 ctx = task->perf_event_ctxp[ctxn];
3469 perf_event_context_sched_in(ctx, task);
3472 if (atomic_read(&nr_switch_events))
3473 perf_event_switch(task, prev, true);
3475 if (__this_cpu_read(perf_sched_cb_usages))
3476 perf_pmu_sched_task(prev, task, true);
3479 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3481 u64 frequency = event->attr.sample_freq;
3482 u64 sec = NSEC_PER_SEC;
3483 u64 divisor, dividend;
3485 int count_fls, nsec_fls, frequency_fls, sec_fls;
3487 count_fls = fls64(count);
3488 nsec_fls = fls64(nsec);
3489 frequency_fls = fls64(frequency);
	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *               @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 */
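
	/*
	 * For example, with assumed numbers: if a task produced
	 * @count = 4,000,000 events during @nsec = 10,000,000 ns (10 ms)
	 * and sample_freq is 1000 Hz, then
	 *
	 *	period = (4e6 * 1e9) / (1e7 * 1000) = 400,000
	 *
	 * i.e. sampling once every 400,000 events at that event rate yields
	 * roughly 1000 samples per second.
	 */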
	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
3506 #define REDUCE_FLS(a, b) \
3508 if (a##_fls > b##_fls) { \
3518 * Reduce accuracy until either term fits in a u64, then proceed with
3519 * the other, so that finally we can do a u64/u64 division.
3521 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3522 REDUCE_FLS(nsec, frequency);
3523 REDUCE_FLS(sec, count);
3526 if (count_fls + sec_fls > 64) {
3527 divisor = nsec * frequency;
		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;
		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}
		divisor = nsec * frequency;
	}
3549 return div64_u64(dividend, divisor);
3552 static DEFINE_PER_CPU(int, perf_throttled_count);
3553 static DEFINE_PER_CPU(u64, perf_throttled_seq);
3555 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
3557 struct hw_perf_event *hwc = &event->hw;
3558 s64 period, sample_period;
3561 period = perf_calculate_period(event, nsec, count);
3563 delta = (s64)(period - hwc->sample_period);
3564 delta = (delta + 7) / 8; /* low pass filter */
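	/*
	 * With assumed numbers: if the freshly computed period is 400,000
	 * while hwc->sample_period is still 320,000, delta is 80,000 and the
	 * line above reduces it to 10,000, so sample_period moves roughly
	 * 1/8th of the remaining distance per tick rather than jumping
	 * straight to the new value.
	 */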
	sample_period = hwc->sample_period + delta;
	if (!sample_period)
		sample_period = 1;
	hwc->sample_period = sample_period;
	if (local64_read(&hwc->period_left) > 8*sample_period) {
		if (disable)
			event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		if (disable)
			event->pmu->start(event, PERF_EF_RELOAD);
	}
3585 * combine freq adjustment with unthrottling to avoid two passes over the
3586 * events. At the same time, make sure, having freq events does not change
3587 * the rate of unthrottling as that would introduce bias.
3589 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3592 struct perf_event *event;
3593 struct hw_perf_event *hwc;
3594 u64 now, period = TICK_NSEC;
	/*
	 * only need to iterate over all events iff:
	 * - context has events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
3602 if (!(ctx->nr_freq || needs_unthr))
3605 raw_spin_lock(&ctx->lock);
3606 perf_pmu_disable(ctx->pmu);
3608 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3609 if (event->state != PERF_EVENT_STATE_ACTIVE)
3612 if (!event_filter_match(event))
3615 perf_pmu_disable(event->pmu);
		hwc = &event->hw;
		if (hwc->interrupts == MAX_INTERRUPTS) {
3620 hwc->interrupts = 0;
3621 perf_log_throttle(event, 1);
3622 event->pmu->start(event, 0);
3625 if (!event->attr.freq || !event->attr.sample_freq)
3629 * stop the event and update event->count
3631 event->pmu->stop(event, PERF_EF_UPDATE);
3633 now = local64_read(&event->count);
3634 delta = now - hwc->freq_count_stamp;
3635 hwc->freq_count_stamp = now;