livepatch: add (un)patch callbacks
kernel/livepatch/transition.c
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

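/*
 * Overview of the transition machinery implemented below:
 *
 *   klp_init_transition()         - set the global target patch state and
 *                                   mark all funcs in the patch as
 *                                   transitioning
 *   klp_start_transition()        - set TIF_PATCH_PENDING on every task that
 *                                   still needs to switch, so it transitions
 *                                   as it exits the kernel or at the idle
 *                                   loop switch point
 *   klp_try_complete_transition() - switch any remaining tasks whose stacks
 *                                   can be checked reliably, re-arming
 *                                   klp_transition_work until all have
 *                                   switched
 *   klp_complete_transition()     - clean up and run the post-(un)patch
 *                                   callbacks once every task has reached the
 *                                   target state
 *
 * klp_reverse_transition() can flip the direction of an in-flight transition,
 * and klp_cancel_transition() aborts one that was initialized but never
 * started.
 */
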
/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
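
/*
 * Note: when klp_try_complete_transition() can't switch every task, it
 * re-arms this delayed work with round_jiffies_relative(HZ), so the stack
 * checks are retried roughly once per second until the transition completes
 * or is reversed.
 */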

/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead hard-force the sched synchronization.
 *
 * This approach allows RCU functions to be used safely for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;
        bool immediate_func = false;

        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

        if (klp_transition_patch->immediate)
                goto done;

        klp_for_each_object(klp_transition_patch, obj) {
                klp_for_each_func(obj, func) {
                        func->transition = false;
                        if (func->immediate)
                                immediate_func = true;
                }
        }

        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
                klp_synchronize_transition();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }

done:
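        /*
         * Now that the transition has completed (or was skipped for an
         * immediate patch), run the post-patch or post-unpatch callback for
         * each object that is currently loaded.
         */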
        klp_for_each_object(klp_transition_patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;
                if (klp_target_state == KLP_PATCHED)
                        klp_post_patch_callback(obj);
                else if (klp_target_state == KLP_UNPATCHED)
                        klp_post_unpatch_callback(obj);
        }

        /*
         * See complementary comment in __klp_enable_patch() for why we
         * keep the module reference for immediate patches.
         */
        if (!klp_transition_patch->immediate && !immediate_func &&
            klp_target_state == KLP_UNPATCHED) {
                module_put(klp_transition_patch->mod);
        }

        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;

        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_sched() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barrier is in
         *    klp_init_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}
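
/*
 * Note: klp_update_patch_state() is called from outside this file once a
 * task's TIF_PATCH_PENDING flag is seen, typically on the paths mentioned
 * in klp_start_transition() below (kernel exit and the idle loop switch
 * point).
 */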

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
                                struct stack_trace *trace)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        if (func->immediate)
                return 0;

        for (i = 0; i < trace->nr_entries; i++) {
                address = trace->entries[i];

                if (klp_target_state == KLP_UNPATCHED) {
                        /*
                         * Check for the to-be-unpatched function
                         * (the func itself).
                         */
                        func_addr = (unsigned long)func->new_func;
                        func_size = func->new_size;
                } else {
                        /*
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
                        ops = klp_find_ops(func->old_addr);

                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
                                func_addr = func->old_addr;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
                                struct klp_func *prev;

                                prev = list_next_entry(func, stack_node);
                                func_addr = (unsigned long)prev->new_func;
                                func_size = prev->new_size;
                        }
                }

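                /*
                 * If this stack entry falls inside the function being
                 * checked, the task is sleeping in a to-be-patched or
                 * to-be-unpatched function and can't be switched yet.
                 */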
                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct stack_trace trace;
        struct klp_object *obj;
        struct klp_func *func;
        int ret;

        trace.skip = 0;
        trace.nr_entries = 0;
        trace.max_entries = MAX_STACK_ENTRIES;
        trace.entries = entries;
        ret = save_stack_trace_tsk_reliable(task, &trace);
        WARN_ON_ONCE(ret == -ENOSYS);
        if (ret) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, &trace);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
                                         __func__, task->comm, task->pid,
                                         func->old_name);
                                return ret;
                        }
                }
        }

        return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
        struct rq *rq;
        struct rq_flags flags;
        int ret;
        bool success = false;
        char err_buf[STACK_ERR_BUF_SIZE];

        err_buf[0] = '\0';

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * For arches which don't have reliable stack traces, we have to rely
         * on other methods (e.g., switching tasks at kernel exit).
         */
        if (!klp_have_reliable_stack())
                return false;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
        rq = task_rq_lock(task, &flags);

        if (task_running(rq, task) && task != current) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d is running\n", __func__, task->comm,
                         task->pid);
                goto done;
        }

        ret = klp_check_stack(task, err_buf);
        if (ret)
                goto done;

        success = true;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;

done:
        task_rq_unlock(rq, task, &flags);

        /*
         * Due to console deadlock issues, pr_debug() can't be used while
         * holding the task rq lock.  Instead we have to use a temporary buffer
         * and print the debug message after releasing the lock.
         */
        if (err_buf[0] != '\0')
                pr_debug("%s", err_buf);

        return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (klp_transition_patch->immediate)
                goto success;

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        get_online_cpus();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task))
                                complete = false;
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        put_online_cpus();

        if (!complete) {
                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

success:
        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /* we're done, now cleanup the data structures */
        klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (klp_transition_patch->immediate)
                return;

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
         */
        if (patch->immediate)
                return;

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_UNDEFINED.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
         * set a task->patch_state to KLP_UNDEFINED.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        klp_transition_patch->enabled = !klp_transition_patch->enabled;

        klp_target_state = !klp_target_state;

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() running in parallel with
         * klp_start_transition().
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /* Let any remaining calls to klp_update_patch_state() complete */
        klp_synchronize_transition();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        child->patch_state = current->patch_state;

        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}