Merge branch 'for-4.16/remove-immediate' into for-linus
author Jiri Kosina <jkosina@suse.cz>
Wed, 31 Jan 2018 15:33:52 +0000 (16:33 +0100)
committer Jiri Kosina <jkosina@suse.cz>
Wed, 31 Jan 2018 15:36:38 +0000 (16:36 +0100)
Pull 'immediate' feature removal from Miroslav Benes.

1  2 
kernel/livepatch/core.c

diff --combined kernel/livepatch/core.c
index 8fd8e8f126dab4c17b1292fd377c00f9ff6f7f5a,41be6061b92f28650065019b583b48610f6d7240..3a4656fb7047c24bde87c644cb79902df2a6adba
@@@ -366,11 -366,6 +366,6 @@@ static int __klp_enable_patch(struct kl
        /*
         * A reference is taken on the patch module to prevent it from being
         * unloaded.
-        *
-        * Note: For immediate (no consistency model) patches we don't allow
-        * patch modules to unload since there is no safe/sane method to
-        * determine if a thread is still running in the patched code contained
-        * in the patch module once the ftrace registration is successful.
         */
        if (!try_module_get(patch->mod))
                return -ENODEV;
@@@ -537,24 -532,22 +532,24 @@@ static ssize_t signal_store(struct kobj
        int ret;
        bool val;
  
 -      patch = container_of(kobj, struct klp_patch, kobj);
 -
 -      /*
 -       * klp_mutex lock is not grabbed here intentionally. It is not really
 -       * needed. The race window is harmless and grabbing the lock would only
 -       * hold the action back.
 -       */
 -      if (patch != klp_transition_patch)
 -              return -EINVAL;
 -
        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;
  
 -      if (val)
 -              klp_send_signals();
 +      if (!val)
 +              return count;
 +
 +      mutex_lock(&klp_mutex);
 +
 +      patch = container_of(kobj, struct klp_patch, kobj);
 +      if (patch != klp_transition_patch) {
 +              mutex_unlock(&klp_mutex);
 +              return -EINVAL;
 +      }
 +
 +      klp_send_signals();
 +
 +      mutex_unlock(&klp_mutex);
  
        return count;
  }
@@@ -566,24 -559,22 +561,24 @@@ static ssize_t force_store(struct kobje
        int ret;
        bool val;
  
 -      patch = container_of(kobj, struct klp_patch, kobj);
 -
 -      /*
 -       * klp_mutex lock is not grabbed here intentionally. It is not really
 -       * needed. The race window is harmless and grabbing the lock would only
 -       * hold the action back.
 -       */
 -      if (patch != klp_transition_patch)
 -              return -EINVAL;
 -
        ret = kstrtobool(buf, &val);
        if (ret)
                return ret;
  
 -      if (val)
 -              klp_force_transition();
 +      if (!val)
 +              return count;
 +
 +      mutex_lock(&klp_mutex);
 +
 +      patch = container_of(kobj, struct klp_patch, kobj);
 +      if (patch != klp_transition_patch) {
 +              mutex_unlock(&klp_mutex);
 +              return -EINVAL;
 +      }
 +
 +      klp_force_transition();
 +
 +      mutex_unlock(&klp_mutex);
  
        return count;
  }
@@@ -894,12 -885,7 +889,7 @@@ int klp_register_patch(struct klp_patc
        if (!klp_initialized())
                return -ENODEV;
  
-       /*
-        * Architectures without reliable stack traces have to set
-        * patch->immediate because there's currently no way to patch kthreads
-        * with the consistency model.
-        */
-       if (!klp_have_reliable_stack() && !patch->immediate) {
+       if (!klp_have_reliable_stack()) {
                pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
                return -ENOSYS;
        }