Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Author:     David S. Miller <davem@davemloft.net>
AuthorDate: Sun, 14 Oct 2018 20:01:20 +0000 (13:01 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Sun, 14 Oct 2018 20:01:20 +0000 (13:01 -0700)
Daniel Borkmann says:

====================
pull-request: bpf 2018-10-14

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix xsk map update and delete operations to not call synchronize_net()
   but to piggyback on SOCK_RCU_FREE for sockets instead, as we are not
   allowed to sleep under RCU, from Björn. (A condensed sketch of the
   pattern follows this message.)

2) Do not change RLIMIT_MEMLOCK in the reuseport_bpf selftest if the
   process already has unlimited RLIMIT_MEMLOCK, from Eric. (A standalone
   illustration of the guard follows this message.)
====================
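
The two hunks behind change 1) cooperate: net/xdp/xsk.c flags every XDP
socket at creation time, which lets kernel/bpf/xskmap.c drop references
without blocking. A condensed sketch of the pattern is below; it is not a
standalone compilable unit, the variable names (sk, xs, old_xs, m, i) are
taken from the hunks further down, and the actual RCU-deferred free
happens inside the existing sock_put() release path once SOCK_RCU_FREE
is set:

        /* Socket creation: defer the final free of this socket until an
         * RCU grace period has elapsed, so map lookups done under
         * rcu_read_lock() never see the socket vanish underneath them.
         */
        sock_set_flag(sk, SOCK_RCU_FREE);

        /* Map update/delete: swap the slot and drop the reference right
         * away. No synchronize_net() here -- it sleeps, and these paths
         * may be entered under RCU where sleeping is not allowed.
         */
        old_xs = xchg(&m->xsk_map[i], xs);
        if (old_xs)
                sock_put((struct sock *)old_xs);

The grace-period cost thus moves from the writer, which previously
blocked in synchronize_net(), to an asynchronous RCU callback on the
socket itself.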
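
Change 2) guards against an arithmetic hazard: rlim_t is unsigned and on
Linux RLIM_INFINITY is its all-ones maximum, so adding (1UL << 20) to an
already-unlimited limit wraps around and would actually shrink
RLIMIT_MEMLOCK to roughly 1 MB. The snippet below is a standalone
illustration of the same guard, not the selftest code; main() and the
variable names are only for this sketch:

        #include <stdio.h>
        #include <sys/resource.h>

        int main(void)
        {
                struct rlimit cur;

                if (getrlimit(RLIMIT_MEMLOCK, &cur)) {
                        perror("getrlimit");
                        return 1;
                }

                /* Same check as the selftest constructor: leave an
                 * unlimited limit alone instead of wrapping it down to a
                 * tiny finite value.
                 */
                if (cur.rlim_cur == RLIM_INFINITY) {
                        printf("RLIMIT_MEMLOCK already unlimited, leaving it alone\n");
                        return 0;
                }

                struct rlimit bumped = {
                        .rlim_cur = cur.rlim_cur + (1UL << 20),
                        .rlim_max = cur.rlim_max + (1UL << 20),
                };

                /* Raising rlim_max needs CAP_SYS_RESOURCE, so this can
                 * fail with EPERM for an unprivileged process; the
                 * selftest itself ignores the return value.
                 */
                if (setrlimit(RLIMIT_MEMLOCK, &bumped)) {
                        perror("setrlimit");
                        return 1;
                }

                printf("raised RLIMIT_MEMLOCK soft limit to %llu\n",
                       (unsigned long long)bumped.rlim_cur);
                return 0;
        }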

Signed-off-by: David S. Miller <davem@davemloft.net>
kernel/bpf/xskmap.c
net/xdp/xsk.c
tools/testing/selftests/net/reuseport_bpf.c

diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c857b868181938e9baecfd991a473..47147c9e184dd8bc34981031e9b4d221df7aa31a 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
        sock_hold(sock->sk);
 
        old_xs = xchg(&m->xsk_map[i], xs);
-       if (old_xs) {
-               /* Make sure we've flushed everything. */
-               synchronize_net();
+       if (old_xs)
                sock_put((struct sock *)old_xs);
-       }
 
        sockfd_put(sock);
        return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
                return -EINVAL;
 
        old_xs = xchg(&m->xsk_map[k], NULL);
-       if (old_xs) {
-               /* Make sure we've flushed everything. */
-               synchronize_net();
+       if (old_xs)
                sock_put((struct sock *)old_xs);
-       }
 
        return 0;
 }
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17dc6b4b617f463336e9a8d2867ed6d..661504042d3040dd01cf3ef56955d6f93b467e76 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);
 
+       sock_set_flag(sk, SOCK_RCU_FREE);
+
        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->tx_completion_lock);
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea922f839d61ca8f78c8e73c9fdb89f5..b5277106df1fd156b5e7c0b30b55952c369b6bd1 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
        }
 }
 
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
 
 static  __attribute__((constructor)) void main_ctor(void)
 {
        getrlimit(RLIMIT_MEMLOCK, &rlim_old);
-       rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
-       rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
-       setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
+       if (rlim_old.rlim_cur != RLIM_INFINITY) {
+               struct rlimit rlim_new;
+
+               rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+               rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+               setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+       }
 }
 
 static __attribute__((destructor)) void main_dtor(void)