2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * though our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after a year in a coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #define pr_fmt(fmt) "IPv4: " fmt
67 #include <linux/module.h>
68 #include <linux/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
94 #include <net/dst_metadata.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/lwtunnel.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
110 #include <linux/sysctl.h>
111 #include <linux/kmemleak.h>
113 #include <net/secure_seq.h>
114 #include <net/ip_tunnels.h>
115 #include <net/l3mdev.h>
117 #include "fib_lookup.h"
119 #define RT_FL_TOS(oldflp4) \
120 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
122 #define RT_GC_TIMEOUT (300*HZ)
124 static int ip_rt_max_size;
125 static int ip_rt_redirect_number __read_mostly = 9;
126 static int ip_rt_redirect_load __read_mostly = HZ / 50;
127 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
128 static int ip_rt_error_cost __read_mostly = HZ;
129 static int ip_rt_error_burst __read_mostly = 5 * HZ;
130 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
131 static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
132 static int ip_rt_min_advmss __read_mostly = 256;
134 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
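/* Most of the limits and timeouts above are tunable at run time: they are
 * exported as sysctls under /proc/sys/net/ipv4/route/ (e.g. redirect_load,
 * error_burst, mtu_expires, min_pmtu, gc_timeout) by the ipv4_route_table
 * sysctl table further down in this file, when CONFIG_SYSCTL is enabled.
 */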
137 * Interface to generic destination cache.
140 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
141 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
142 static unsigned int ipv4_mtu(const struct dst_entry *dst);
143 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
144 static void ipv4_link_failure(struct sk_buff *skb);
145 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
146 struct sk_buff *skb, u32 mtu);
147 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
148 struct sk_buff *skb);
149 static void ipv4_dst_destroy(struct dst_entry *dst);
151 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
157 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
160 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
162 static struct dst_ops ipv4_dst_ops = {
164 .check = ipv4_dst_check,
165 .default_advmss = ipv4_default_advmss,
167 .cow_metrics = ipv4_cow_metrics,
168 .destroy = ipv4_dst_destroy,
169 .negative_advice = ipv4_negative_advice,
170 .link_failure = ipv4_link_failure,
171 .update_pmtu = ip_rt_update_pmtu,
172 .redirect = ip_do_redirect,
173 .local_out = __ip_local_out,
174 .neigh_lookup = ipv4_neigh_lookup,
175 .confirm_neigh = ipv4_confirm_neigh,
178 #define ECN_OR_COST(class) TC_PRIO_##class
180 const __u8 ip_tos2prio[16] = {
182 ECN_OR_COST(BESTEFFORT),
184 ECN_OR_COST(BESTEFFORT),
190 ECN_OR_COST(INTERACTIVE),
192 ECN_OR_COST(INTERACTIVE),
193 TC_PRIO_INTERACTIVE_BULK,
194 ECN_OR_COST(INTERACTIVE_BULK),
195 TC_PRIO_INTERACTIVE_BULK,
196 ECN_OR_COST(INTERACTIVE_BULK)
198 EXPORT_SYMBOL(ip_tos2prio);
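/* Purely illustrative sketch, not referenced by the code in this file: it
 * shows how the table above is indexed.  The real helper is
 * rt_tos2priority() in include/net/route.h.
 */
static inline u8 example_tos2priority(u8 tos)
{
        /* IPTOS_TOS() keeps the four RFC 1349 TOS bits; shifting out the
         * unused low bit leaves a 4-bit index into the 16-entry table.
         */
        return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}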
200 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
201 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
203 #ifdef CONFIG_PROC_FS
204 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
208 return SEQ_START_TOKEN;
211 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
217 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
221 static int rt_cache_seq_show(struct seq_file *seq, void *v)
223 if (v == SEQ_START_TOKEN)
224 seq_printf(seq, "%-127s\n",
225 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
226 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
231 static const struct seq_operations rt_cache_seq_ops = {
232 .start = rt_cache_seq_start,
233 .next = rt_cache_seq_next,
234 .stop = rt_cache_seq_stop,
235 .show = rt_cache_seq_show,
238 static int rt_cache_seq_open(struct inode *inode, struct file *file)
240 return seq_open(file, &rt_cache_seq_ops);
243 static const struct file_operations rt_cache_seq_fops = {
244 .open = rt_cache_seq_open,
247 .release = seq_release,
251 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
256 return SEQ_START_TOKEN;
258 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
259 if (!cpu_possible(cpu))
262 return &per_cpu(rt_cache_stat, cpu);
267 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
271 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
272 if (!cpu_possible(cpu))
275 return &per_cpu(rt_cache_stat, cpu);
281 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
286 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
288 struct rt_cache_stat *st = v;
290 if (v == SEQ_START_TOKEN) {
291 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
295 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
296 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
297 dst_entries_get_slow(&ipv4_dst_ops),
310 0, /* st->gc_total */
311 0, /* st->gc_ignored */
312 0, /* st->gc_goal_miss */
313 0, /* st->gc_dst_overflow */
314 0, /* st->in_hlist_search */
315 0 /* st->out_hlist_search */
320 static const struct seq_operations rt_cpu_seq_ops = {
321 .start = rt_cpu_seq_start,
322 .next = rt_cpu_seq_next,
323 .stop = rt_cpu_seq_stop,
324 .show = rt_cpu_seq_show,
328 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
330 return seq_open(file, &rt_cpu_seq_ops);
333 static const struct file_operations rt_cpu_seq_fops = {
334 .open = rt_cpu_seq_open,
337 .release = seq_release,
340 #ifdef CONFIG_IP_ROUTE_CLASSID
341 static int rt_acct_proc_show(struct seq_file *m, void *v)
343 struct ip_rt_acct *dst, *src;
346 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
350 for_each_possible_cpu(i) {
351 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
352 for (j = 0; j < 256; j++) {
353 dst[j].o_bytes += src[j].o_bytes;
354 dst[j].o_packets += src[j].o_packets;
355 dst[j].i_bytes += src[j].i_bytes;
356 dst[j].i_packets += src[j].i_packets;
360 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
365 static int rt_acct_proc_open(struct inode *inode, struct file *file)
367 return single_open(file, rt_acct_proc_show, NULL);
370 static const struct file_operations rt_acct_proc_fops = {
371 .open = rt_acct_proc_open,
374 .release = single_release,
378 static int __net_init ip_rt_do_proc_init(struct net *net)
380 struct proc_dir_entry *pde;
382 pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
387 pde = proc_create("rt_cache", S_IRUGO,
388 net->proc_net_stat, &rt_cpu_seq_fops);
392 #ifdef CONFIG_IP_ROUTE_CLASSID
393 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
399 #ifdef CONFIG_IP_ROUTE_CLASSID
401 remove_proc_entry("rt_cache", net->proc_net_stat);
404 remove_proc_entry("rt_cache", net->proc_net);
409 static void __net_exit ip_rt_do_proc_exit(struct net *net)
411 remove_proc_entry("rt_cache", net->proc_net_stat);
412 remove_proc_entry("rt_cache", net->proc_net);
413 #ifdef CONFIG_IP_ROUTE_CLASSID
414 remove_proc_entry("rt_acct", net->proc_net);
418 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
419 .init = ip_rt_do_proc_init,
420 .exit = ip_rt_do_proc_exit,
423 static int __init ip_rt_proc_init(void)
425 return register_pernet_subsys(&ip_rt_proc_ops);
429 static inline int ip_rt_proc_init(void)
433 #endif /* CONFIG_PROC_FS */
435 static inline bool rt_is_expired(const struct rtable *rth)
437 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
440 void rt_cache_flush(struct net *net)
442 rt_genid_bump_ipv4(net);
445 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
449 struct net_device *dev = dst->dev;
450 const __be32 *pkey = daddr;
451 const struct rtable *rt;
454 rt = (const struct rtable *) dst;
456 pkey = (const __be32 *) &rt->rt_gateway;
458 pkey = &ip_hdr(skb)->daddr;
460 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
463 return neigh_create(&arp_tbl, pkey, dev);
466 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
468 struct net_device *dev = dst->dev;
469 const __be32 *pkey = daddr;
470 const struct rtable *rt;
472 rt = (const struct rtable *)dst;
474 pkey = (const __be32 *)&rt->rt_gateway;
477 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
480 __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
483 #define IP_IDENTS_SZ 2048u
485 static atomic_t *ip_idents __read_mostly;
486 static u32 *ip_tstamps __read_mostly;
488 /* In order to protect privacy, we add a perturbation to identifiers
489 * if one generator is seldom used. This makes it hard for an attacker
490 * to infer how many packets were sent between two points in time.
492 u32 ip_idents_reserve(u32 hash, int segs)
494 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
495 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
496 u32 old = READ_ONCE(*p_tstamp);
497 u32 now = (u32)jiffies;
500 if (old != now && cmpxchg(p_tstamp, old, now) == old)
501 delta = prandom_u32_max(now - old);
503 /* Do not use atomic_add_return() as it makes UBSAN unhappy */
505 old = (u32)atomic_read(p_id);
506 new = old + delta + segs;
507 } while (atomic_cmpxchg(p_id, old, new) != old);
511 EXPORT_SYMBOL(ip_idents_reserve);
513 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
515 static u32 ip_idents_hashrnd __read_mostly;
518 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
520 hash = jhash_3words((__force u32)iph->daddr,
521 (__force u32)iph->saddr,
522 iph->protocol ^ net_hash_mix(net),
524 id = ip_idents_reserve(hash, segs);
527 EXPORT_SYMBOL(__ip_select_ident);
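/* Illustrative sketch only, not used in this file: a caller stamping the
 * IP ID of an outgoing datagram.  The real wrappers are ip_select_ident()
 * and ip_select_ident_segs() in include/net/ip.h, which do roughly this and
 * additionally special-case DF packets on connected sockets.
 */
static inline void example_stamp_ip_id(struct net *net, struct sk_buff *skb)
{
        /* Reserve as many IDs as the packet has GSO segments so that each
         * on-the-wire segment gets a distinct, consecutive ID.
         */
        int segs = skb_shinfo(skb)->gso_segs ?: 1;

        __ip_select_ident(net, ip_hdr(skb), segs);
}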
529 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
530 const struct sock *sk,
531 const struct iphdr *iph,
533 u8 prot, u32 mark, int flow_flags)
536 const struct inet_sock *inet = inet_sk(sk);
538 oif = sk->sk_bound_dev_if;
540 tos = RT_CONN_FLAGS(sk);
541 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
543 flowi4_init_output(fl4, oif, mark, tos,
544 RT_SCOPE_UNIVERSE, prot,
546 iph->daddr, iph->saddr, 0, 0,
547 sock_net_uid(net, sk));
550 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
551 const struct sock *sk)
553 const struct net *net = dev_net(skb->dev);
554 const struct iphdr *iph = ip_hdr(skb);
555 int oif = skb->dev->ifindex;
556 u8 tos = RT_TOS(iph->tos);
557 u8 prot = iph->protocol;
558 u32 mark = skb->mark;
560 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
563 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
565 const struct inet_sock *inet = inet_sk(sk);
566 const struct ip_options_rcu *inet_opt;
567 __be32 daddr = inet->inet_daddr;
570 inet_opt = rcu_dereference(inet->inet_opt);
571 if (inet_opt && inet_opt->opt.srr)
572 daddr = inet_opt->opt.faddr;
573 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
574 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
575 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
576 inet_sk_flowi_flags(sk),
577 daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
581 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
582 const struct sk_buff *skb)
585 build_skb_flow_key(fl4, skb, sk);
587 build_sk_flow_key(fl4, sk);
590 static DEFINE_SPINLOCK(fnhe_lock);
592 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
596 rt = rcu_dereference(fnhe->fnhe_rth_input);
598 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
599 dst_dev_put(&rt->dst);
600 dst_release(&rt->dst);
602 rt = rcu_dereference(fnhe->fnhe_rth_output);
604 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
605 dst_dev_put(&rt->dst);
606 dst_release(&rt->dst);
610 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
612 struct fib_nh_exception *fnhe, *oldest;
614 oldest = rcu_dereference(hash->chain);
615 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
616 fnhe = rcu_dereference(fnhe->fnhe_next)) {
617 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
620 fnhe_flush_routes(oldest);
624 static inline u32 fnhe_hashfun(__be32 daddr)
626 static u32 fnhe_hashrnd __read_mostly;
629 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
630 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
631 return hash_32(hval, FNHE_HASH_SHIFT);
634 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
636 rt->rt_pmtu = fnhe->fnhe_pmtu;
637 rt->dst.expires = fnhe->fnhe_expires;
640 rt->rt_flags |= RTCF_REDIRECTED;
641 rt->rt_gateway = fnhe->fnhe_gw;
642 rt->rt_uses_gateway = 1;
646 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
647 u32 pmtu, unsigned long expires)
649 struct fnhe_hash_bucket *hash;
650 struct fib_nh_exception *fnhe;
656 genid = fnhe_genid(dev_net(nh->nh_dev));
657 hval = fnhe_hashfun(daddr);
659 spin_lock_bh(&fnhe_lock);
661 hash = rcu_dereference(nh->nh_exceptions);
663 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
666 rcu_assign_pointer(nh->nh_exceptions, hash);
672 for (fnhe = rcu_dereference(hash->chain); fnhe;
673 fnhe = rcu_dereference(fnhe->fnhe_next)) {
674 if (fnhe->fnhe_daddr == daddr)
680 if (fnhe->fnhe_genid != genid)
681 fnhe->fnhe_genid = genid;
685 fnhe->fnhe_pmtu = pmtu;
686 fnhe->fnhe_expires = max(1UL, expires);
687 /* Update all cached dsts too */
688 rt = rcu_dereference(fnhe->fnhe_rth_input);
690 fill_route_from_fnhe(rt, fnhe);
691 rt = rcu_dereference(fnhe->fnhe_rth_output);
693 fill_route_from_fnhe(rt, fnhe);
695 if (depth > FNHE_RECLAIM_DEPTH)
696 fnhe = fnhe_oldest(hash);
698 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
702 fnhe->fnhe_next = hash->chain;
703 rcu_assign_pointer(hash->chain, fnhe);
705 fnhe->fnhe_genid = genid;
706 fnhe->fnhe_daddr = daddr;
708 fnhe->fnhe_pmtu = pmtu;
709 fnhe->fnhe_expires = expires;
711 /* Exception created; mark the cached routes for the nexthop
712 * stale, so anyone caching it rechecks if this exception
715 rt = rcu_dereference(nh->nh_rth_input);
717 rt->dst.obsolete = DST_OBSOLETE_KILL;
719 for_each_possible_cpu(i) {
720 struct rtable __rcu **prt;
721 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
722 rt = rcu_dereference(*prt);
724 rt->dst.obsolete = DST_OBSOLETE_KILL;
728 fnhe->fnhe_stamp = jiffies;
731 spin_unlock_bh(&fnhe_lock);
734 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
737 __be32 new_gw = icmp_hdr(skb)->un.gateway;
738 __be32 old_gw = ip_hdr(skb)->saddr;
739 struct net_device *dev = skb->dev;
740 struct in_device *in_dev;
741 struct fib_result res;
745 switch (icmp_hdr(skb)->code & 7) {
747 case ICMP_REDIR_NETTOS:
748 case ICMP_REDIR_HOST:
749 case ICMP_REDIR_HOSTTOS:
756 if (rt->rt_gateway != old_gw)
759 in_dev = __in_dev_get_rcu(dev);
764 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
765 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
766 ipv4_is_zeronet(new_gw))
767 goto reject_redirect;
769 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
770 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
771 goto reject_redirect;
772 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
773 goto reject_redirect;
775 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
776 goto reject_redirect;
779 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
781 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
783 if (!(n->nud_state & NUD_VALID)) {
784 neigh_event_send(n, NULL);
786 if (fib_lookup(net, fl4, &res, 0) == 0) {
787 struct fib_nh *nh = &FIB_RES_NH(res);
789 update_or_create_fnhe(nh, fl4->daddr, new_gw,
790 0, jiffies + ip_rt_gc_timeout);
793 rt->dst.obsolete = DST_OBSOLETE_KILL;
794 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
801 #ifdef CONFIG_IP_ROUTE_VERBOSE
802 if (IN_DEV_LOG_MARTIANS(in_dev)) {
803 const struct iphdr *iph = (const struct iphdr *) skb->data;
804 __be32 daddr = iph->daddr;
805 __be32 saddr = iph->saddr;
807 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
808 " Advised path = %pI4 -> %pI4\n",
809 &old_gw, dev->name, &new_gw,
816 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
820 const struct iphdr *iph = (const struct iphdr *) skb->data;
821 struct net *net = dev_net(skb->dev);
822 int oif = skb->dev->ifindex;
823 u8 tos = RT_TOS(iph->tos);
824 u8 prot = iph->protocol;
825 u32 mark = skb->mark;
827 rt = (struct rtable *) dst;
829 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
830 __ip_do_redirect(rt, skb, &fl4, true);
833 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
835 struct rtable *rt = (struct rtable *)dst;
836 struct dst_entry *ret = dst;
839 if (dst->obsolete > 0) {
842 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
853 * 1. The first ip_rt_redirect_number redirects are sent
854 * with exponential backoff, then we stop sending them at all,
855 * assuming that the host ignores our redirects.
856 * 2. If we did not see packets requiring redirects
857 * during ip_rt_redirect_silence, we assume that the host
858 * has forgotten the redirected route and start sending redirects again.
860 * This algorithm is much cheaper and more intelligent than dumb load limiting
863 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
864 * and "frag. need" (breaks PMTU discovery) in icmp.c.
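/* Illustrative restatement of the decision made by ip_rt_send_redirect()
 * below (a sketch, not referenced elsewhere in this file): the n-th redirect
 * to a peer is sent only after ip_rt_redirect_load << n jiffies have elapsed
 * since the previous one, and nothing at all is sent once n reaches
 * ip_rt_redirect_number.
 */
static inline bool example_redirect_allowed(const struct inet_peer *peer)
{
        if (peer->rate_tokens >= ip_rt_redirect_number)
                return false;   /* the host seems to ignore us: give up */

        return peer->rate_tokens == 0 ||
               time_after(jiffies,
                          peer->rate_last +
                          (ip_rt_redirect_load << peer->rate_tokens));
}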
867 void ip_rt_send_redirect(struct sk_buff *skb)
869 struct rtable *rt = skb_rtable(skb);
870 struct in_device *in_dev;
871 struct inet_peer *peer;
877 in_dev = __in_dev_get_rcu(rt->dst.dev);
878 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
882 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
883 vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
886 net = dev_net(rt->dst.dev);
887 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
889 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
890 rt_nexthop(rt, ip_hdr(skb)->daddr));
894 /* No redirected packets during ip_rt_redirect_silence;
895 * reset the algorithm.
897 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
898 peer->rate_tokens = 0;
900 /* Too many ignored redirects; do not send anything,
901 * just set peer->rate_last to the last seen redirected packet.
903 if (peer->rate_tokens >= ip_rt_redirect_number) {
904 peer->rate_last = jiffies;
908 /* Check for load limit; set rate_last to the latest sent
911 if (peer->rate_tokens == 0 ||
914 (ip_rt_redirect_load << peer->rate_tokens)))) {
915 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
917 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
918 peer->rate_last = jiffies;
920 #ifdef CONFIG_IP_ROUTE_VERBOSE
922 peer->rate_tokens == ip_rt_redirect_number)
923 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
924 &ip_hdr(skb)->saddr, inet_iif(skb),
925 &ip_hdr(skb)->daddr, &gw);
932 static int ip_error(struct sk_buff *skb)
934 struct rtable *rt = skb_rtable(skb);
935 struct net_device *dev = skb->dev;
936 struct in_device *in_dev;
937 struct inet_peer *peer;
943 if (netif_is_l3_master(skb->dev)) {
944 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
949 in_dev = __in_dev_get_rcu(dev);
951 /* IP on this device is disabled. */
955 net = dev_net(rt->dst.dev);
956 if (!IN_DEV_FORWARD(in_dev)) {
957 switch (rt->dst.error) {
959 __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
963 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
969 switch (rt->dst.error) {
974 code = ICMP_HOST_UNREACH;
977 code = ICMP_NET_UNREACH;
978 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
981 code = ICMP_PKT_FILTERED;
985 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
986 l3mdev_master_ifindex(skb->dev), 1);
991 peer->rate_tokens += now - peer->rate_last;
992 if (peer->rate_tokens > ip_rt_error_burst)
993 peer->rate_tokens = ip_rt_error_burst;
994 peer->rate_last = now;
995 if (peer->rate_tokens >= ip_rt_error_cost)
996 peer->rate_tokens -= ip_rt_error_cost;
1002 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1004 out: kfree_skb(skb);
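/* Illustrative restatement (not referenced elsewhere in this file) of the
 * token bucket that ip_error() above uses to rate-limit ICMP errors: tokens
 * accumulate at one per jiffy up to ip_rt_error_burst, and each transmitted
 * error costs ip_rt_error_cost tokens.
 */
static inline bool example_icmp_error_allowed(struct inet_peer *peer)
{
        unsigned long now = jiffies;

        peer->rate_tokens += now - peer->rate_last;
        if (peer->rate_tokens > ip_rt_error_burst)
                peer->rate_tokens = ip_rt_error_burst;
        peer->rate_last = now;

        if (peer->rate_tokens < ip_rt_error_cost)
                return false;           /* bucket too empty: drop silently */

        peer->rate_tokens -= ip_rt_error_cost;
        return true;
}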
1008 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1010 struct dst_entry *dst = &rt->dst;
1011 struct fib_result res;
1013 if (dst_metric_locked(dst, RTAX_MTU))
1016 if (ipv4_mtu(dst) < mtu)
1019 if (mtu < ip_rt_min_pmtu)
1020 mtu = ip_rt_min_pmtu;
1022 if (rt->rt_pmtu == mtu &&
1023 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1027 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1028 struct fib_nh *nh = &FIB_RES_NH(res);
1030 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
1031 jiffies + ip_rt_mtu_expires);
1036 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1037 struct sk_buff *skb, u32 mtu)
1039 struct rtable *rt = (struct rtable *) dst;
1042 ip_rt_build_flow_key(&fl4, sk, skb);
1043 __ip_rt_update_pmtu(rt, &fl4, mtu);
1046 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1047 int oif, u32 mark, u8 protocol, int flow_flags)
1049 const struct iphdr *iph = (const struct iphdr *) skb->data;
1054 mark = IP4_REPLY_MARK(net, skb->mark);
1056 __build_flow_key(net, &fl4, NULL, iph, oif,
1057 RT_TOS(iph->tos), protocol, mark, flow_flags);
1058 rt = __ip_route_output_key(net, &fl4);
1060 __ip_rt_update_pmtu(rt, &fl4, mtu);
1064 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
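/* Illustrative sketch, not used in this file: a protocol error handler that
 * received an ICMP "fragmentation needed" for a packet it sent would feed
 * the advertised next-hop MTU back into the routing layer roughly like this
 * (tunnel drivers such as net/ipv4/ipip.c are real callers of this form).
 * Here skb->data is assumed to point at the offending IPv4 header, as it
 * does when ICMP hands the error to a protocol handler.
 */
static inline void example_handle_frag_needed(struct sk_buff *skb, u32 mtu,
                                              u8 protocol)
{
        /* oif, mark and flow_flags are left at zero for simplicity */
        ipv4_update_pmtu(skb, dev_net(skb->dev), mtu, 0, 0, protocol, 0);
}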
1066 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1068 const struct iphdr *iph = (const struct iphdr *) skb->data;
1072 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1074 if (!fl4.flowi4_mark)
1075 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1077 rt = __ip_route_output_key(sock_net(sk), &fl4);
1079 __ip_rt_update_pmtu(rt, &fl4, mtu);
1084 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1086 const struct iphdr *iph = (const struct iphdr *) skb->data;
1089 struct dst_entry *odst = NULL;
1091 struct net *net = sock_net(sk);
1095 if (!ip_sk_accept_pmtu(sk))
1098 odst = sk_dst_get(sk);
1100 if (sock_owned_by_user(sk) || !odst) {
1101 __ipv4_sk_update_pmtu(skb, sk, mtu);
1105 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1107 rt = (struct rtable *)odst;
1108 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1109 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1116 __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
1118 if (!dst_check(&rt->dst, 0)) {
1120 dst_release(&rt->dst);
1122 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1130 sk_dst_set(sk, &rt->dst);
1136 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1138 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1139 int oif, u32 mark, u8 protocol, int flow_flags)
1141 const struct iphdr *iph = (const struct iphdr *) skb->data;
1145 __build_flow_key(net, &fl4, NULL, iph, oif,
1146 RT_TOS(iph->tos), protocol, mark, flow_flags);
1147 rt = __ip_route_output_key(net, &fl4);
1149 __ip_do_redirect(rt, skb, &fl4, false);
1153 EXPORT_SYMBOL_GPL(ipv4_redirect);
1155 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1157 const struct iphdr *iph = (const struct iphdr *) skb->data;
1160 struct net *net = sock_net(sk);
1162 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1163 rt = __ip_route_output_key(net, &fl4);
1165 __ip_do_redirect(rt, skb, &fl4, false);
1169 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1171 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1173 struct rtable *rt = (struct rtable *) dst;
1175 /* All IPV4 dsts are created with ->obsolete set to the value
1176 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1177 * into this function always.
1179 * When a PMTU/redirect information update invalidates a route,
1180 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1181 * DST_OBSOLETE_DEAD by dst_free().
1183 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1188 static void ipv4_link_failure(struct sk_buff *skb)
1192 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1194 rt = skb_rtable(skb);
1196 dst_set_expires(&rt->dst, 0);
1199 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1201 pr_debug("%s: %pI4 -> %pI4, %s\n",
1202 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1203 skb->dev ? skb->dev->name : "?");
1210 We do not cache the source address of the outgoing interface,
1211 because it is used only by the IP RR, TS and SRR options,
1212 so it is out of the fast path.
1214 BTW remember: "addr" is allowed to be unaligned
1218 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1222 if (rt_is_output_route(rt))
1223 src = ip_hdr(skb)->saddr;
1225 struct fib_result res;
1231 memset(&fl4, 0, sizeof(fl4));
1232 fl4.daddr = iph->daddr;
1233 fl4.saddr = iph->saddr;
1234 fl4.flowi4_tos = RT_TOS(iph->tos);
1235 fl4.flowi4_oif = rt->dst.dev->ifindex;
1236 fl4.flowi4_iif = skb->dev->ifindex;
1237 fl4.flowi4_mark = skb->mark;
1240 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1241 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1243 src = inet_select_addr(rt->dst.dev,
1244 rt_nexthop(rt, iph->daddr),
1248 memcpy(addr, &src, 4);
1251 #ifdef CONFIG_IP_ROUTE_CLASSID
1252 static void set_class_tag(struct rtable *rt, u32 tag)
1254 if (!(rt->dst.tclassid & 0xFFFF))
1255 rt->dst.tclassid |= tag & 0xFFFF;
1256 if (!(rt->dst.tclassid & 0xFFFF0000))
1257 rt->dst.tclassid |= tag & 0xFFFF0000;
1261 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1263 unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1264 unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1267 return min(advmss, IPV4_MAX_PMTU - header_size);
1270 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1272 const struct rtable *rt = (const struct rtable *) dst;
1273 unsigned int mtu = rt->rt_pmtu;
1275 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1276 mtu = dst_metric_raw(dst, RTAX_MTU);
1281 mtu = READ_ONCE(dst->dev->mtu);
1283 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1284 if (rt->rt_uses_gateway && mtu > 576)
1288 mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1290 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1293 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1295 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1296 struct fib_nh_exception *fnhe;
1302 hval = fnhe_hashfun(daddr);
1304 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1305 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1306 if (fnhe->fnhe_daddr == daddr)
1312 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1313 __be32 daddr, const bool do_cache)
1317 spin_lock_bh(&fnhe_lock);
1319 if (daddr == fnhe->fnhe_daddr) {
1320 struct rtable __rcu **porig;
1321 struct rtable *orig;
1322 int genid = fnhe_genid(dev_net(rt->dst.dev));
1324 if (rt_is_input_route(rt))
1325 porig = &fnhe->fnhe_rth_input;
1327 porig = &fnhe->fnhe_rth_output;
1328 orig = rcu_dereference(*porig);
1330 if (fnhe->fnhe_genid != genid) {
1331 fnhe->fnhe_genid = genid;
1333 fnhe->fnhe_pmtu = 0;
1334 fnhe->fnhe_expires = 0;
1335 fnhe_flush_routes(fnhe);
1338 fill_route_from_fnhe(rt, fnhe);
1339 if (!rt->rt_gateway)
1340 rt->rt_gateway = daddr;
1344 rcu_assign_pointer(*porig, rt);
1346 dst_dev_put(&orig->dst);
1347 dst_release(&orig->dst);
1352 fnhe->fnhe_stamp = jiffies;
1354 spin_unlock_bh(&fnhe_lock);
1359 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1361 struct rtable *orig, *prev, **p;
1364 if (rt_is_input_route(rt)) {
1365 p = (struct rtable **)&nh->nh_rth_input;
1367 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1371 /* hold dst before doing cmpxchg() to avoid race condition
1375 prev = cmpxchg(p, orig, rt);
1378 dst_dev_put(&orig->dst);
1379 dst_release(&orig->dst);
1382 dst_release(&rt->dst);
1389 struct uncached_list {
1391 struct list_head head;
1394 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1396 static void rt_add_uncached_list(struct rtable *rt)
1398 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1400 rt->rt_uncached_list = ul;
1402 spin_lock_bh(&ul->lock);
1403 list_add_tail(&rt->rt_uncached, &ul->head);
1404 spin_unlock_bh(&ul->lock);
1407 static void ipv4_dst_destroy(struct dst_entry *dst)
1409 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1410 struct rtable *rt = (struct rtable *) dst;
1412 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1415 if (!list_empty(&rt->rt_uncached)) {
1416 struct uncached_list *ul = rt->rt_uncached_list;
1418 spin_lock_bh(&ul->lock);
1419 list_del(&rt->rt_uncached);
1420 spin_unlock_bh(&ul->lock);
1424 void rt_flush_dev(struct net_device *dev)
1426 struct net *net = dev_net(dev);
1430 for_each_possible_cpu(cpu) {
1431 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1433 spin_lock_bh(&ul->lock);
1434 list_for_each_entry(rt, &ul->head, rt_uncached) {
1435 if (rt->dst.dev != dev)
1437 rt->dst.dev = net->loopback_dev;
1438 dev_hold(rt->dst.dev);
1441 spin_unlock_bh(&ul->lock);
1445 static bool rt_cache_valid(const struct rtable *rt)
1448 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1452 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1453 const struct fib_result *res,
1454 struct fib_nh_exception *fnhe,
1455 struct fib_info *fi, u16 type, u32 itag,
1456 const bool do_cache)
1458 bool cached = false;
1461 struct fib_nh *nh = &FIB_RES_NH(*res);
1463 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1464 rt->rt_gateway = nh->nh_gw;
1465 rt->rt_uses_gateway = 1;
1467 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1468 if (fi->fib_metrics != &dst_default_metrics) {
1469 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1470 refcount_inc(&fi->fib_metrics->refcnt);
1472 #ifdef CONFIG_IP_ROUTE_CLASSID
1473 rt->dst.tclassid = nh->nh_tclassid;
1475 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
1477 cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1479 cached = rt_cache_route(nh, rt);
1480 if (unlikely(!cached)) {
1481 /* Routes we intend to cache in the nexthop exception or
1482 * FIB nexthop have the DST_NOCACHE bit clear.
1483 * However, if we are unsuccessful at storing this
1484 * route into the cache, we really need to set it.
1486 if (!rt->rt_gateway)
1487 rt->rt_gateway = daddr;
1488 rt_add_uncached_list(rt);
1491 rt_add_uncached_list(rt);
1493 #ifdef CONFIG_IP_ROUTE_CLASSID
1494 #ifdef CONFIG_IP_MULTIPLE_TABLES
1495 set_class_tag(rt, res->tclassid);
1497 set_class_tag(rt, itag);
1501 struct rtable *rt_dst_alloc(struct net_device *dev,
1502 unsigned int flags, u16 type,
1503 bool nopolicy, bool noxfrm, bool will_cache)
1507 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1508 (will_cache ? 0 : DST_HOST) |
1509 (nopolicy ? DST_NOPOLICY : 0) |
1510 (noxfrm ? DST_NOXFRM : 0));
1513 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1514 rt->rt_flags = flags;
1516 rt->rt_is_input = 0;
1520 rt->rt_uses_gateway = 0;
1521 rt->rt_table_id = 0;
1522 INIT_LIST_HEAD(&rt->rt_uncached);
1524 rt->dst.output = ip_output;
1525 if (flags & RTCF_LOCAL)
1526 rt->dst.input = ip_local_deliver;
1531 EXPORT_SYMBOL(rt_dst_alloc);
1533 /* called in rcu_read_lock() section */
1534 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1535 u8 tos, struct net_device *dev,
1536 struct in_device *in_dev, u32 *itag)
1540 /* Primary sanity checks. */
1544 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1545 skb->protocol != htons(ETH_P_IP))
1548 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1551 if (ipv4_is_zeronet(saddr)) {
1552 if (!ipv4_is_local_multicast(daddr))
1555 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1563 /* called in rcu_read_lock() section */
1564 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1565 u8 tos, struct net_device *dev, int our)
1567 struct in_device *in_dev = __in_dev_get_rcu(dev);
1568 unsigned int flags = RTCF_MULTICAST;
1573 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1578 flags |= RTCF_LOCAL;
1580 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1581 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1585 #ifdef CONFIG_IP_ROUTE_CLASSID
1586 rth->dst.tclassid = itag;
1588 rth->dst.output = ip_rt_bug;
1589 rth->rt_is_input= 1;
1591 #ifdef CONFIG_IP_MROUTE
1592 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1593 rth->dst.input = ip_mr_input;
1595 RT_CACHE_STAT_INC(in_slow_mc);
1597 skb_dst_set(skb, &rth->dst);
1602 static void ip_handle_martian_source(struct net_device *dev,
1603 struct in_device *in_dev,
1604 struct sk_buff *skb,
1608 RT_CACHE_STAT_INC(in_martian_src);
1609 #ifdef CONFIG_IP_ROUTE_VERBOSE
1610 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1612 * Per the RFC1812 recommendation: if the source is martian,
1613 * the only hint we have is the MAC header.
1615 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1616 &daddr, &saddr, dev->name);
1617 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1618 print_hex_dump(KERN_WARNING, "ll header: ",
1619 DUMP_PREFIX_OFFSET, 16, 1,
1620 skb_mac_header(skb),
1621 dev->hard_header_len, true);
1627 static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1629 struct fnhe_hash_bucket *hash;
1630 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1631 u32 hval = fnhe_hashfun(daddr);
1633 spin_lock_bh(&fnhe_lock);
1635 hash = rcu_dereference_protected(nh->nh_exceptions,
1636 lockdep_is_held(&fnhe_lock));
1639 fnhe_p = &hash->chain;
1640 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1642 if (fnhe->fnhe_daddr == daddr) {
1643 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1644 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1645 fnhe_flush_routes(fnhe);
1646 kfree_rcu(fnhe, rcu);
1649 fnhe_p = &fnhe->fnhe_next;
1650 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1651 lockdep_is_held(&fnhe_lock));
1654 spin_unlock_bh(&fnhe_lock);
1657 static void set_lwt_redirect(struct rtable *rth)
1659 if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
1660 rth->dst.lwtstate->orig_output = rth->dst.output;
1661 rth->dst.output = lwtunnel_output;
1664 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
1665 rth->dst.lwtstate->orig_input = rth->dst.input;
1666 rth->dst.input = lwtunnel_input;
1670 /* called in rcu_read_lock() section */
1671 static int __mkroute_input(struct sk_buff *skb,
1672 const struct fib_result *res,
1673 struct in_device *in_dev,
1674 __be32 daddr, __be32 saddr, u32 tos)
1676 struct fib_nh_exception *fnhe;
1679 struct in_device *out_dev;
1683 /* get a working reference to the output device */
1684 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1686 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1690 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1691 in_dev->dev, in_dev, &itag);
1693 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1699 do_cache = res->fi && !itag;
1700 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1701 skb->protocol == htons(ETH_P_IP) &&
1702 (IN_DEV_SHARED_MEDIA(out_dev) ||
1703 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1704 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1706 if (skb->protocol != htons(ETH_P_IP)) {
1707 /* Not IP (i.e. ARP). Do not create a route if it is
1708 * invalid for proxy arp. DNAT routes are always valid.
1710 * The proxy arp feature has been extended to allow ARP
1711 * replies back on the same interface, to support
1712 * Private VLAN switch technologies. See arp.c.
1714 if (out_dev == in_dev &&
1715 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1721 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1724 rth = rcu_dereference(fnhe->fnhe_rth_input);
1725 if (rth && rth->dst.expires &&
1726 time_after(jiffies, rth->dst.expires)) {
1727 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1734 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1737 if (rt_cache_valid(rth)) {
1738 skb_dst_set_noref(skb, &rth->dst);
1743 rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1744 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1745 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1751 rth->rt_is_input = 1;
1753 rth->rt_table_id = res->table->tb_id;
1754 RT_CACHE_STAT_INC(in_slow_tot);
1756 rth->dst.input = ip_forward;
1758 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1760 set_lwt_redirect(rth);
1761 skb_dst_set(skb, &rth->dst);
1768 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1769 /* To make ICMP packets follow the right flow, the multipath hash is
1770 * calculated from the inner IP addresses.
1772 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1773 struct flow_keys *hash_keys)
1775 const struct iphdr *outer_iph = ip_hdr(skb);
1776 const struct iphdr *inner_iph;
1777 const struct icmphdr *icmph;
1778 struct iphdr _inner_iph;
1779 struct icmphdr _icmph;
1781 hash_keys->addrs.v4addrs.src = outer_iph->saddr;
1782 hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
1783 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1786 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1789 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1794 if (icmph->type != ICMP_DEST_UNREACH &&
1795 icmph->type != ICMP_REDIRECT &&
1796 icmph->type != ICMP_TIME_EXCEEDED &&
1797 icmph->type != ICMP_PARAMETERPROB)
1800 inner_iph = skb_header_pointer(skb,
1801 outer_iph->ihl * 4 + sizeof(_icmph),
1802 sizeof(_inner_iph), &_inner_iph);
1805 hash_keys->addrs.v4addrs.src = inner_iph->saddr;
1806 hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
1809 /* if skb is set it will be used and fl4 can be NULL */
1810 int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
1811 const struct sk_buff *skb)
1813 struct net *net = fi->fib_net;
1814 struct flow_keys hash_keys;
1817 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1819 memset(&hash_keys, 0, sizeof(hash_keys));
1820 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1822 ip_multipath_l3_keys(skb, &hash_keys);
1824 hash_keys.addrs.v4addrs.src = fl4->saddr;
1825 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1829 /* skb is currently provided only when forwarding */
1831 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1832 struct flow_keys keys;
1834 /* short-circuit if we already have L4 hash present */
1836 return skb_get_hash_raw(skb) >> 1;
1837 memset(&hash_keys, 0, sizeof(hash_keys));
1838 skb_flow_dissect_flow_keys(skb, &keys, flag);
1840 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1841 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1842 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1843 hash_keys.ports.src = keys.ports.src;
1844 hash_keys.ports.dst = keys.ports.dst;
1845 hash_keys.basic.ip_proto = keys.basic.ip_proto;
1847 memset(&hash_keys, 0, sizeof(hash_keys));
1848 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1849 hash_keys.addrs.v4addrs.src = fl4->saddr;
1850 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1851 hash_keys.ports.src = fl4->fl4_sport;
1852 hash_keys.ports.dst = fl4->fl4_dport;
1853 hash_keys.basic.ip_proto = fl4->flowi4_proto;
1857 mhash = flow_hash_from_keys(&hash_keys);
1861 EXPORT_SYMBOL_GPL(fib_multipath_hash);
1862 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
1864 static int ip_mkroute_input(struct sk_buff *skb,
1865 struct fib_result *res,
1866 struct in_device *in_dev,
1867 __be32 daddr, __be32 saddr, u32 tos)
1869 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1870 if (res->fi && res->fi->fib_nhs > 1) {
1871 int h = fib_multipath_hash(res->fi, NULL, skb);
1873 fib_select_multipath(res, h);
1877 /* create a routing cache entry */
1878 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1882 * NOTE. We drop all packets that have local source
1883 * addresses, because every properly looped-back packet
1884 * must already have the correct destination attached by the output routine.
1886 * This approach solves two big problems:
1887 * 1. Non-simplex devices are handled properly.
1888 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1889 * called with rcu_read_lock()
1892 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1893 u8 tos, struct net_device *dev,
1894 struct fib_result *res)
1896 struct in_device *in_dev = __in_dev_get_rcu(dev);
1897 struct ip_tunnel_info *tun_info;
1899 unsigned int flags = 0;
1903 struct net *net = dev_net(dev);
1906 /* IP on this device is disabled. */
1911 /* Check for the most weird martians, which may not be detected
1915 tun_info = skb_tunnel_info(skb);
1916 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1917 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1919 fl4.flowi4_tun_key.tun_id = 0;
1922 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1923 goto martian_source;
1927 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1930 /* Accept zero addresses only to limited broadcast;
1931 * I do not even know whether to fix this or not. Waiting for complaints :-)
1933 if (ipv4_is_zeronet(saddr))
1934 goto martian_source;
1936 if (ipv4_is_zeronet(daddr))
1937 goto martian_destination;
1939 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1940 * and calls it at most once, when daddr and/or saddr are loopback addresses
1942 if (ipv4_is_loopback(daddr)) {
1943 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1944 goto martian_destination;
1945 } else if (ipv4_is_loopback(saddr)) {
1946 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1947 goto martian_source;
1951 * Now we are ready to route the packet.
1954 fl4.flowi4_iif = dev->ifindex;
1955 fl4.flowi4_mark = skb->mark;
1956 fl4.flowi4_tos = tos;
1957 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1958 fl4.flowi4_flags = 0;
1961 fl4.flowi4_uid = sock_net_uid(net, NULL);
1962 err = fib_lookup(net, &fl4, res, 0);
1964 if (!IN_DEV_FORWARD(in_dev))
1965 err = -EHOSTUNREACH;
1969 if (res->type == RTN_BROADCAST)
1972 if (res->type == RTN_LOCAL) {
1973 err = fib_validate_source(skb, saddr, daddr, tos,
1974 0, dev, in_dev, &itag);
1976 goto martian_source;
1980 if (!IN_DEV_FORWARD(in_dev)) {
1981 err = -EHOSTUNREACH;
1984 if (res->type != RTN_UNICAST)
1985 goto martian_destination;
1987 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1991 if (skb->protocol != htons(ETH_P_IP))
1994 if (!ipv4_is_zeronet(saddr)) {
1995 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1998 goto martian_source;
2000 flags |= RTCF_BROADCAST;
2001 res->type = RTN_BROADCAST;
2002 RT_CACHE_STAT_INC(in_brd);
2008 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
2009 if (rt_cache_valid(rth)) {
2010 skb_dst_set_noref(skb, &rth->dst);
2018 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2019 flags | RTCF_LOCAL, res->type,
2020 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2024 rth->dst.output= ip_rt_bug;
2025 #ifdef CONFIG_IP_ROUTE_CLASSID
2026 rth->dst.tclassid = itag;
2028 rth->rt_is_input = 1;
2030 rth->rt_table_id = res->table->tb_id;
2032 RT_CACHE_STAT_INC(in_slow_tot);
2033 if (res->type == RTN_UNREACHABLE) {
2034 rth->dst.input= ip_error;
2035 rth->dst.error= -err;
2036 rth->rt_flags &= ~RTCF_LOCAL;
2040 struct fib_nh *nh = &FIB_RES_NH(*res);
2042 rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
2043 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2044 WARN_ON(rth->dst.input == lwtunnel_input);
2045 rth->dst.lwtstate->orig_input = rth->dst.input;
2046 rth->dst.input = lwtunnel_input;
2049 if (unlikely(!rt_cache_route(nh, rth)))
2050 rt_add_uncached_list(rth);
2052 skb_dst_set(skb, &rth->dst);
2057 RT_CACHE_STAT_INC(in_no_route);
2058 res->type = RTN_UNREACHABLE;
2064 * Do not cache martian addresses: they should be logged (RFC1812)
2066 martian_destination:
2067 RT_CACHE_STAT_INC(in_martian_dst);
2068 #ifdef CONFIG_IP_ROUTE_VERBOSE
2069 if (IN_DEV_LOG_MARTIANS(in_dev))
2070 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2071 &daddr, &saddr, dev->name);
2083 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2087 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2088 u8 tos, struct net_device *dev)
2090 struct fib_result res;
2093 tos &= IPTOS_RT_MASK;
2095 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2100 EXPORT_SYMBOL(ip_route_input_noref);
2102 /* called with rcu_read_lock held */
2103 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2104 u8 tos, struct net_device *dev, struct fib_result *res)
2106 /* Multicast recognition logic was moved from the route cache to here.
2107 The problem was that too many Ethernet cards have broken/missing
2108 hardware multicast filters :-( As a result, a host on a multicast
2109 network acquires a lot of useless route cache entries, e.g. from
2110 SDR messages from all over the world. Now we try to get rid of them.
2111 Really, provided the software IP multicast filter is organized
2112 reasonably (at least, hashed), it does not result in a slowdown
2113 compared with route cache reject entries.
2114 Note that multicast routers are not affected, because a
2115 route cache entry is created eventually.
2117 if (ipv4_is_multicast(daddr)) {
2118 struct in_device *in_dev = __in_dev_get_rcu(dev);
2123 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2124 ip_hdr(skb)->protocol);
2126 /* check l3 master if no match yet */
2127 if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
2128 struct in_device *l3_in_dev;
2130 l3_in_dev = __in_dev_get_rcu(skb->dev);
2132 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2133 ip_hdr(skb)->protocol);
2137 #ifdef CONFIG_IP_MROUTE
2139 (!ipv4_is_local_multicast(daddr) &&
2140 IN_DEV_MFORWARD(in_dev))
2143 err = ip_route_input_mc(skb, daddr, saddr,
2149 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2152 /* called with rcu_read_lock() */
2153 static struct rtable *__mkroute_output(const struct fib_result *res,
2154 const struct flowi4 *fl4, int orig_oif,
2155 struct net_device *dev_out,
2158 struct fib_info *fi = res->fi;
2159 struct fib_nh_exception *fnhe;
2160 struct in_device *in_dev;
2161 u16 type = res->type;
2165 in_dev = __in_dev_get_rcu(dev_out);
2167 return ERR_PTR(-EINVAL);
2169 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2170 if (ipv4_is_loopback(fl4->saddr) &&
2171 !(dev_out->flags & IFF_LOOPBACK) &&
2172 !netif_is_l3_master(dev_out))
2173 return ERR_PTR(-EINVAL);
2175 if (ipv4_is_lbcast(fl4->daddr))
2176 type = RTN_BROADCAST;
2177 else if (ipv4_is_multicast(fl4->daddr))
2178 type = RTN_MULTICAST;
2179 else if (ipv4_is_zeronet(fl4->daddr))
2180 return ERR_PTR(-EINVAL);
2182 if (dev_out->flags & IFF_LOOPBACK)
2183 flags |= RTCF_LOCAL;
2186 if (type == RTN_BROADCAST) {
2187 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2189 } else if (type == RTN_MULTICAST) {
2190 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2191 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2193 flags &= ~RTCF_LOCAL;
2196 /* If a multicast route does not exist, use the
2197 * default one, but do not gateway in this case.
2200 if (fi && res->prefixlen < 4)
2202 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2203 (orig_oif != dev_out->ifindex)) {
2204 /* For local routes that require a particular output interface
2205 * we do not want to cache the result. Caching the result
2206 * causes incorrect behaviour when there are multiple source
2207 * addresses on the interface, the end result being that if the
2208 * intended recipient is waiting on that interface for the
2209 * packet, he won't receive it because it will be delivered on
2210 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2211 * be set to the loopback interface as well.
2217 do_cache &= fi != NULL;
2219 struct rtable __rcu **prth;
2220 struct fib_nh *nh = &FIB_RES_NH(*res);
2222 fnhe = find_exception(nh, fl4->daddr);
2224 prth = &fnhe->fnhe_rth_output;
2225 rth = rcu_dereference(*prth);
2226 if (rth && rth->dst.expires &&
2227 time_after(jiffies, rth->dst.expires)) {
2228 ip_del_fnhe(nh, fl4->daddr);
2235 if (unlikely(fl4->flowi4_flags &
2236 FLOWI_FLAG_KNOWN_NH &&
2238 nh->nh_scope == RT_SCOPE_LINK))) {
2242 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2243 rth = rcu_dereference(*prth);
2246 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2251 rth = rt_dst_alloc(dev_out, flags, type,
2252 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2253 IN_DEV_CONF_GET(in_dev, NOXFRM),
2256 return ERR_PTR(-ENOBUFS);
2258 rth->rt_iif = orig_oif;
2260 rth->rt_table_id = res->table->tb_id;
2262 RT_CACHE_STAT_INC(out_slow_tot);
2264 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2265 if (flags & RTCF_LOCAL &&
2266 !(dev_out->flags & IFF_LOOPBACK)) {
2267 rth->dst.output = ip_mc_output;
2268 RT_CACHE_STAT_INC(out_slow_mc);
2270 #ifdef CONFIG_IP_MROUTE
2271 if (type == RTN_MULTICAST) {
2272 if (IN_DEV_MFORWARD(in_dev) &&
2273 !ipv4_is_local_multicast(fl4->daddr)) {
2274 rth->dst.input = ip_mr_input;
2275 rth->dst.output = ip_mc_output;
2281 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2282 set_lwt_redirect(rth);
2288 * Major route resolver routine.
2291 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2292 const struct sk_buff *skb)
2294 __u8 tos = RT_FL_TOS(fl4);
2295 struct fib_result res;
2302 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2303 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2304 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2305 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2308 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2313 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2315 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2316 struct fib_result *res,
2317 const struct sk_buff *skb)
2319 struct net_device *dev_out = NULL;
2320 int orig_oif = fl4->flowi4_oif;
2321 unsigned int flags = 0;
2323 int err = -ENETUNREACH;
2326 rth = ERR_PTR(-EINVAL);
2327 if (ipv4_is_multicast(fl4->saddr) ||
2328 ipv4_is_lbcast(fl4->saddr) ||
2329 ipv4_is_zeronet(fl4->saddr))
2332 /* I removed the check for oif == dev_out->oif here.
2333 It was wrong for two reasons:
2334 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2335 is assigned to multiple interfaces.
2336 2. Moreover, we are allowed to send packets with a saddr
2337 of another iface. --ANK
2340 if (fl4->flowi4_oif == 0 &&
2341 (ipv4_is_multicast(fl4->daddr) ||
2342 ipv4_is_lbcast(fl4->daddr))) {
2343 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2344 dev_out = __ip_dev_find(net, fl4->saddr, false);
2348 /* Special hack: the user can direct multicasts
2349 and limited broadcast via the necessary interface
2350 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2351 This hack is not just for fun, it allows
2352 vic, vat and friends to work.
2353 They bind a socket to loopback, set the ttl to zero
2354 and expect that it will work.
2355 From the viewpoint of the routing cache they are broken,
2356 because we are not allowed to build a multicast path
2357 with a loopback source addr (look, the routing cache
2358 cannot know that the ttl is zero, so the packet
2359 will not leave this host and the route is valid).
2360 Luckily, this hack is a good workaround.
2363 fl4->flowi4_oif = dev_out->ifindex;
2367 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2368 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2369 if (!__ip_dev_find(net, fl4->saddr, false))
2375 if (fl4->flowi4_oif) {
2376 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2377 rth = ERR_PTR(-ENODEV);
2381 /* RACE: Check return value of inet_select_addr instead. */
2382 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2383 rth = ERR_PTR(-ENETUNREACH);
2386 if (ipv4_is_local_multicast(fl4->daddr) ||
2387 ipv4_is_lbcast(fl4->daddr) ||
2388 fl4->flowi4_proto == IPPROTO_IGMP) {
2390 fl4->saddr = inet_select_addr(dev_out, 0,
2395 if (ipv4_is_multicast(fl4->daddr))
2396 fl4->saddr = inet_select_addr(dev_out, 0,
2398 else if (!fl4->daddr)
2399 fl4->saddr = inet_select_addr(dev_out, 0,
2405 fl4->daddr = fl4->saddr;
2407 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2408 dev_out = net->loopback_dev;
2409 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2410 res->type = RTN_LOCAL;
2411 flags |= RTCF_LOCAL;
2415 err = fib_lookup(net, fl4, res, 0);
2419 if (fl4->flowi4_oif &&
2420 (ipv4_is_multicast(fl4->daddr) ||
2421 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2422 /* Apparently, the routing tables are wrong. Assume
2423 that the destination is on-link.
2426 Because we are allowed to send to an iface
2427 even if it has NO routes and NO assigned
2428 addresses. When oif is specified, the routing
2429 tables are looked up with only one purpose:
2430 to catch whether the destination is gatewayed rather than
2431 direct. Moreover, if MSG_DONTROUTE is set,
2432 we send the packet, ignoring both routing tables
2433 and ifaddr state. --ANK
2436 We could do this even if oif is unknown,
2437 as IPv6 likely does, but we do not.
2440 if (fl4->saddr == 0)
2441 fl4->saddr = inet_select_addr(dev_out, 0,
2443 res->type = RTN_UNICAST;
2450 if (res->type == RTN_LOCAL) {
2452 if (res->fi->fib_prefsrc)
2453 fl4->saddr = res->fi->fib_prefsrc;
2455 fl4->saddr = fl4->daddr;
2458 /* L3 master device is the loopback for that domain */
2459 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2462 /* make sure orig_oif points to fib result device even
2463 * though packet rx/tx happens over loopback or l3mdev
2465 orig_oif = FIB_RES_OIF(*res);
2467 fl4->flowi4_oif = dev_out->ifindex;
2468 flags |= RTCF_LOCAL;
2472 fib_select_path(net, res, fl4, skb);
2474 dev_out = FIB_RES_DEV(*res);
2475 fl4->flowi4_oif = dev_out->ifindex;
2479 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2485 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2490 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2492 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2494 return mtu ? : dst->dev->mtu;
2497 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2498 struct sk_buff *skb, u32 mtu)
2502 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2503 struct sk_buff *skb)
2507 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2513 static struct dst_ops ipv4_dst_blackhole_ops = {
2515 .check = ipv4_blackhole_dst_check,
2516 .mtu = ipv4_blackhole_mtu,
2517 .default_advmss = ipv4_default_advmss,
2518 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2519 .redirect = ipv4_rt_blackhole_redirect,
2520 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2521 .neigh_lookup = ipv4_neigh_lookup,
2524 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2526 struct rtable *ort = (struct rtable *) dst_orig;
2529 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2531 struct dst_entry *new = &rt->dst;
2534 new->input = dst_discard;
2535 new->output = dst_discard_out;
2537 new->dev = net->loopback_dev;
2541 rt->rt_is_input = ort->rt_is_input;
2542 rt->rt_iif = ort->rt_iif;
2543 rt->rt_pmtu = ort->rt_pmtu;
2545 rt->rt_genid = rt_genid_ipv4(net);
2546 rt->rt_flags = ort->rt_flags;
2547 rt->rt_type = ort->rt_type;
2548 rt->rt_gateway = ort->rt_gateway;
2549 rt->rt_uses_gateway = ort->rt_uses_gateway;
2551 INIT_LIST_HEAD(&rt->rt_uncached);
2554 dst_release(dst_orig);
2556 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
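/*
 * Usage sketch (the caller shown lives outside this file and is an
 * assumption): the xfrm layer swaps a real route for a blackhole copy
 * while an IPsec state is still being negotiated, so packets keep a
 * valid dst but are silently dropped:
 *
 *	new = ipv4_blackhole_route(net, &rt->dst);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	skb_dst_set(skb, new);
 *
 * Anything transmitted over the blackhole dst ends up in dst_discard()
 * or dst_discard_out() and goes nowhere.
 */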
2559 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2560 const struct sock *sk)
2562 struct rtable *rt = __ip_route_output_key(net, flp4);
2567 if (flp4->flowi4_proto)
2568 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2569 flowi4_to_flowi(flp4),
2574 EXPORT_SYMBOL_GPL(ip_route_output_flow);
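/*
 * Caller sketch (illustrative; net and daddr are assumed valid, and the
 * route is released immediately, which real callers do not do): resolve
 * an output route for a locally generated UDP flow:
 *
 *	struct flowi4 fl4 = {
 *		.daddr        = daddr,
 *		.flowi4_proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, NULL);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	ip_rt_put(rt);
 *
 * Passing a socket instead of NULL lets the xfrm lookup take per-socket
 * policies into account.
 */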
2576 /* called with rcu_read_lock held */
2577 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2578 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2581 struct rtable *rt = skb_rtable(skb);
2583 struct nlmsghdr *nlh;
2584 unsigned long expires = 0;
2586 u32 metrics[RTAX_MAX];
2588 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2592 r = nlmsg_data(nlh);
2593 r->rtm_family = AF_INET;
2594 r->rtm_dst_len = 32;
2596 r->rtm_tos = fl4->flowi4_tos;
2597 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2598 if (nla_put_u32(skb, RTA_TABLE, table_id))
2599 goto nla_put_failure;
2600 r->rtm_type = rt->rt_type;
2601 r->rtm_scope = RT_SCOPE_UNIVERSE;
2602 r->rtm_protocol = RTPROT_UNSPEC;
2603 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2604 if (rt->rt_flags & RTCF_NOTIFY)
2605 r->rtm_flags |= RTM_F_NOTIFY;
2606 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2607 r->rtm_flags |= RTCF_DOREDIRECT;
2609 if (nla_put_in_addr(skb, RTA_DST, dst))
2610 goto nla_put_failure;
2612 r->rtm_src_len = 32;
2613 if (nla_put_in_addr(skb, RTA_SRC, src))
2614 goto nla_put_failure;
2616 if (rt->dst.dev &&
2617 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2618 goto nla_put_failure;
2619 #ifdef CONFIG_IP_ROUTE_CLASSID
2620 if (rt->dst.tclassid &&
2621 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2622 goto nla_put_failure;
2624 if (!rt_is_input_route(rt) &&
2625 fl4->saddr != src) {
2626 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2627 goto nla_put_failure;
2629 if (rt->rt_uses_gateway &&
2630 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2631 goto nla_put_failure;
2633 expires = rt->dst.expires;
2635 unsigned long now = jiffies;
2637 if (time_before(now, expires))
2643 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2644 if (rt->rt_pmtu && expires)
2645 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2646 if (rtnetlink_put_metrics(skb, metrics) < 0)
2647 goto nla_put_failure;
2649 if (fl4->flowi4_mark &&
2650 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2651 goto nla_put_failure;
2653 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2654 nla_put_u32(skb, RTA_UID,
2655 from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2656 goto nla_put_failure;
2658 error = rt->dst.error;
2660 if (rt_is_input_route(rt)) {
2661 #ifdef CONFIG_IP_MROUTE
2662 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2663 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2664 int err = ipmr_get_route(net, skb,
2665 fl4->saddr, fl4->daddr,
2671 goto nla_put_failure;
2675 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2676 goto nla_put_failure;
2679 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2680 goto nla_put_failure;
2682 nlmsg_end(skb, nlh);
2686 nlmsg_cancel(skb, nlh);
2690 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2691 struct netlink_ext_ack *extack)
2693 struct net *net = sock_net(in_skb->sk);
2695 struct nlattr *tb[RTA_MAX+1];
2696 struct fib_result res = {};
2697 struct rtable *rt = NULL;
2704 struct sk_buff *skb;
2705 u32 table_id = RT_TABLE_MAIN;
2708 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
2713 rtm = nlmsg_data(nlh);
2715 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2721 /* Reserve room for dummy headers; this skb can pass
2722 through a good chunk of the routing engine.
2724 skb_reset_mac_header(skb);
2725 skb_reset_network_header(skb);
2727 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2728 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2729 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2730 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2732 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2734 uid = (iif ? INVALID_UID : current_uid());
2736 /* Bugfix: need to give ip_route_input enough of an IP header to
2739 ip_hdr(skb)->protocol = IPPROTO_UDP;
2740 ip_hdr(skb)->saddr = src;
2741 ip_hdr(skb)->daddr = dst;
2743 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2745 memset(&fl4, 0, sizeof(fl4));
2748 fl4.flowi4_tos = rtm->rtm_tos;
2749 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2750 fl4.flowi4_mark = mark;
2751 fl4.flowi4_uid = uid;
2756 struct net_device *dev;
2758 dev = dev_get_by_index_rcu(net, iif);
2764 skb->protocol = htons(ETH_P_IP);
2767 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
2770 rt = skb_rtable(skb);
2771 if (err == 0 && rt->dst.error)
2772 err = -rt->dst.error;
2774 fl4.flowi4_iif = LOOPBACK_IFINDEX;
2775 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
2780 skb_dst_set(skb, &rt->dst);
2786 if (rtm->rtm_flags & RTM_F_NOTIFY)
2787 rt->rt_flags |= RTCF_NOTIFY;
2789 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
2790 table_id = rt->rt_table_id;
2792 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
2794 err = fib_props[res.type].error;
2796 err = -EHOSTUNREACH;
2799 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
2800 nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
2801 rt->rt_type, res.prefix, res.prefixlen,
2802 fl4.flowi4_tos, res.fi, 0);
2804 err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
2805 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
2812 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
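/*
 * Userspace sketch of the request this handler answers, roughly what
 * "ip route get 203.0.113.5" sends over an AF_NETLINK/NETLINK_ROUTE
 * socket (the address and buffer size are assumptions; error handling
 * and parsing of the reply are omitted):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg    rtm;
 *		struct rtattr   rta;
 *		struct in_addr  dst;
 *	} req = {
 *		.nlh.nlmsg_len   = sizeof(req),
 *		.nlh.nlmsg_type  = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family  = AF_INET,
 *		.rta.rta_type    = RTA_DST,
 *		.rta.rta_len     = RTA_LENGTH(sizeof(struct in_addr)),
 *		.dst.s_addr      = inet_addr("203.0.113.5"),
 *	};
 *	char buf[4096];
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	send(fd, &req, sizeof(req), 0);
 *	recv(fd, buf, sizeof(buf), 0);
 *
 * The reply is a single RTM_NEWROUTE message built by rt_fill_info() or,
 * when RTM_F_FIB_MATCH is set, by fib_dump_info().
 */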
2822 void ip_rt_multicast_event(struct in_device *in_dev)
2824 rt_cache_flush(dev_net(in_dev->dev));
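/*
 * Note that rt_cache_flush() does not walk or free anything; it only bumps
 * the per-namespace generation counter, so cached routes are invalidated
 * lazily the next time dst_check() notices a stale generation id, roughly:
 *
 *	stale = (rt->rt_genid != rt_genid_ipv4(net));
 *
 * Stale routes are then dropped and re-resolved on their next use.
 */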
2827 #ifdef CONFIG_SYSCTL
2828 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2829 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2830 static int ip_rt_gc_elasticity __read_mostly = 8;
2831 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
2833 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2834 void __user *buffer,
2835 size_t *lenp, loff_t *ppos)
2837 struct net *net = (struct net *)__ctl->extra1;
2840 rt_cache_flush(net);
2841 fnhe_genid_bump(net);
2848 static struct ctl_table ipv4_route_table[] = {
2850 .procname = "gc_thresh",
2851 .data = &ipv4_dst_ops.gc_thresh,
2852 .maxlen = sizeof(int),
2854 .proc_handler = proc_dointvec,
2857 .procname = "max_size",
2858 .data = &ip_rt_max_size,
2859 .maxlen = sizeof(int),
2861 .proc_handler = proc_dointvec,
2864 /* Deprecated. Use gc_min_interval_ms */
2866 .procname = "gc_min_interval",
2867 .data = &ip_rt_gc_min_interval,
2868 .maxlen = sizeof(int),
2870 .proc_handler = proc_dointvec_jiffies,
2873 .procname = "gc_min_interval_ms",
2874 .data = &ip_rt_gc_min_interval,
2875 .maxlen = sizeof(int),
2877 .proc_handler = proc_dointvec_ms_jiffies,
2880 .procname = "gc_timeout",
2881 .data = &ip_rt_gc_timeout,
2882 .maxlen = sizeof(int),
2884 .proc_handler = proc_dointvec_jiffies,
2887 .procname = "gc_interval",
2888 .data = &ip_rt_gc_interval,
2889 .maxlen = sizeof(int),
2891 .proc_handler = proc_dointvec_jiffies,
2894 .procname = "redirect_load",
2895 .data = &ip_rt_redirect_load,
2896 .maxlen = sizeof(int),
2898 .proc_handler = proc_dointvec,
2901 .procname = "redirect_number",
2902 .data = &ip_rt_redirect_number,
2903 .maxlen = sizeof(int),
2905 .proc_handler = proc_dointvec,
2908 .procname = "redirect_silence",
2909 .data = &ip_rt_redirect_silence,
2910 .maxlen = sizeof(int),
2912 .proc_handler = proc_dointvec,
2915 .procname = "error_cost",
2916 .data = &ip_rt_error_cost,
2917 .maxlen = sizeof(int),
2919 .proc_handler = proc_dointvec,
2922 .procname = "error_burst",
2923 .data = &ip_rt_error_burst,
2924 .maxlen = sizeof(int),
2926 .proc_handler = proc_dointvec,
2929 .procname = "gc_elasticity",
2930 .data = &ip_rt_gc_elasticity,
2931 .maxlen = sizeof(int),
2933 .proc_handler = proc_dointvec,
2936 .procname = "mtu_expires",
2937 .data = &ip_rt_mtu_expires,
2938 .maxlen = sizeof(int),
2940 .proc_handler = proc_dointvec_jiffies,
2943 .procname = "min_pmtu",
2944 .data = &ip_rt_min_pmtu,
2945 .maxlen = sizeof(int),
2947 .proc_handler = proc_dointvec_minmax,
2948 .extra1 = &ip_min_valid_pmtu,
2951 .procname = "min_adv_mss",
2952 .data = &ip_rt_min_advmss,
2953 .maxlen = sizeof(int),
2955 .proc_handler = proc_dointvec,
2960 static struct ctl_table ipv4_route_flush_table[] = {
2962 .procname = "flush",
2963 .maxlen = sizeof(int),
2965 .proc_handler = ipv4_sysctl_rtcache_flush,
2970 static __net_init int sysctl_route_net_init(struct net *net)
2972 struct ctl_table *tbl;
2974 tbl = ipv4_route_flush_table;
2975 if (!net_eq(net, &init_net)) {
2976 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2980 /* Don't export sysctls to unprivileged users */
2981 if (net->user_ns != &init_user_ns)
2982 tbl[0].procname = NULL;
2984 tbl[0].extra1 = net;
2986 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2987 if (!net->ipv4.route_hdr)
2992 if (tbl != ipv4_route_flush_table)
2998 static __net_exit void sysctl_route_net_exit(struct net *net)
3000 struct ctl_table *tbl;
3002 tbl = net->ipv4.route_hdr->ctl_table_arg;
3003 unregister_net_sysctl_table(net->ipv4.route_hdr);
3004 BUG_ON(tbl == ipv4_route_flush_table);
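/*
 * Userspace sketch for the "flush" sysctl registered above, roughly what
 * "ip route flush cache" triggers (error handling omitted).  Writing any
 * value invokes ipv4_sysctl_rtcache_flush(), which bumps the route and
 * fnhe generation counters for this namespace:
 *
 *	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);
 *
 *	write(fd, "1\n", 2);
 *	close(fd);
 */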
3008 static __net_initdata struct pernet_operations sysctl_route_ops = {
3009 .init = sysctl_route_net_init,
3010 .exit = sysctl_route_net_exit,
3014 static __net_init int rt_genid_init(struct net *net)
3016 atomic_set(&net->ipv4.rt_genid, 0);
3017 atomic_set(&net->fnhe_genid, 0);
3018 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3022 static __net_initdata struct pernet_operations rt_genid_ops = {
3023 .init = rt_genid_init,
3026 static int __net_init ipv4_inetpeer_init(struct net *net)
3028 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3032 inet_peer_base_init(bp);
3033 net->ipv4.peers = bp;
3037 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3039 struct inet_peer_base *bp = net->ipv4.peers;
3041 net->ipv4.peers = NULL;
3042 inetpeer_invalidate_tree(bp);
3046 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3047 .init = ipv4_inetpeer_init,
3048 .exit = ipv4_inetpeer_exit,
3051 #ifdef CONFIG_IP_ROUTE_CLASSID
3052 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3053 #endif /* CONFIG_IP_ROUTE_CLASSID */
3055 int __init ip_rt_init(void)
3059 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
3061 panic("IP: failed to allocate ip_idents\n");
3063 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3065 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3067 panic("IP: failed to allocate ip_tstamps\n");
3069 for_each_possible_cpu(cpu) {
3070 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3072 INIT_LIST_HEAD(&ul->head);
3073 spin_lock_init(&ul->lock);
3075 #ifdef CONFIG_IP_ROUTE_CLASSID
3076 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3078 panic("IP: failed to allocate ip_rt_acct\n");
3081 ipv4_dst_ops.kmem_cachep =
3082 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3083 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3085 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3087 if (dst_entries_init(&ipv4_dst_ops) < 0)
3088 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3090 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3091 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3093 ipv4_dst_ops.gc_thresh = ~0;
3094 ip_rt_max_size = INT_MAX;
3099 if (ip_rt_proc_init())
3100 pr_err("Unable to create route proc files\n");
3105 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3106 RTNL_FLAG_DOIT_UNLOCKED);
3108 #ifdef CONFIG_SYSCTL
3109 register_pernet_subsys(&sysctl_route_ops);
3111 register_pernet_subsys(&rt_genid_ops);
3112 register_pernet_subsys(&ipv4_inetpeer_ops);
3116 #ifdef CONFIG_SYSCTL
3118 * We really need to sanitize the damn ipv4 init order, then all
3119 * this nonsense will go away.
3121 void __init ip_static_sysctl_init(void)
3123 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);