Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/icmp.c	3
-rw-r--r--	net/ipv4/ipmr.c	2
-rw-r--r--	net/ipv4/tcp_minisocks.c	4
-rw-r--r--	net/ipv4/udp.c	14
4 files changed, 11 insertions, 12 deletions
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4f088fa1c2f2..963a89ae9c26 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -517,6 +517,9 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
 	if (!IS_ERR(dst)) {
 		if (rt != rt2)
 			return rt;
+		if (inet_addr_type_dev_table(net, route_lookup_dev,
+					     fl4->daddr) == RTN_LOCAL)
+			return rt;
 	} else if (PTR_ERR(dst) == -EPERM) {
 		rt = NULL;
 	} else {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c5b8ec5c0a8c..99d8faa508e5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -122,7 +122,7 @@ static void ipmr_expire_process(struct timer_list *t);
 
 static bool ipmr_can_free_table(struct net *net)
 {
-	return !check_net(net) || !net->ipv4.mr_rules_ops;
+	return !check_net(net) || !net_initialized(net);
 }
 
 static struct mr_table *ipmr_mr_table_iter(struct net *net,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index bb1fe1ba867a..7121d8573928 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -326,6 +326,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tcptw->tw_last_oow_ack_time = 0;
 		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
 		tw->tw_txhash		= sk->sk_txhash;
+		tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+		tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
+#endif
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a01905d379f..e8953e88efef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1674,7 +1674,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	int rmem, err = -ENOMEM;
 	spinlock_t *busy = NULL;
-	bool becomes_readable;
 	int size, rcvbuf;
 
 	/* Immediately drop when the receive queue is full.
@@ -1715,19 +1714,12 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 * forward allocated memory on dequeue
 	 */
 	sock_skb_set_dropcount(sk, skb);
-	becomes_readable = skb_queue_empty(list);
 	__skb_queue_tail(list, skb);
 	spin_unlock(&list->lock);
 
-	if (!sock_flag(sk, SOCK_DEAD)) {
-		if (becomes_readable ||
-		    sk->sk_data_ready != sock_def_readable ||
-		    READ_ONCE(sk->sk_peek_off) >= 0)
-			INDIRECT_CALL_1(sk->sk_data_ready,
-					sock_def_readable, sk);
-		else
-			sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
-	}
+	if (!sock_flag(sk, SOCK_DEAD))
+		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);
+
 	busylock_release(busy);
 
 	return 0;
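For orientation, the enqueue tail of __udp_enqueue_schedule_skb() after the
udp.c hunks above reduces to roughly the following. This is reconstructed
from the diff itself rather than copied from the tree, so read it as a
sketch of the resulting code path:

	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	/* The becomes_readable short-circuit is gone: every enqueue on a
	 * live socket now calls sk_data_ready() (sock_def_readable() in the
	 * common case) instead of sometimes only waking async waiters.
	 */
	if (!sock_flag(sk, SOCK_DEAD))
		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);

	busylock_release(busy);

	return 0;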