Diffstat (limited to 'net'): 134 files changed, 1758 insertions, 1152 deletions
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 346b5c1a9185..c40eb04dd856 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -569,6 +569,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, hdr.hop_limit, &hdr.daddr); skb_push(skb, sizeof(hdr)); + skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_copy_to_linear_data(skb, &hdr, sizeof(hdr)); diff --git a/net/9p/client.c b/net/9p/client.c index 3ff26eb1ea20..ed8738c4dc09 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -931,7 +931,7 @@ static int p9_client_version(struct p9_client *c) { int err = 0; struct p9_req_t *req; - char *version; + char *version = NULL; int msize; p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n", diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 16d287565987..145f80518064 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); void p9stat_free(struct p9_wstat *stbuf) { kfree(stbuf->name); + stbuf->name = NULL; kfree(stbuf->uid); + stbuf->uid = NULL; kfree(stbuf->gid); + stbuf->gid = NULL; kfree(stbuf->muid); + stbuf->muid = NULL; kfree(stbuf->extension); + stbuf->extension = NULL; } EXPORT_SYMBOL(p9stat_free); diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index bced8c074c12..2f68ffda3715 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m) spin_lock_irqsave(&p9_poll_lock, flags); list_del_init(&m->poll_pending_link); spin_unlock_irqrestore(&p9_poll_lock, flags); + + flush_work(&p9_poll_work); } /** @@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) if (err < 0) return err; - if (valid_ipaddr4(addr) < 0) + if (addr == NULL || valid_ipaddr4(addr) < 0) return -EINVAL; csocket = NULL; @@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) csocket = NULL; + if (addr == NULL) + return -EINVAL; + if (strlen(addr) >= UNIX_PATH_MAX) { pr_err("%s (%d): address too long: %s\n", __func__, task_pid_nr(current), addr); diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 52b4a2f993f2..f42550dd3560 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) struct ib_qp_init_attr qp_attr; struct ib_cq_init_attr cq_attr = {}; + if (addr == NULL) + return -EINVAL; + /* Parse the transport specific mount options */ err = parse_opts(args, &opts); if (err < 0) diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 2ddeecca5b12..2a15b6aa9cdd 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start, s = rest_of_page(data); if (s > count) s = count; - BUG_ON(index > limit); + BUG_ON(index >= limit); /* Make sure we don't terminate early. */ sg_unmark_end(&sg[index]); sg_set_buf(&sg[index++], data, s); @@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, s = PAGE_SIZE - data_off; if (s > count) s = count; + BUG_ON(index >= limit); /* Make sure we don't terminate early. 
*/ sg_unmark_end(&sg[index]); sg_set_page(&sg[index++], pdata[i++], s, data_off); @@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, p9_debug(P9_DEBUG_TRANS, "virtio request\n"); if (uodata) { + __le32 sz; int n = p9_get_mapped_pages(chan, &out_pages, uodata, outlen, &offs, &need_drop); if (n < 0) @@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4); outlen = n; } + /* The size field of the message must include the length of the + * header and the length of the data. We didn't actually know + * the length of the data until this point so add it in now. + */ + sz = cpu_to_le32(req->tc->size + outlen); + memcpy(&req->tc->sdata[0], &sz, sizeof(sz)); } else if (uidata) { int n = p9_get_mapped_pages(chan, &in_pages, uidata, inlen, &offs, &need_drop); @@ -566,7 +574,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); - goto out_free_vq; + goto out_free_chan; } chan->vq->vdev->priv = chan; spin_lock_init(&chan->lock); @@ -619,6 +627,7 @@ out_free_tag: kfree(tag); out_free_vq: vdev->config->del_vqs(vdev); +out_free_chan: kfree(chan); fail: return err; @@ -646,6 +655,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) int ret = -ENOENT; int found = 0; + if (devname == NULL) + return -EINVAL; + mutex_lock(&virtio_9p_lock); list_for_each_entry(chan, &virtio_chan_list, chan_list) { if (!strncmp(devname, chan->tag, chan->tag_len) && diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 5d2f9d4879b2..d50c3b003dc9 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -266,7 +266,7 @@ batadv_frag_merge_packets(struct hlist_head *chain) kfree(entry); packet = (struct batadv_frag_packet *)skb_out->data; - size = ntohs(packet->total_size); + size = ntohs(packet->total_size) + hdr_size; /* Make room for the rest of the fragments. */ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index f64de569175a..e614940c4e98 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session) del_timer(&session->timer); } -static void hidp_process_report(struct hidp_session *session, - int type, const u8 *data, int len, int intr) +static void hidp_process_report(struct hidp_session *session, int type, + const u8 *data, unsigned int len, int intr) { if (len > HID_MAX_BUFFER_SIZE) len = HID_MAX_BUFFER_SIZE; @@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, sizeof(req->name) - 1); + strncpy(hid->name, req->name, sizeof(hid->name)); snprintf(hid->phys, sizeof(hid->phys), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->src); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index db399c1662ab..1f4a4a790df6 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3083,9 +3083,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* LE address type */ addr_type = le_addr_type(cp->addr.type); - hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); - - err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); + /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. 
*/ + err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_PAIRED, &rp, @@ -3099,8 +3098,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, goto done; } - /* Abort any ongoing SMP pairing */ - smp_cancel_pairing(conn); /* Defer clearing up the connection parameters until closing to * give a chance of keeping them if a repairing happens. diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 7d5457d64212..46c3f086a261 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -392,7 +392,8 @@ static void sco_sock_cleanup_listen(struct sock *parent) */ static void sco_sock_kill(struct sock *sk) { - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %pK state %d", sk, sk->sk_state); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 3c9689e10721..68ffdf0d3876 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2371,30 +2371,51 @@ unlock: return ret; } -void smp_cancel_pairing(struct hci_conn *hcon) +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type) { - struct l2cap_conn *conn = hcon->l2cap_data; + struct hci_conn *hcon; + struct l2cap_conn *conn; struct l2cap_chan *chan; struct smp_chan *smp; + int err; + + err = hci_remove_ltk(hdev, bdaddr, addr_type); + hci_remove_irk(hdev, bdaddr, addr_type); + + hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type); + if (!hcon) + goto done; + conn = hcon->l2cap_data; if (!conn) - return; + goto done; chan = conn->smp; if (!chan) - return; + goto done; l2cap_chan_lock(chan); smp = chan->data; if (smp) { + /* Set keys to NULL to make sure smp_failure() does not try to + * remove and free already invalidated rcu list entries. 
*/ + smp->ltk = NULL; + smp->slave_ltk = NULL; + smp->remote_irk = NULL; + if (test_bit(SMP_FLAG_COMPLETE, &smp->flags)) smp_failure(conn, 0); else smp_failure(conn, SMP_UNSPECIFIED); + err = 0; } l2cap_chan_unlock(chan); + +done: + return err; } static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index ffcc70b6b199..993cbd7bcfe7 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -180,7 +180,8 @@ enum smp_key_pref { }; /* SMP Commands */ -void smp_cancel_pairing(struct hci_conn *hcon); +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type); bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, enum smp_key_pref key_pref); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 3400b1e47668..50e84e634dfe 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -511,8 +511,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (br_fdb_insert(br, p, dev->dev_addr, 0)) netdev_err(dev, "failed insert local address bridge forwarding table\n"); - if (nbp_vlan_init(p)) + err = nbp_vlan_init(p); + if (err) { netdev_err(dev, "failed to initialize vlan filtering on this port\n"); + goto err6; + } spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); @@ -533,6 +536,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) return 0; +err6: + list_del_rcu(&p->list); + br_fdb_delete_by_port(br, p, 0, 1); + nbp_update_port_count(br); + netdev_upper_dev_unlink(dev, br->dev); + err5: dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index d80c15d028fe..270d9c9a5331 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1261,7 +1261,14 @@ static void br_multicast_query_received(struct net_bridge *br, return; br_multicast_update_query_timer(br, query, max_delay); - br_multicast_mark_router(br, port); + + /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, + * the arrival port for IGMP Queries where the source address + * is 0.0.0.0 should not be added to router port list. 
+ */ + if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || + saddr->proto == htons(ETH_P_IPV6)) + br_multicast_mark_router(br, port); } static int br_ip4_multicast_query(struct net_bridge *br, diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index 070cf134a22f..f2660c1b29e4 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c @@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par) if (e->ethproto != htons(ETH_P_ARP) || e->invflags & EBT_IPROTO) return -EINVAL; + if (ebt_invalid_target(info->target)) + return -EINVAL; + return 0; } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9f70c267a7a5..8b8a43fda6ca 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -404,6 +404,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); if (IS_ERR(watcher)) return PTR_ERR(watcher); + + if (watcher->family != NFPROTO_BRIDGE) { + module_put(watcher->me); + return -ENOENT; + } + w->u.watcher = watcher; par->target = watcher; @@ -701,6 +707,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, } i = 0; + memset(&mtpar, 0, sizeof(mtpar)); + memset(&tgpar, 0, sizeof(tgpar)); mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; @@ -722,6 +730,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, goto cleanup_watchers; } + /* Reject UNSPEC, xtables verdicts/return values are incompatible */ + if (target->family != NFPROTO_BRIDGE) { + module_put(target->me); + ret = -ENOENT; + goto cleanup_watchers; + } + t->u.target = target; if (t->u.target == &ebt_standard_target) { if (gap < sizeof(struct ebt_standard_target)) { diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index d730a0f68f46..a0443d40d677 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) caifd = caif_get(skb->dev); WARN_ON(caifd == NULL); - if (caifd == NULL) + if (!caifd) { + rcu_read_unlock(); return; + } caifd_hold(caifd); rcu_read_unlock(); diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index d4f5f220a8e5..28453d698d86 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data, while (got < num_pages) { rc = get_user_pages_unlocked(current, current->mm, (unsigned long)data + ((unsigned long)got * PAGE_SIZE), - num_pages - got, write_page, 0, pages + got); + num_pages - got, pages + got, write_page ? FOLL_WRITE : 0); if (rc < 0) break; BUG_ON(rc == 0); diff --git a/net/core/dev.c b/net/core/dev.c index 8c884cf5fbc9..4810e43501c6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1662,6 +1662,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) } EXPORT_SYMBOL(call_netdevice_notifiers); +/** + * call_netdevice_notifiers_mtu - call all network notifier blocks + * @val: value passed unmodified to notifier function + * @dev: net_device pointer passed unmodified to notifier function + * @arg: additional u32 argument passed to the notifier function + * + * Call all network notifier blocks. Parameters and return value + * are as for raw_notifier_call_chain(). 
+ */ +static int call_netdevice_notifiers_mtu(unsigned long val, + struct net_device *dev, u32 arg) +{ + struct netdev_notifier_info_ext info = { + .info.dev = dev, + .ext.mtu = arg, + }; + + BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); + + return call_netdevice_notifiers_info(val, dev, &info.info); +} + #ifdef CONFIG_NET_INGRESS static struct static_key ingress_needed __read_mostly; @@ -4467,6 +4489,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; + + /* eth_type_trans() assumes pkt_type is PACKET_HOST */ + skb->pkt_type = PACKET_HOST; + skb->encapsulation = 0; skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); @@ -6170,14 +6196,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) err = __dev_set_mtu(dev, new_mtu); if (!err) { - err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); + err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, + orig_mtu); err = notifier_to_errno(err); if (err) { /* setting mtu back and notifying everyone again, * so that they have a chance to revert changes. */ __dev_set_mtu(dev, orig_mtu); - call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); + call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, + new_mtu); } } return err; @@ -7456,7 +7484,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(net, dev, pat) < 0) + err = dev_get_valid_name(net, dev, pat); + if (err < 0) goto out; } @@ -7468,7 +7497,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char dev_close(dev); /* And unlink it from device chain */ - err = -ENODEV; unlist_netdevice(dev); synchronize_net(); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 875555959ed0..fbb631004b43 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -6,6 +6,7 @@ #include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> +#include <net/rmnet_config.h> #include <linux/igmp.h> #include <linux/icmp.h> #include <linux/sctp.h> @@ -515,8 +516,8 @@ ip_proto_again: break; } - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS)) { + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) && + !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) { key_ports = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_PORTS, target_container); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 642b13ddd69c..fef2043cdb1d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1140,6 +1140,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, lladdr = neigh->ha; } + /* Update confirmed timestamp for neighbour entry after we + * received ARP packet even if it doesn't change IP to MAC binding. + */ + if (new & NUD_CONNECTED) + neigh->confirmed = jiffies; + /* If entry was valid and address is not changed, do not change entry state, if new one is STALE. */ @@ -1163,15 +1169,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } } - /* Update timestamps only once we know we will make a change to the + /* Update timestamp only once we know we will make a change to the * neighbour entry. Otherwise we risk to move the locktime window with * noop updates and ignore relevant ARP updates. 
*/ - if (new != old || lladdr != neigh->ha) { - if (new & NUD_CONNECTED) - neigh->confirmed = jiffies; + if (new != old || lladdr != neigh->ha) neigh->updated = jiffies; - } if (new != old) { neigh_del_timer(neigh); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2017ffa5197a..d52b633164c9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2087,9 +2087,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + __dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); @@ -2113,6 +2116,12 @@ struct net_device *rtnl_create_link(struct net *net, else if (ops->get_num_rx_queues) num_rx_queues = ops->get_num_rx_queues(); + if (num_tx_queues < 1 || num_tx_queues > 4096) + return ERR_PTR(-EINVAL); + + if (num_rx_queues < 1 || num_rx_queues > 4096) + return ERR_PTR(-EINVAL); + err = -ENOMEM; dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, ops->setup, num_tx_queues, num_rx_queues); @@ -2725,6 +2734,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; } + if (dev->type != ARPHRD_ETHER) { + pr_info("PF_BRIDGE: FDB add only supported for Ethernet devices"); + return -EINVAL; + } + addr = nla_data(tb[NDA_LLADDR]); err = fdb_vid_parse(tb[NDA_VLAN], &vid); @@ -2827,6 +2841,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; } + if (dev->type != ARPHRD_ETHER) { + pr_info("PF_BRIDGE: FDB delete only supported for Ethernet devices"); + return -EINVAL; + } + addr = nla_data(tb[NDA_LLADDR]); err = fdb_vid_parse(tb[NDA_VLAN], &vid); @@ -2912,6 +2931,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb, { int err; + if (dev->type != ARPHRD_ETHER) + return -EINVAL; + netif_addr_lock_bh(dev); err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc); if (err) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 03928b406d4c..5bd724d09287 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -854,6 +854,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) n->cloned = 1; n->nohdr = 0; n->peeked = 0; + C(pfmemalloc); n->destructor = NULL; C(tail); C(end); @@ -2403,6 +2404,25 @@ void skb_queue_purge(struct sk_buff_head *list) EXPORT_SYMBOL(skb_queue_purge); /** + * skb_rbtree_purge - empty a skb rbtree + * @root: root of the rbtree to empty + * + * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from + * the list and one reference dropped. This function does not take + * any lock. Synchronization should be handled by the caller (e.g., TCP + * out-of-order queue is protected by the socket lock). 
+ */ +void skb_rbtree_purge(struct rb_root *root) +{ + struct sk_buff *skb, *next; + + rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode) + kfree_skb(skb); + + *root = RB_ROOT; +} + +/** * skb_queue_head - queue a buffer at the list head * @list: list to use * @newsk: buffer to queue diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c index bade2d2596f7..1e92c5632b97 100644 --- a/net/core/sockev_nlmcast.c +++ b/net/core/sockev_nlmcast.c @@ -36,7 +36,6 @@ static struct netlink_kernel_cfg nlcfg = { static void _sockev_event(unsigned long event, __u8 *evstr, int buflen) { - memset(evstr, 0, buflen); switch (event) { case SOCKEV_SOCKET: @@ -99,6 +98,7 @@ static int sockev_client_cb(struct notifier_block *nb, NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV; smsg = nlmsg_data(nlh); + memset(smsg, 0, sizeof(struct sknlsockevmsg)); smsg->pid = current->pid; _sockev_event(event, smsg->event, sizeof(smsg->event)); smsg->skfamily = sk->sk_family; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 4f6c1862dfd2..6fe2b615518c 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1763,7 +1763,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == ifindex && - (!prio || itr->app.priority == prio)) + ((prio == -1) || itr->app.priority == prio)) return itr; } @@ -1798,7 +1798,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio = itr->app.priority; spin_unlock_bh(&dcb_lock); @@ -1826,7 +1827,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ - if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { + itr = dcb_app_lookup(new, dev->ifindex, -1); + if (itr) { if (new->priority) itr->app.priority = new->priority; else { @@ -1859,7 +1861,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio |= 1 << itr->app.priority; spin_unlock_bh(&dcb_lock); diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 86a2ed0fb219..161dfcf86126 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -228,14 +228,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); u32 cwnd = hc->tx_cwnd, restart_cwnd, iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache); + s32 delta = now - hc->tx_lsndtime; hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2)); /* don't reduce cwnd below the initial window (IW) */ restart_cwnd = min(cwnd, iwnd); - cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto; - hc->tx_cwnd = max(cwnd, restart_cwnd); + while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd) + cwnd >>= 1; + hc->tx_cwnd = max(cwnd, restart_cwnd); hc->tx_cwnd_stamp = now; hc->tx_cwnd_used = 0; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 119c04317d48..03fcf3ee1534 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); - ktime_t now = ktime_get_real(); + ktime_t now = ktime_get(); s64 delta = 0; switch 
(fbtype) { @@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) - DCCP_BUG("delta (%ld) <= 0", (long)delta); - else - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); + delta = 1; + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); break; default: return; } - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta, hc->rx_x_recv, hc->rx_pinv); hc->rx_tstamp_last_feedback = now; @@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) static u32 ccid3_first_li(struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); - u32 x_recv, p, delta; + u32 x_recv, p; + s64 delta; u64 fval; if (hc->rx_rtt == 0) { @@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk) hc->rx_rtt = DCCP_FALLBACK_RTT; } - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); + delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback); + if (delta <= 0) + delta = 1; x_recv = scaled_div32(hc->rx_bytes_recv, delta); if (x_recv == 0) { /* would also trigger divide-by-zero */ DCCP_WARN("X_recv==0\n"); diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index e26df2764e83..1689c7bdf1c9 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) opt++; kdebug("options: '%s'", opt); do { + int opt_len, opt_nlen; const char *eq; - int opt_len, opt_nlen, opt_vlen, tmp; + char optval[128]; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; - if (opt_len <= 0 || opt_len > 128) { + if (opt_len <= 0 || opt_len > sizeof(optval)) { pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", opt_len); return -EINVAL; } - eq = memchr(opt, '=', opt_len) ?: end; - opt_nlen = eq - opt; - eq++; - opt_vlen = next_opt - eq; /* will be -1 if no value */ + eq = memchr(opt, '=', opt_len); + if (eq) { + opt_nlen = eq - opt; + eq++; + memcpy(optval, eq, next_opt - eq); + optval[next_opt - eq] = '\0'; + } else { + opt_nlen = opt_len; + optval[0] = '\0'; + } - tmp = opt_vlen >= 0 ? 
opt_vlen : 0; - kdebug("option '%*.*s' val '%*.*s'", - opt_nlen, opt_nlen, opt, tmp, tmp, eq); + kdebug("option '%*.*s' val '%s'", + opt_nlen, opt_nlen, opt, optval); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); - if (opt_vlen <= 0) - goto bad_option_value; - ret = kstrtoul(eq, 10, &derrno); + ret = kstrtoul(optval, 10, &derrno); if (ret < 0) goto bad_option_value; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 554c2a961ad5..48b28a7ecc7a 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1099,6 +1099,9 @@ int dsa_slave_suspend(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_detach(slave_dev); if (p->phy) { @@ -1116,6 +1119,9 @@ int dsa_slave_resume(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_attach(slave_dev); if (p->phy) { diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index d4353faced35..a10db45b2e1e 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - return NET_XMIT_DROP; + if (unlikely(skb_headroom(skb) < ldev->needed_headroom || + skb_tailroom(skb) < ldev->needed_tailroom)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, ldev->needed_headroom, + ldev->needed_tailroom, GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + kfree_skb(skb); + return NET_XMIT_DROP; + } + } else { + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + } ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset); if (ret < 0) { diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c index 23e4443fc1b2..7c82f7ff8874 100644 --- a/net/ipc_router/ipc_router_socket.c +++ b/net/ipc_router/ipc_router_socket.c @@ -143,6 +143,7 @@ static int msm_ipc_router_extract_msg(struct msghdr *m, return -EINVAL; } ctl_msg = (union rr_control_msg *)(temp->data); + memset(addr, 0x0, sizeof(*addr)); addr->family = AF_MSM_IPC; addr->address.addrtype = MSM_IPC_ADDR_ID; addr->address.addr.port_addr.node_id = ctl_msg->cli.node_id; @@ -151,6 +152,7 @@ static int msm_ipc_router_extract_msg(struct msghdr *m, return offset; } if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_DATA)) { + memset(addr, 0x0, sizeof(*addr)); addr->family = AF_MSM_IPC; addr->address.addrtype = MSM_IPC_ADDR_ID; addr->address.addr.port_addr.node_id = hdr->src_node_id; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 154e7423cdeb..395d82754626 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -354,6 +354,7 @@ config INET_ESP select CRYPTO_CBC select CRYPTO_SHA1 select CRYPTO_DES + select CRYPTO_ECHAINIV ---help--- Support for IPsec ESP. 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index aad274c67b5e..55eff963d1fe 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1301,6 +1301,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (encap) skb_reset_inner_headers(skb); skb->network_header = (u8 *)iph - skb->head; + skb_reset_mac_len(skb); } while ((skb = skb->next)); out: diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 5f3b81941a6f..cfaacaa023e6 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1582,7 +1582,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, * * Description: * Parse the packet's IP header looking for a CIPSO option. Returns a pointer - * to the start of the CIPSO option on success, NULL if one if not found. + * to the start of the CIPSO option on success, NULL if one is not found. * */ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) @@ -1592,10 +1592,21 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) int optlen; int taglen; - for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { + for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) { + switch (optptr[0]) { + case IPOPT_END: + return NULL; + case IPOPT_NOOP: + taglen = 1; + break; + default: + taglen = optptr[1]; + } + if (!taglen || taglen > optlen) + return NULL; if (optptr[0] == IPOPT_CIPSO) return optptr; - taglen = optptr[1]; + optlen -= taglen; optptr += taglen; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 7dc9f0680bf6..860e33dd4030 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -289,18 +289,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) return ip_hdr(skb)->daddr; in_dev = __in_dev_get_rcu(dev); - BUG_ON(!in_dev); net = dev_net(dev); scope = RT_SCOPE_UNIVERSE; if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); struct flowi4 fl4 = { .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, + .flowi4_mark = vmark ? 
skb->mark : 0, }; if (!fib_lookup(net, &fl4, &res, 0)) return FIB_RES_PREFSRC(net, res); @@ -1170,7 +1171,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info; + struct netdev_notifier_changeupper_info *upper_info = ptr; + struct netdev_notifier_info_ext *info_ext = ptr; struct in_device *in_dev; struct net *net = dev_net(dev); unsigned int flags; @@ -1205,16 +1207,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo fib_sync_up(dev, RTNH_F_LINKDOWN); else fib_sync_down_dev(dev, event, false); - /* fall through */ + rt_cache_flush(net); + break; case NETDEV_CHANGEMTU: + fib_sync_mtu(dev, info_ext->ext.mtu); rt_cache_flush(net); break; case NETDEV_CHANGEUPPER: - info = ptr; + upper_info = ptr; /* flush all routes if dev is linked to or unlinked from * an L3 master device (e.g., VRF) */ - if (info->upper_dev && netif_is_l3_master(info->upper_dev)) + if (upper_info->upper_dev && + netif_is_l3_master(upper_info->upper_dev)) fib_disable_ip(dev, NETDEV_DOWN, true); break; } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 03ebff3950d8..3109b9bb95d2 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1373,6 +1373,56 @@ int fib_sync_down_addr(struct net *net, __be32 local) return ret; } +/* Update the PMTU of exceptions when: + * - the new MTU of the first hop becomes smaller than the PMTU + * - the old MTU was the same as the PMTU, and it limited discovery of + * larger MTUs on the path. With that limit raised, we can now + * discover larger MTUs + * A special case is locked exceptions, for which the PMTU is smaller + * than the minimal accepted PMTU: + * - if the new MTU is greater than the PMTU, don't make any change + * - otherwise, unlock and set PMTU + */ +static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig) +{ + struct fnhe_hash_bucket *bucket; + int i; + + bucket = rcu_dereference_protected(nh->nh_exceptions, 1); + if (!bucket) + return; + + for (i = 0; i < FNHE_HASH_SIZE; i++) { + struct fib_nh_exception *fnhe; + + for (fnhe = rcu_dereference_protected(bucket[i].chain, 1); + fnhe; + fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) { + if (fnhe->fnhe_mtu_locked) { + if (new <= fnhe->fnhe_pmtu) { + fnhe->fnhe_pmtu = new; + fnhe->fnhe_mtu_locked = false; + } + } else if (new < fnhe->fnhe_pmtu || + orig == fnhe->fnhe_pmtu) { + fnhe->fnhe_pmtu = new; + } + } + } +} + +void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) +{ + unsigned int hash = fib_devindex_hashfn(dev->ifindex); + struct hlist_head *head = &fib_info_devhash[hash]; + struct fib_nh *nh; + + hlist_for_each_entry(nh, head, nh_hash) { + if (nh->nh_dev == dev) + nh_update_mtu(nh, dev->mtu, orig_mtu); + } +} + /* Event force Flags Description * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 08d8ee124538..d83888bc33d3 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -195,6 +195,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, u8 proto = NAPI_GRO_CB(skb)->proto; const struct net_offload **offloads; + /* We can clear the encap_mark for FOU as we are essentially doing + * one of two possible things. 
We are either adding an L4 tunnel + * header to the outer L3 tunnel header, or we are are simply + * treating the GRE tunnel header as though it is a UDP protocol + * specific header such as VXLAN or GENEVE. + */ + NAPI_GRO_CB(skb)->encap_mark = 0; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -354,6 +362,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, } } + /* We can clear the encap_mark for GUE as we are essentially doing + * one of two possible things. We are either adding an L4 tunnel + * header to the outer L3 tunnel header, or we are are simply + * treating the GRE tunnel header as though it is a UDP protocol + * specific header such as VXLAN or GENEVE. + */ + NAPI_GRO_CB(skb)->encap_mark = 0; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[guehdr->proto_ctype]); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index b34fa1bb278f..b2001b20e029 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -364,11 +364,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, { struct inet_frag_queue *q; - if (frag_mem_limit(nf) > nf->high_thresh) { - inet_frag_schedule_worker(f); - return NULL; - } - q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -405,6 +400,11 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frag_queue *q; int depth = 0; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { + inet_frag_schedule_worker(f); + return NULL; + } + if (frag_mem_limit(nf) > nf->low_thresh) inet_frag_schedule_worker(f); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 7057a1b09b5e..72915658a6b1 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -716,10 +716,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { - if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) - return skb; - if (pskb_trim_rcsum(skb, netoff + len)) - return skb; + if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) { + kfree_skb(skb); + return NULL; + } + if (pskb_trim_rcsum(skb, netoff + len)) { + kfree_skb(skb); + return NULL; + } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (ip_defrag(net, skb, user)) return NULL; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c1d7dc433976..ac2966f02d07 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -480,6 +480,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + /* Copy the flags to each fragment. 
*/ IPCB(to)->flags = IPCB(from)->flags; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ce9a7fbb7c5f..3f8caf7d19b8 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -134,19 +134,21 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; - const struct iphdr *iph = ip_hdr(skb); - __be16 *ports = (__be16 *)skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 > skb->len) + end = skb_transport_offset(skb) + 4; + if (end > 0 && !pskb_may_pull(skb, end)) return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. */ + ports = (__be16 *)skb_transport_header(skb); sin.sin_family = AF_INET; - sin.sin_addr.s_addr = iph->daddr; + sin.sin_addr.s_addr = ip_hdr(skb)->daddr; sin.sin_port = ports[1]; memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 3d62feb65932..91ae061d46ac 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -261,8 +261,8 @@ static struct net_device *__ip_tunnel_create(struct net *net, } else { if (strlen(ops->kind) > (IFNAMSIZ - 3)) goto failed; - strlcpy(name, ops->kind, IFNAMSIZ); - strncat(name, "%d", 2); + strcpy(name, ops->kind); + strcat(name, "%d"); } ASSERT_RTNL(); @@ -597,6 +597,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); + unsigned int inner_nhdr_len = 0; const struct iphdr *inner_iph; struct flowi4 fl4; u8 tos, ttl; @@ -607,6 +608,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, int err; bool connected; + /* ensure we can access the inner net header, for several users below */ + if (skb->protocol == htons(ETH_P_IP)) + inner_nhdr_len = sizeof(struct iphdr); + else if (skb->protocol == htons(ETH_P_IPV6)) + inner_nhdr_len = sizeof(struct ipv6hdr); + if (unlikely(!pskb_may_pull(skb, inner_nhdr_len))) + goto tx_error; + inner_iph = (const struct iphdr *)skb_inner_network_header(skb); connected = (tunnel->parms.iph.daddr != 0); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index dbda0565781c..4916d1857b75 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -71,7 +71,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; - iph->frag_off = df; + iph->frag_off = ip_mtu_locked(&rt->dst) ? 
0 : df; iph->protocol = proto; iph->tos = tos; iph->daddr = dst; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 9d6b9c4c5f82..60f564db25a3 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -790,6 +790,11 @@ static void __init ic_bootp_init_ext(u8 *e) */ static inline void __init ic_bootp_init(void) { + /* Re-initialise all name servers to NONE, in case any were set via the + * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses + * specified there will already have been decoded but are no longer + * needed + */ ic_nameservers_predef(); dev_add_pack(&bootp_packet_type); @@ -1423,6 +1428,13 @@ static int __init ip_auto_config(void) int err; unsigned int i; + /* Initialise all name servers to NONE (but only if the "ip=" or + * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise + * we'll overwrite the IP addresses specified there) + */ + if (ic_set_manually == 0) + ic_nameservers_predef(); + #ifdef CONFIG_PROC_FS proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops); #endif /* CONFIG_PROC_FS */ @@ -1640,6 +1652,7 @@ static int __init ip_auto_config_setup(char *addrs) return 1; } + /* Initialise all name servers to NONE */ ic_nameservers_predef(); /* Parse string for static IP assignment. */ diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index dac62b5e7fe3..8adb6e9ba8f5 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -663,6 +663,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, return -ENOMEM; j = 0; + memset(&mtpar, 0, sizeof(mtpar)); mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; @@ -2071,6 +2072,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = { .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, + .me = THIS_MODULE, }, }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 8233e27679f2..5d563615718d 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -145,8 +145,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, if (write && ret == 0) { low = make_kgid(user_ns, urange[0]); high = make_kgid(user_ns, urange[1]); - if (!gid_valid(low) || !gid_valid(high) || - (urange[1] < urange[0]) || gid_lt(high, low)) { + if (!gid_valid(low) || !gid_valid(high)) + return -EINVAL; + if (urange[1] < urange[0] || gid_lt(high, low)) { low = make_kgid(&init_user_ns, 1); high = make_kgid(&init_user_ns, 0); } @@ -232,8 +233,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, { struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; struct tcp_fastopen_context *ctxt; - int ret; u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ + __le32 key[4]; + int ret, i; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) @@ -242,11 +244,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, rcu_read_lock(); ctxt = rcu_dereference(tcp_fastopen_ctx); if (ctxt) - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); else - memset(user_key, 0, sizeof(user_key)); + memset(key, 0, sizeof(key)); rcu_read_unlock(); + for (i = 0; i < ARRAY_SIZE(key); i++) + user_key[i] = le32_to_cpu(key[i]); + snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", user_key[0], user_key[1], user_key[2], user_key[3]); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); @@ -262,12 +267,16 @@ static int proc_tcp_fastopen_key(struct ctl_table 
*ctl, int write, * first invocation of tcp_fastopen_cookie_gen */ tcp_fastopen_init_key_once(false); - tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH); + + for (i = 0; i < ARRAY_SIZE(user_key); i++) + key[i] = cpu_to_le32(user_key[i]); + + tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH); } bad_key: pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", - user_key[0], user_key[1], user_key[2], user_key[3], + user_key[0], user_key[1], user_key[2], user_key[3], (char *)tbl.data, ret); kfree(tbl.data); return ret; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2f26b0d1f1d7..718049f0a295 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -388,7 +388,7 @@ void tcp_init_sock(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - __skb_queue_head_init(&tp->out_of_order_queue); + tp->out_of_order_queue = RB_ROOT; tcp_init_xmit_timers(sk); tcp_prequeue_init(tp); INIT_LIST_HEAD(&tp->tsq_node); @@ -1668,7 +1668,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, * shouldn't happen. */ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), - "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags)) break; @@ -1681,7 +1681,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), - "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", + "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); } @@ -2249,7 +2249,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); tcp_write_queue_purge(sk); - __skb_queue_purge(&tp->out_of_order_queue); + skb_rbtree_purge(&tp->out_of_order_queue); inet->inet_dport = 0; diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 55d7da1d2ce9..62f90f6b7a9d 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -55,7 +55,6 @@ struct dctcp { u32 dctcp_alpha; u32 next_seq; u32 ce_state; - u32 delayed_ack_reserved; u32 loss_cwnd; }; @@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk) ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); - ca->delayed_ack_reserved = 0; ca->loss_cwnd = 0; ca->ce_state = 0; @@ -131,23 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=0 to CE=1 and delayed - * ACK has not sent yet. - */ - if (!ca->ce_state && ca->delayed_ack_reserved) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=0. */ - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (!ca->ce_state) { + /* State has changed from CE=0 to CE=1, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. 
+ */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -161,23 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=1 to CE=0 and delayed - * ACK has not sent yet. - */ - if (ca->ce_state && ca->delayed_ack_reserved) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=1. */ - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (ca->ce_state) { + /* State has changed from CE=1 to CE=0, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -248,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state) } } -static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) -{ - struct dctcp *ca = inet_csk_ca(sk); - - switch (ev) { - case CA_EVENT_DELAYED_ACK: - if (!ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 1; - break; - case CA_EVENT_NON_DELAYED_ACK: - if (ca->delayed_ack_reserved) - ca->delayed_ack_reserved = 0; - break; - default: - /* Don't care for the rest. */ - break; - } -} - static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) { switch (ev) { @@ -276,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) case CA_EVENT_ECN_NO_CE: dctcp_ce_state_1_to_0(sk); break; - case CA_EVENT_DELAYED_ACK: - case CA_EVENT_NON_DELAYED_ACK: - dctcp_update_ack_reserved(sk, ev); - break; default: /* Don't care for the rest. */ break; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c02fa0b4c7e0..e3d661cc3db1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -177,24 +177,27 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) } } -static void tcp_incr_quickack(struct sock *sk) +static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; + quickacks = min(quickacks, max_quickacks); if (quickacks > icsk->icsk_ack.quick) - icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); + icsk->icsk_ack.quick = quickacks; } -static void tcp_enter_quickack_mode(struct sock *sk) +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); - tcp_incr_quickack(sk); + + tcp_incr_quickack(sk, max_quickacks); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } +EXPORT_SYMBOL(tcp_enter_quickack_mode); /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. 
@@ -226,8 +229,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } -static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, @@ -235,31 +240,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) - tcp_enter_quickack_mode((struct sock *)tp); + tcp_enter_quickack_mode(sk, 2); break; case INET_ECN_CE: - if (tcp_ca_needs_ecn((struct sock *)tp)) - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); + if (tcp_ca_needs_ecn(sk)) + tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ - tcp_enter_quickack_mode((struct sock *)tp); + tcp_enter_quickack_mode(sk, 2); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; break; default: - if (tcp_ca_needs_ecn((struct sock *)tp)) - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); + if (tcp_ca_needs_ecn(sk)) + tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; } } -static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) { - if (tp->ecn_flags & TCP_ECN_OK) - __tcp_ecn_check_ce(tp, skb); + if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) + __tcp_ecn_check_ce(sk, skb); } static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) @@ -651,7 +656,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) /* The _first_ data packet received, initialize * delayed ACK engine. */ - tcp_incr_quickack(sk); + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; @@ -667,13 +672,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ - tcp_incr_quickack(sk); + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; - tcp_ecn_check_ce(tp, skb); + tcp_ecn_check_ce(sk, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); @@ -3219,6 +3224,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); + + /* If any of the cumulatively ACKed segments was + * retransmitted, non-SACK case cannot confirm that + * progress was due to original transmission due to + * lack of TCPCB_SACKED_ACKED bits even if some of + * the packets may have been never retransmitted. + */ + if (flag & FLAG_RETRANS_DATA_ACKED) + flag &= ~FLAG_ORIG_SACK_ACKED; } else { int delta; @@ -4060,7 +4074,7 @@ static void tcp_fin(struct sock *sk) /* It _is_ possible, that we have something out-of-order _after_ FIN. * Probably, we should reset in this case. For now drop them. 
*/ - __skb_queue_purge(&tp->out_of_order_queue); + skb_rbtree_purge(&tp->out_of_order_queue); if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); @@ -4127,7 +4141,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; @@ -4220,7 +4234,7 @@ static void tcp_sack_remove(struct tcp_sock *tp) int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ - if (skb_queue_empty(&tp->out_of_order_queue)) { + if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; return; } @@ -4283,6 +4297,29 @@ static bool tcp_try_coalesce(struct sock *sk, return true; } +static bool tcp_ooo_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from, + bool *fragstolen) +{ + bool res = tcp_try_coalesce(sk, to, from, fragstolen); + + /* In case tcp_drop() is called later, update to->gso_segs */ + if (res) { + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) + + max_t(u16, 1, skb_shinfo(from)->gso_segs); + + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF); + } + return res; +} + +static void tcp_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + __kfree_skb(skb); +} + /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ @@ -4290,10 +4327,13 @@ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; + bool fin, fragstolen, eaten; struct sk_buff *skb, *tail; - bool fragstolen, eaten; + struct rb_node *p; - while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { + p = rb_first(&tp->out_of_order_queue); + while (p) { + skb = rb_entry(p, struct sk_buff, rbnode); if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; @@ -4303,11 +4343,12 @@ static void tcp_ofo_queue(struct sock *sk) dsack_high = TCP_SKB_CB(skb)->end_seq; tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } + p = rb_next(p); + rb_erase(&skb->rbnode, &tp->out_of_order_queue); - __skb_unlink(skb, &tp->out_of_order_queue); - if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { + if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { SOCK_DEBUG(sk, "ofo packet was already received\n"); - __kfree_skb(skb); + tcp_drop(sk, skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", @@ -4317,12 +4358,19 @@ static void tcp_ofo_queue(struct sock *sk) tail = skb_peek_tail(&sk->sk_receive_queue); eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); + fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (!eaten) __skb_queue_tail(&sk->sk_receive_queue, skb); - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) - tcp_fin(sk); - if (eaten) + else kfree_skb_partial(skb, fragstolen); + + if (unlikely(fin)) { + tcp_fin(sk); + /* tcp_fin() purges tp->out_of_order_queue, + * so we must end this loop right now. 
+ */ + break; + } } } @@ -4352,14 +4400,16 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + struct rb_node **p, *q, *parent; struct sk_buff *skb1; u32 seq, end_seq; + bool fragstolen; - tcp_ecn_check_ce(tp, skb); + tcp_ecn_check_ce(sk, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); - __kfree_skb(skb); + tcp_drop(sk, skb); return; } @@ -4368,89 +4418,89 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) inet_csk_schedule_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); + seq = TCP_SKB_CB(skb)->seq; + end_seq = TCP_SKB_CB(skb)->end_seq; SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", - tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); + tp->rcv_nxt, seq, end_seq); - skb1 = skb_peek_tail(&tp->out_of_order_queue); - if (!skb1) { + p = &tp->out_of_order_queue.rb_node; + if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { /* Initial out of order segment, build 1 SACK. */ if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; - tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; - tp->selective_acks[0].end_seq = - TCP_SKB_CB(skb)->end_seq; + tp->selective_acks[0].start_seq = seq; + tp->selective_acks[0].end_seq = end_seq; } - __skb_queue_head(&tp->out_of_order_queue, skb); + rb_link_node(&skb->rbnode, NULL, p); + rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); + tp->ooo_last_skb = skb; goto end; } - seq = TCP_SKB_CB(skb)->seq; - end_seq = TCP_SKB_CB(skb)->end_seq; - - if (seq == TCP_SKB_CB(skb1)->end_seq) { - bool fragstolen; - - if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { - __skb_queue_after(&tp->out_of_order_queue, skb1, skb); - } else { - tcp_grow_window(sk, skb); - kfree_skb_partial(skb, fragstolen); - skb = NULL; - } - - if (!tp->rx_opt.num_sacks || - tp->selective_acks[0].end_seq != seq) - goto add_sack; - - /* Common case: data arrive in order after hole. */ - tp->selective_acks[0].end_seq = end_seq; - goto end; - } - - /* Find place to insert this segment. */ - while (1) { - if (!after(TCP_SKB_CB(skb1)->seq, seq)) - break; - if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { - skb1 = NULL; - break; + /* In the typical case, we are adding an skb to the end of the list. + * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. + */ + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, + skb, &fragstolen)) { +coalesce_done: + tcp_grow_window(sk, skb); + kfree_skb_partial(skb, fragstolen); + skb = NULL; + goto add_sack; + } + + /* Find place to insert this segment. Handle overlaps on the way. */ + parent = NULL; + while (*p) { + parent = *p; + skb1 = rb_entry(parent, struct sk_buff, rbnode); + if (before(seq, TCP_SKB_CB(skb1)->seq)) { + p = &parent->rb_left; + continue; } - skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); - } - /* Do skb overlap to previous one? */ - if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { - if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { - /* All the bits are present. Drop. */ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb); - skb = NULL; - tcp_dsack_set(sk, seq, end_seq); - goto add_sack; - } - if (after(seq, TCP_SKB_CB(skb1)->seq)) { - /* Partial overlap. 
*/ - tcp_dsack_set(sk, seq, - TCP_SKB_CB(skb1)->end_seq); - } else { - if (skb_queue_is_first(&tp->out_of_order_queue, - skb1)) - skb1 = NULL; - else - skb1 = skb_queue_prev( - &tp->out_of_order_queue, - skb1); + if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { + if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { + /* All the bits are present. Drop. */ + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPOFOMERGE); + tcp_drop(sk, skb); + skb = NULL; + tcp_dsack_set(sk, seq, end_seq); + goto add_sack; + } + if (after(seq, TCP_SKB_CB(skb1)->seq)) { + /* Partial overlap. */ + tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); + } else { + /* skb's seq == skb1's seq and skb covers skb1. + * Replace skb1 with skb. + */ + rb_replace_node(&skb1->rbnode, &skb->rbnode, + &tp->out_of_order_queue); + tcp_dsack_extend(sk, + TCP_SKB_CB(skb1)->seq, + TCP_SKB_CB(skb1)->end_seq); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPOFOMERGE); + tcp_drop(sk, skb1); + goto merge_right; + } + } else if (tcp_ooo_try_coalesce(sk, skb1, + skb, &fragstolen)) { + goto coalesce_done; } + p = &parent->rb_right; } - if (!skb1) - __skb_queue_head(&tp->out_of_order_queue, skb); - else - __skb_queue_after(&tp->out_of_order_queue, skb1, skb); - /* And clean segments covered by new one as whole. */ - while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { - skb1 = skb_queue_next(&tp->out_of_order_queue, skb); + /* Insert segment into RB tree. */ + rb_link_node(&skb->rbnode, parent, p); + rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); +merge_right: + /* Remove other segments covered by skb. */ + while ((q = rb_next(&skb->rbnode)) != NULL) { + skb1 = rb_entry(q, struct sk_buff, rbnode); if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { @@ -4458,12 +4508,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq); break; } - __skb_unlink(skb1, &tp->out_of_order_queue); + rb_erase(&skb1->rbnode, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb1); + tcp_drop(sk, skb1); } + /* If there is no skb after us, we are the last_skb ! */ + if (!q) + tp->ooo_last_skb = skb; add_sack: if (tcp_is_sack(tp)) @@ -4545,12 +4598,13 @@ err: static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - int eaten = -1; bool fragstolen = false; + int eaten = -1; - if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) - goto drop; - + if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { + __kfree_skb(skb); + return; + } skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); @@ -4601,13 +4655,13 @@ queue_and_out: if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); - if (!skb_queue_empty(&tp->out_of_order_queue)) { + if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. 
*/ - if (skb_queue_empty(&tp->out_of_order_queue)) + if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) inet_csk(sk)->icsk_ack.pingpong = 0; } @@ -4629,10 +4683,10 @@ queue_and_out: tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_schedule_ack(sk); drop: - __kfree_skb(skb); + tcp_drop(sk, skb); return; } @@ -4640,8 +4694,6 @@ drop: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; - tcp_enter_quickack_mode(sk); - if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", @@ -4661,48 +4713,76 @@ drop: tcp_data_queue_ofo(sk, skb); } +static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list) +{ + if (list) + return !skb_queue_is_last(list, skb) ? skb->next : NULL; + + return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode); +} + static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, - struct sk_buff_head *list) + struct sk_buff_head *list, + struct rb_root *root) { - struct sk_buff *next = NULL; + struct sk_buff *next = tcp_skb_next(skb, list); - if (!skb_queue_is_last(list, skb)) - next = skb_queue_next(list, skb); + if (list) + __skb_unlink(skb, list); + else + rb_erase(&skb->rbnode, root); - __skb_unlink(skb, list); __kfree_skb(skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } +/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */ +static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct sk_buff *skb1; + + while (*p) { + parent = *p; + skb1 = rb_entry(parent, struct sk_buff, rbnode); + if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node(&skb->rbnode, parent, p); + rb_insert_color(&skb->rbnode, root); +} + /* Collapse contiguous sequence of skbs head..tail with * sequence numbers start..end. * - * If tail is NULL, this means until the end of the list. + * If tail is NULL, this means until the end of the queue. * * Segments with FIN/SYN are not collapsed (only because this * simplifies code) */ static void -tcp_collapse(struct sock *sk, struct sk_buff_head *list, - struct sk_buff *head, struct sk_buff *tail, - u32 start, u32 end) +tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, + struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) { - struct sk_buff *skb, *n; + struct sk_buff *skb = head, *n; + struct sk_buff_head tmp; bool end_of_skbs; /* First, check that queue is collapsible and find - * the point where collapsing can be useful. */ - skb = head; + * the point where collapsing can be useful. + */ restart: - end_of_skbs = true; - skb_queue_walk_from_safe(list, skb, n) { - if (skb == tail) - break; + for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) { + n = tcp_skb_next(skb, list); + /* No new bits? It is possible on ofo queue. 
*/ if (!before(start, TCP_SKB_CB(skb)->end_seq)) { - skb = tcp_collapse_one(sk, skb, list); + skb = tcp_collapse_one(sk, skb, list, root); if (!skb) break; goto restart; @@ -4720,13 +4800,10 @@ restart: break; } - if (!skb_queue_is_last(list, skb)) { - struct sk_buff *next = skb_queue_next(list, skb); - if (next != tail && - TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { - end_of_skbs = false; - break; - } + if (n && n != tail && + TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) { + end_of_skbs = false; + break; } /* Decided to skip this, advance start seq. */ @@ -4736,17 +4813,22 @@ restart: (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; + __skb_queue_head_init(&tmp); + while (before(start, end)) { int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); struct sk_buff *nskb; nskb = alloc_skb(copy, GFP_ATOMIC); if (!nskb) - return; + break; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; - __skb_queue_before(list, skb, nskb); + if (list) + __skb_queue_before(list, skb, nskb); + else + __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */ skb_set_owner_r(nskb, sk); /* Copy data, releasing collapsed skbs. */ @@ -4764,14 +4846,17 @@ restart: start += size; } if (!before(start, TCP_SKB_CB(skb)->end_seq)) { - skb = tcp_collapse_one(sk, skb, list); + skb = tcp_collapse_one(sk, skb, list, root); if (!skb || skb == tail || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) - return; + goto end; } } } +end: + skb_queue_walk_safe(&tmp, skb, n) + tcp_rbtree_insert(root, skb); } /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs @@ -4780,70 +4865,101 @@ restart: static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); - struct sk_buff *head; + u32 range_truesize, sum_tiny = 0; + struct sk_buff *skb, *head; + struct rb_node *p; u32 start, end; - if (!skb) + p = rb_first(&tp->out_of_order_queue); + skb = rb_entry_safe(p, struct sk_buff, rbnode); +new_range: + if (!skb) { + p = rb_last(&tp->out_of_order_queue); + /* Note: This is possible p is NULL here. We do not + * use rb_entry_safe(), as ooo_last_skb is valid only + * if rbtree is not empty. + */ + tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode); return; - + } start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; - head = skb; + range_truesize = skb->truesize; - for (;;) { - struct sk_buff *next = NULL; + for (head = skb;;) { + skb = tcp_skb_next(skb, NULL); - if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) - next = skb_queue_next(&tp->out_of_order_queue, skb); - skb = next; - - /* Segment is terminated when we see gap or when - * we are at the end of all the queue. */ + /* Range is terminated when we see a gap or when + * we are at the queue end. 
+ */ if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { - tcp_collapse(sk, &tp->out_of_order_queue, - head, skb, start, end); - head = skb; - if (!skb) - break; - /* Start new segment */ + /* Do not attempt collapsing tiny skbs */ + if (range_truesize != head->truesize || + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { + tcp_collapse(sk, NULL, &tp->out_of_order_queue, + head, skb, start, end); + } else { + sum_tiny += range_truesize; + if (sum_tiny > sk->sk_rcvbuf >> 3) + return; + } + + goto new_range; + } + + range_truesize += skb->truesize; + if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) start = TCP_SKB_CB(skb)->seq; + if (after(TCP_SKB_CB(skb)->end_seq, end)) end = TCP_SKB_CB(skb)->end_seq; - } else { - if (before(TCP_SKB_CB(skb)->seq, start)) - start = TCP_SKB_CB(skb)->seq; - if (after(TCP_SKB_CB(skb)->end_seq, end)) - end = TCP_SKB_CB(skb)->end_seq; - } } } /* * Purge the out-of-order queue. + * Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. * Return true if queue was pruned. */ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - bool res = false; + struct rb_node *node, *prev; + int goal; - if (!skb_queue_empty(&tp->out_of_order_queue)) { - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); - __skb_queue_purge(&tp->out_of_order_queue); + if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) + return false; - /* Reset SACK state. A conforming SACK implementation will - * do the same at a timeout based retransmit. When a connection - * is in a sad state like this, we care only about integrity - * of the connection not performance. - */ - if (tp->rx_opt.sack_ok) - tcp_sack_reset(&tp->rx_opt); - sk_mem_reclaim(sk); - res = true; - } - return res; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); + goal = sk->sk_rcvbuf >> 3; + node = &tp->ooo_last_skb->rbnode; + do { + prev = rb_prev(node); + rb_erase(node, &tp->out_of_order_queue); + goal -= rb_to_skb(node)->truesize; + __kfree_skb(rb_to_skb(node)); + if (!prev || goal <= 0) { + sk_mem_reclaim(sk); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && + !tcp_under_memory_pressure(sk)) + break; + goal = sk->sk_rcvbuf >> 3; + } + + node = prev; + } while (node); + tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode); + + /* Reset SACK state. A conforming SACK implementation will + * do the same at a timeout based retransmit. When a connection + * is in a sad state like this, we care only about integrity + * of the connection not performance. + */ + if (tp->rx_opt.sack_ok) + tcp_sack_reset(&tp->rx_opt); + + return true; } /* Reduce allocated memory if we can, trying to get @@ -4866,9 +4982,12 @@ static int tcp_prune_queue(struct sock *sk) else if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) + return 0; + tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) - tcp_collapse(sk, &sk->sk_receive_queue, + tcp_collapse(sk, &sk->sk_receive_queue, NULL, skb_peek(&sk->sk_receive_queue), NULL, tp->copied_seq, tp->rcv_nxt); @@ -4974,7 +5093,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out of order data. 
*/ - (ofo_possible && skb_peek(&tp->out_of_order_queue))) { + (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) { /* Then ack it now */ tcp_send_ack(sk); } else { @@ -5210,7 +5329,7 @@ syn_challenge: return true; discard: - __kfree_skb(skb); + tcp_drop(sk, skb); return false; } @@ -5428,7 +5547,7 @@ csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: - __kfree_skb(skb); + tcp_drop(sk, skb); } EXPORT_SYMBOL(tcp_rcv_established); @@ -5653,12 +5772,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); - tcp_enter_quickack_mode(sk); + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: - __kfree_skb(skb); + tcp_drop(sk, skb); return 0; } else { tcp_send_ack(sk); @@ -6015,7 +6134,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (!queued) { discard: - __kfree_skb(skb); + tcp_drop(sk, skb); } return 0; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 96f3209ba395..21a0fcbd7e64 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1727,6 +1727,7 @@ discard_it: return 0; discard_and_relse: + sk_drops_add(sk, skb); sock_put(sk); goto discard_it; @@ -1840,7 +1841,7 @@ void tcp_v4_destroy_sock(struct sock *sk) tcp_write_queue_purge(sk); /* Cleans up our, hopefully empty, out_of_order_queue. */ - __skb_queue_purge(&tp->out_of_order_queue); + skb_rbtree_purge(&tp->out_of_order_queue); #ifdef CONFIG_TCP_MD5SIG /* Clean up the MD5 key list, if any */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4c1c94fa8f08..a48846d81b41 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -200,8 +200,9 @@ kill: inet_twsk_deschedule_put(tw); return TCP_TW_SUCCESS; } + } else { + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); } - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; @@ -495,7 +496,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->snd_cwnd_cnt = 0; tcp_init_xmit_timers(newsk); - __skb_queue_head_init(&newtp->out_of_order_queue); newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1; newtp->rx_opt.saw_tstamp = 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e490c9a29034..9ada5e4a28e5 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -177,8 +177,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp, } /* Account for an ACK we sent. */ -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + u32 rcv_nxt) { + struct tcp_sock *tp = tcp_sk(sk); + + if (unlikely(rcv_nxt != tp->rcv_nxt)) + return; /* Special ACK sent by DCTCP to reflect ECN */ tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } @@ -901,8 +906,8 @@ out: * We are working here with either a clone of the original * SKB, or a fresh unique copy made by the retransmit engine. 
*/ -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, - gfp_t gfp_mask) +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; @@ -962,7 +967,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); - th->ack_seq = htonl(tp->rcv_nxt); + th->ack_seq = htonl(rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); @@ -1005,7 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, icsk->icsk_af_ops->send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); if (skb->len != tcp_header_size) tcp_event_data_sent(tp, sk); @@ -1036,6 +1041,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, return net_xmit_eval(err); } +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask) +{ + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, + tcp_sk(sk)->rcv_nxt); +} + /* This routine just queues the buffer for sending. * * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, @@ -2251,14 +2263,18 @@ void tcp_send_loss_probe(struct sock *sk) skb = tcp_write_queue_tail(sk); } + if (unlikely(!skb)) { + WARN_ONCE(tp->packets_out, + "invalid inflight: %u state %u cwnd %u mss %d\n", + tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); + inet_csk(sk)->icsk_pending = 0; + return; + } + /* At most one outstanding TLP retransmission. */ if (tp->tlp_high_seq) goto rearm_timer; - /* Retransmit last segment. */ - if (WARN_ON(!skb)) - goto rearm_timer; - if (skb_still_in_host_queue(sk, skb)) goto rearm_timer; @@ -3304,8 +3320,6 @@ void tcp_send_delayed_ack(struct sock *sk) int ato = icsk->icsk_ack.ato; unsigned long timeout; - tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); - if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; @@ -3354,7 +3368,7 @@ void tcp_send_delayed_ack(struct sock *sk) } /* This routine sends an ack and also updates the window. */ -void tcp_send_ack(struct sock *sk) +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) { struct sk_buff *buff; @@ -3362,8 +3376,6 @@ void tcp_send_ack(struct sock *sk) if (sk->sk_state == TCP_CLOSE) return; - tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); - /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. @@ -3391,9 +3403,14 @@ void tcp_send_ack(struct sock *sk) /* Send it off, this clears delayed acks for us. */ skb_mstamp_get(&buff->skb_mstamp); - tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); + __tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC), rcv_nxt); +} +EXPORT_SYMBOL_GPL(__tcp_send_ack); + +void tcp_send_ack(struct sock *sk) +{ + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } -EXPORT_SYMBOL_GPL(tcp_send_ack); /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 851d5c9e3ecc..0f50248bad17 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -69,6 +69,7 @@ config INET6_ESP select CRYPTO_CBC select CRYPTO_SHA1 select CRYPTO_DES + select CRYPTO_ECHAINIV ---help--- Support for IPsec ESP. 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 199658afa68b..d0a1c54a1634 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3858,7 +3858,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) p++; continue; } - state->offset++; return ifa; } @@ -3882,13 +3881,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, return ifa; } + state->offset = 0; while (++state->bucket < IN6_ADDR_HSIZE) { - state->offset = 0; hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket], addr_lst) { if (!net_eq(dev_net(ifa->idev->dev), net)) continue; - state->offset++; return ifa; } } @@ -4513,8 +4511,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, /* unicast address incl. temp addr */ list_for_each_entry(ifa, &idev->addr_list, if_list) { - if (++ip_idx < s_ip_idx) - continue; + if (ip_idx < s_ip_idx) + goto next; err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -4523,6 +4521,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); +next: + ip_idx++; } break; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index d7c1ee7cf0e2..7a62fd9173d0 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -663,13 +663,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, } if (np->rxopt.bits.rxorigdstaddr) { struct sockaddr_in6 sin6; - __be16 *ports = (__be16 *) skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 <= skb->len) { + end = skb_transport_offset(skb) + 4; + if (end <= 0 || pskb_may_pull(skb, end)) { /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. */ + ports = (__be16 *)skb_transport_header(skb); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ipv6_hdr(skb)->daddr; diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index c612daad9e92..59127b41414f 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -118,6 +118,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); skb->network_header = (u8 *)ipv6h - skb->head; + skb_reset_mac_len(skb); if (udpfrag) { int err = ip6_find_1stfragopt(skb, &prevhdr); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 74786783834b..f8cca81d66f2 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -169,39 +169,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, const struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); + unsigned int head_room; struct ipv6hdr *hdr; u8 proto = fl6->flowi6_proto; int seg_len = skb->len; int hlimit = -1; u32 mtu; - if (opt) { - unsigned int head_room; + head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); + if (opt) + head_room += opt->opt_nflen + opt->opt_flen; - /* First: exthdrs may take lots of space (~8K for now) - MAX_HEADER is not enough. 
- */ - head_room = opt->opt_nflen + opt->opt_flen; - seg_len += head_room; - head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); - - if (skb_headroom(skb) < head_room) { - struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); - if (!skb2) { - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_OUTDISCARDS); - kfree_skb(skb); - return -ENOBUFS; - } - consume_skb(skb); - skb = skb2; - /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, - * it is safe to call in our context (socket lock not held) - */ - skb_set_owner_w(skb, (struct sock *)sk); + if (unlikely(skb_headroom(skb) < head_room)) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); + if (!skb2) { + IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), + IPSTATS_MIB_OUTDISCARDS); + kfree_skb(skb); + return -ENOBUFS; } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); + consume_skb(skb); + skb = skb2; + } + + if (opt) { + seg_len += opt->opt_nflen + opt->opt_flen; + if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); + if (opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); } @@ -559,6 +557,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 3c2468bd0b7c..8d55abb1a689 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1096,7 +1096,7 @@ static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; int encap_limit = -1; struct flowi6 fl6; __u8 dsfield; @@ -1104,6 +1104,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + /* ensure we can access the full inner ip header */ + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + return -1; + + iph = ip_hdr(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); tproto = ACCESS_ONCE(t->parms.proto); @@ -1142,7 +1147,7 @@ static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct ipv6hdr *ipv6h; int encap_limit = -1; __u16 offset; struct flowi6 fl6; @@ -1151,6 +1156,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + return -1; + + ipv6h = ipv6_hdr(skb); tproto = ACCESS_ONCE(t->parms.proto); if ((tproto != IPPROTO_IPV6 && tproto != 0) || ip6_tnl_addr_conflict(t, ipv6h)) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index b8bf123f7f79..5dd544c5cfe2 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -469,12 +469,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_dst_release; } - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); - skb_dst_set(skb, dst); - skb->dev = skb_dst(skb)->dev; - mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { @@ -487,9 +483,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) htonl(mtu)); } - return -EMSGSIZE; + err = -EMSGSIZE; + goto tx_err_dst_release; } + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); + skb_dst_set(skb, dst); + skb->dev = skb_dst(skb)->dev; + err = dst_output(t->net, 
skb->sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); @@ -1140,6 +1141,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { .priority = 100, }; +static bool is_vti6_tunnel(const struct net_device *dev) +{ + return dev->netdev_ops == &vti6_netdev_ops; +} + +static int vti6_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ip6_tnl *t = netdev_priv(dev); + + if (!is_vti6_tunnel(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + if (!net_eq(t->net, dev_net(dev))) + xfrm_garbage_collect(t->net); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block vti6_notifier_block __read_mostly = { + .notifier_call = vti6_device_event, +}; + /** * vti6_tunnel_init - register protocol and reserve needed resources * @@ -1150,6 +1178,8 @@ static int __init vti6_tunnel_init(void) const char *msg; int err; + register_netdevice_notifier(&vti6_notifier_block); + msg = "tunnel device"; err = register_pernet_device(&vti6_net_ops); if (err < 0) @@ -1182,6 +1212,7 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti6_net_ops); pernet_dev_failed: + unregister_netdevice_notifier(&vti6_notifier_block); pr_err("vti6 init: failed to register %s\n", msg); return err; } @@ -1196,6 +1227,7 @@ static void __exit vti6_tunnel_cleanup(void) xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); unregister_pernet_device(&vti6_net_ops); + unregister_netdevice_notifier(&vti6_notifier_block); } module_init(vti6_tunnel_init); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 06640685ff43..a5ec9a0cbb80 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2061,7 +2061,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev) mld_send_initial_cr(idev); idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } } @@ -2073,7 +2074,8 @@ static void mld_dad_timer_expire(unsigned long data) if (idev->mc_dad_count) { idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } in6_dev_put(idev); } @@ -2388,17 +2390,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, { int err; - /* callers have the socket lock and rtnl lock - * so no other readers or writers of iml or its sflist - */ + write_lock_bh(&iml->sflock); if (!iml->sflist) { /* any-source empty exclude case */ - return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); + err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); + } else { + err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, + iml->sflist->sl_count, iml->sflist->sl_addr, 0); + sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max)); + iml->sflist = NULL; } - err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, - iml->sflist->sl_count, iml->sflist->sl_addr, 0); - sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max)); - iml->sflist = NULL; + write_unlock_bh(&iml->sflock); return err; } @@ -2431,7 +2433,8 @@ static void mld_ifc_timer_expire(unsigned long data) if (idev->mc_ifc_count) { idev->mc_ifc_count--; if (idev->mc_ifc_count) - mld_ifc_start_timer(idev, idev->mc_maxdelay); + mld_ifc_start_timer(idev, + unsolicited_report_interval(idev)); } 
in6_dev_put(idev); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index dfe55e7ef07d..e03bb5a6b8d4 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1651,10 +1651,9 @@ int ndisc_rcv(struct sk_buff *skb) return 0; } - memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); - switch (msg->icmph.icmp6_type) { case NDISC_NEIGHBOUR_SOLICITATION: + memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); ndisc_recv_ns(skb); break; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f424dad89054..6c65e0f6b182 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -680,6 +680,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, return -ENOMEM; j = 0; + memset(&mtpar, 0, sizeof(mtpar)); mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; @@ -2076,6 +2077,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = { .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, + .me = THIS_MODULE, }, }; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index eb2dc39f7066..5a9ae56e7868 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -118,7 +118,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) if (hdr == NULL) goto err_reg; - net->nf_frag.sysctl.frags_hdr = hdr; + net->nf_frag_frags_hdr = hdr; return 0; err_reg: @@ -132,8 +132,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; - table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); + table = net->nf_frag_frags_hdr->ctl_table_arg; + unregister_net_sysctl_table(net->nf_frag_frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } @@ -601,6 +601,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)skb_transport_header(clone); + skb_orphan(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? 
skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f55c7aa1db34..ac3324a8e906 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1414,8 +1414,12 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu); void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) { - ip6_update_pmtu(skb, sock_net(sk), mtu, - sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid); + int oif = sk->sk_bound_dev_if; + + if (!oif && skb->dev) + oif = l3mdev_master_ifindex(skb->dev); + + ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid); } EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index dec4e7bda5f3..11282ffca567 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -692,7 +692,6 @@ static int ipip6_rcv(struct sk_buff *skb) if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6))) goto out; - iph = ip_hdr(skb); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aa634b3815fa..ce08617dc88e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1507,6 +1507,7 @@ discard_it: return 0; discard_and_relse: + sk_drops_add(sk, skb); sock_put(sk); goto discard_it; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 4d09ce6fa90e..64862c5084ee 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -165,9 +165,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (toobig && xfrm6_local_dontfrag(skb)) { xfrm6_local_rxpmtu(skb, mtu); + kfree_skb(skb); return -EMSGSIZE; } else if (!skb->ignore_df && toobig && skb->sk) { xfrm_local_error(skb, mtu); + kfree_skb(skb); return -EMSGSIZE; } diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 4a116d766c15..7cc9db38e1b6 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -774,6 +774,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; lock_sock(sk); + + /* Ensure that the socket is not already bound */ + if (self->ias_obj) { + err = -EINVAL; + goto out; + } + #ifdef CONFIG_IRDA_ULTRA /* Special care for Ultra sockets */ if ((sk->sk_type == SOCK_DGRAM) && @@ -2020,7 +2027,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, err = -EINVAL; goto out; } - irias_insert_object(ias_obj); + + /* Only insert newly allocated objects */ + if (free_ias) + irias_insert_object(ias_obj); + kfree(ias_opt); break; case IRLMP_IAS_DEL: diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index aeffb65181f5..5984cc35d508 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -705,10 +705,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, char uid[9]; /* Verify the input sockaddr */ - if (!addr || addr->sa_family != AF_IUCV) - return -EINVAL; - - if (addr_len < sizeof(struct sockaddr_iucv)) + if (addr_len < sizeof(struct sockaddr_iucv) || + addr->sa_family != AF_IUCV) return -EINVAL; lock_sock(sk); @@ -852,7 +850,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, struct iucv_sock *iucv = iucv_sk(sk); int err; - if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) + if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV) return -EINVAL; if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) diff --git a/net/key/af_key.c b/net/key/af_key.c index 3ba903ff2bb0..58c045d027fc 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -196,30 +196,22 @@ static int pfkey_release(struct 
socket *sock) return 0; } -static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, - gfp_t allocation, struct sock *sk) +static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation, + struct sock *sk) { int err = -ENOBUFS; - sock_hold(sk); - if (*skb2 == NULL) { - if (atomic_read(&skb->users) != 1) { - *skb2 = skb_clone(skb, allocation); - } else { - *skb2 = skb; - atomic_inc(&skb->users); - } - } - if (*skb2 != NULL) { - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { - skb_set_owner_r(*skb2, sk); - skb_queue_tail(&sk->sk_receive_queue, *skb2); - sk->sk_data_ready(sk); - *skb2 = NULL; - err = 0; - } + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) + return err; + + skb = skb_clone(skb, allocation); + + if (skb) { + skb_set_owner_r(skb, sk); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); + err = 0; } - sock_put(sk); return err; } @@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; - struct sk_buff *skb2 = NULL; int err = -ESRCH; /* XXX Do we need something like netlink_overrun? I think @@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, * socket. */ if (pfk->promisc) - pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* the exact target will be processed later */ if (sk == one_sk) @@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, continue; } - err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* Error is cleared after successful sending to at least one * registered KM */ @@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, rcu_read_unlock(); if (one_sk != NULL) - err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + err = pfkey_broadcast_one(skb, allocation, one_sk); - kfree_skb(skb2); kfree_skb(skb); return err; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 92df832a1896..591d18785285 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1145,7 +1145,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len /* Get routing info from the tunnel socket */ skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); + skb_dst_set(skb, sk_dst_check(sk, 0)); inet = inet_sk(sk); fl = &inet->cork.fl; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 48ab93842322..af74e3ba0f92 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -177,21 +177,23 @@ pass_up: tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk = tunnel->sock; - else { + sock_hold(sk); + } else { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); + if (!sk) { + read_unlock_bh(&l2tp_ip_lock); + goto discard; + } + + sock_hold(sk); read_unlock_bh(&l2tp_ip_lock); } - if (sk == NULL) - goto discard; - - sock_hold(sk); - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c8f483cd2ca9..a30f6fb6caa9 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -188,22 +188,24 @@ pass_up: tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(&init_net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk 
= tunnel->sock; - else { + sock_hold(sk); + } else { struct ipv6hdr *iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, 0, tunnel_id); + if (!sk) { + read_unlock_bh(&l2tp_ip6_lock); + goto discard; + } + + sock_hold(sk); read_unlock_bh(&l2tp_ip6_lock); } - if (sk == NULL) - goto discard; - - sock_hold(sk); - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 83e8a295c806..c153fc2883a8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -726,7 +726,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); - unsigned long cpu_flags; size_t copied = 0; u32 peek_seq = 0; u32 *seq, skb_len; @@ -851,9 +850,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, goto copy_uaddr; if (!(flags & MSG_PEEK)) { - spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb); - spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); + skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); *seq = 0; } @@ -874,9 +872,8 @@ copy_uaddr: llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { - spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb); - spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); + skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); *seq = 0; } diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 842851cef698..e896a2c53b12 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value) rcu_read_lock_bh(); sap = __llc_sap_find(sap_value); - if (sap) - llc_sap_hold(sap); + if (!sap || !llc_sap_hold_safe(sap)) + sap = NULL; rcu_read_unlock_bh(); return sap; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 00a8cc572a22..67348d8ac35d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -219,7 +219,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* Keys without a station are used for TX only */ - if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP)) + if (sta && test_sta_flag(sta, WLAN_STA_MFP)) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; break; case NL80211_IFTYPE_ADHOC: @@ -286,7 +286,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; } - ieee80211_key_free(key, true); + ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); ret = 0; out_unlock: diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 08ac73b33947..0a35dd68f1a1 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -948,8 +948,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, if (len < IEEE80211_DEAUTH_FRAME_LEN) return; - ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", - mgmt->sa, mgmt->da, mgmt->bssid, reason); + ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); sta_info_destroy_addr(sdata, mgmt->sa); } @@ -967,9 +967,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); - ibss_dbg(sdata, - "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", - mgmt->sa, mgmt->da, 
mgmt->bssid, auth_transaction); + ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", + mgmt->bssid, auth_transaction); if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; @@ -1174,10 +1174,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, rx_timestamp = drv_get_tsf(local, sdata); } - ibss_dbg(sdata, - "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", + ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", mgmt->sa, mgmt->bssid, - (unsigned long long)rx_timestamp, + (unsigned long long)rx_timestamp); + ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", (unsigned long long)beacon_timestamp, (unsigned long long)(rx_timestamp - beacon_timestamp), jiffies); @@ -1536,9 +1536,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, tx_last_beacon = drv_tx_last_beacon(local); - ibss_dbg(sdata, - "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", - mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); + ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", + mgmt->bssid, tx_last_beacon); if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 00b25114843f..3678f2d5fcfe 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -988,6 +988,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, if (local->open_count == 0) ieee80211_clear_tx_pending(local); + sdata->vif.bss_conf.beacon_int = 0; + /* * If the interface goes down while suspended, presumably because * the device was unplugged and that happens before our resume, diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 4a72c0d1e56f..91a4e606edcd 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -647,11 +647,15 @@ int ieee80211_key_link(struct ieee80211_key *key, { struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; - int idx, ret; - bool pairwise; - - pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; - idx = key->conf.keyidx; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; + /* + * We want to delay tailroom updates only for station - in that + * case it helps roaming speed, but in other cases it hurts and + * can cause warnings to appear. 
+ */ + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; mutex_lock(&sdata->local->key_mtx); @@ -679,14 +683,14 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - ieee80211_key_destroy(old_key, true); + ieee80211_key_destroy(old_key, delay_tailroom); ieee80211_debugfs_key_add(key); if (!local->wowlan) { ret = ieee80211_key_enable_hw_accel(key); if (ret) - ieee80211_key_free(key, true); + ieee80211_key_free(key, delay_tailroom); } else { ret = 0; } @@ -874,7 +878,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { @@ -884,7 +889,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } mutex_unlock(&local->key_mtx); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 2ee53dc1ddf7..15d23aeea634 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -253,8 +253,27 @@ static void ieee80211_restart_work(struct work_struct *work) "%s called with hardware scan in progress\n", __func__); rtnl_lock(); - list_for_each_entry(sdata, &local->interfaces, list) + list_for_each_entry(sdata, &local->interfaces, list) { + /* + * XXX: there may be more work for other vif types and even + * for station mode: a good thing would be to run most of + * the iface type's dependent _stop (ieee80211_mg_stop, + * ieee80211_ibss_stop) etc... + * For now, fix only the specific bug that was seen: race + * between csa_connection_drop_work and us. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * This worker is scheduled from the iface worker that + * runs on mac80211's workqueue, so we can't be + * scheduling this worker after the cancel right here. + * The exception is ieee80211_chswitch_done. + * Then we can have a race... 
+ */ + cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); + } flush_delayed_work(&sdata->dec_tailroom_needed_wk); + } ieee80211_scan_cancel(local); ieee80211_reconfig(local); rtnl_unlock(); @@ -460,10 +479,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_RXSTBC_2 | - IEEE80211_VHT_CAP_RXSTBC_3 | - IEEE80211_VHT_CAP_RXSTBC_4 | + IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index f55cddcd556c..466922f09d04 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -552,6 +552,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, forward = false; reply = true; target_metric = 0; + + if (SN_GT(target_sn, ifmsh->sn)) + ifmsh->sn = target_sn; + if (time_after(jiffies, ifmsh->last_sn_update + net_traversal_jiffies(sdata)) || time_before(jiffies, ifmsh->last_sn_update)) { diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c16b4da20db2..9066003b21b1 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1021,6 +1021,10 @@ static void ieee80211_chswitch_work(struct work_struct *work) */ if (sdata->reserved_chanctx) { + struct ieee80211_supported_band *sband = NULL; + struct sta_info *mgd_sta = NULL; + enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; + /* * with multi-vif csa driver may call ieee80211_csa_finish() * many times while waiting for other interfaces to use their @@ -1029,6 +1033,48 @@ static void ieee80211_chswitch_work(struct work_struct *work) if (sdata->reserved_ready) goto out; + if (sdata->vif.bss_conf.chandef.width != + sdata->csa_chandef.width) { + /* + * For managed interface, we need to also update the AP + * station bandwidth and align the rate scale algorithm + * on the bandwidth change. Here we only consider the + * bandwidth of the new channel definition (as channel + * switch flow does not have the full HT/VHT/HE + * information), assuming that if additional changes are + * required they would be done as part of the processing + * of the next beacon from the AP. 
+ */ + switch (sdata->csa_chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + default: + bw = IEEE80211_STA_RX_BW_20; + break; + case NL80211_CHAN_WIDTH_40: + bw = IEEE80211_STA_RX_BW_40; + break; + case NL80211_CHAN_WIDTH_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + bw = IEEE80211_STA_RX_BW_160; + break; + } + + mgd_sta = sta_info_get(sdata, ifmgd->bssid); + sband = + local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; + } + + if (sdata->vif.bss_conf.chandef.width > + sdata->csa_chandef.width) { + mgd_sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, mgd_sta, + IEEE80211_RC_BW_CHANGED); + } + ret = ieee80211_vif_use_reserved_context(sdata); if (ret) { sdata_info(sdata, @@ -1039,6 +1085,13 @@ static void ieee80211_chswitch_work(struct work_struct *work) goto out; } + if (sdata->vif.bss_conf.chandef.width < + sdata->csa_chandef.width) { + mgd_sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, mgd_sta, + IEEE80211_RC_BW_CHANGED); + } + goto out; } @@ -1833,7 +1886,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, params[ac].acm = acm; params[ac].uapsd = uapsd; - if (params[ac].cw_min > params[ac].cw_max) { + if (params[ac].cw_min == 0 || + params[ac].cw_min > params[ac].cw_max) { sdata_info(sdata, "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", params[ac].cw_min, params[ac].cw_max, aci); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index aef0d41e9a9c..b02fcb388640 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1110,6 +1110,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_nullfunc(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return RX_CONTINUE; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 45fb1abdb265..d221300e59e5 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -466,11 +466,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, if (!skb) return; - if (dropped) { - dev_kfree_skb_any(skb); - return; - } - if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; struct ieee80211_sub_if_data *sdata; @@ -492,6 +487,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, rcu_read_unlock(); dev_kfree_skb_any(skb); + } else if (dropped) { + dev_kfree_skb_any(skb); } else { /* consumes skb */ skb_complete_wifi_ack(skb, acked); @@ -651,6 +648,8 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, /* Track when last TDLS packet was ACKed */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) sta->status_stats.last_tdls_pkt_time = jiffies; + } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + return; } else { ieee80211_lost_packet(sta, info); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a45248a4967b..674fe547ffee 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -431,8 +431,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) info->hw_queue = tx->sdata->vif.cab_queue; - /* no stations in PS mode */ - if (!atomic_read(&ps->num_sta_ps)) + /* no stations in PS mode and no buffered packets */ + if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf)) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; diff --git 
a/net/mac80211/util.c b/net/mac80211/util.c index f4b9f97af092..c8cc9bd7cac1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2006,7 +2006,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (!sta->uploaded) continue; - if (sta->sdata->vif.type != NL80211_IFTYPE_AP) + if (sta->sdata->vif.type != NL80211_IFTYPE_AP && + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) continue; for (state = IEEE80211_STA_NOTEXIST; diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index 3827f359b336..9e1ff9d4cf2d 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) int ret; if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { - u16 crc = crc_ccitt(0, skb->data, skb->len); + struct sk_buff *nskb; + u16 crc; + + if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) { + nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN, + GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + goto err_tx; + } + } + crc = crc_ccitt(0, skb->data, skb->len); put_unaligned_le16(crc, skb_put(skb, 2)); } diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 9a14c237830f..b259a5814965 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], if (tb[IPSET_ATTR_CIDR]) { e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); - if (!e.cidr[0] || e.cidr[0] > HOST_MASK) + if (e.cidr[0] > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } if (tb[IPSET_ATTR_CIDR2]) { e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); - if (!e.cidr[1] || e.cidr[1] > HOST_MASK) + if (e.cidr[1] > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } @@ -492,13 +492,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[], if (tb[IPSET_ATTR_CIDR]) { e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); - if (!e.cidr[0] || e.cidr[0] > HOST_MASK) + if (e.cidr[0] > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } if (tb[IPSET_ATTR_CIDR2]) { e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); - if (!e.cidr[1] || e.cidr[1] > HOST_MASK) + if (e.cidr[1] > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index dd1649caa2b2..ac212542a217 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1809,13 +1809,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ - if (sysctl_expire_nodest_conn(ipvs)) { + __u32 flags = cp->flags; + + /* when timer already started, silently drop the packet.*/ + if (timer_pending(&cp->timer)) + __ip_vs_conn_put(cp); + else + ip_vs_conn_put(cp); + + if (sysctl_expire_nodest_conn(ipvs) && + !(flags & IP_VS_CONN_F_ONE_PACKET)) { /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); } - /* don't restart its timer, and silently - drop the packet. 
*/ - __ip_vs_conn_put(cp); + return NF_DROP; } diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index fce1b1cca32d..99d0e9261a64 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -244,14 +244,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { @@ -372,14 +372,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 120e9ae04db3..a7967af0da82 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3452,6 +3452,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, dreg = nft_type_to_reg(set->dtype); list_for_each_entry(binding, &set->bindings, list) { struct nft_ctx bind_ctx = { + .net = ctx->net, .afi = ctx->afi, .table = ctx->table, .chain = (struct nft_chain *)binding->chain, diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7edcfda288c4..54cde78c2718 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1106,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, + [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, + [NFQA_CFG_MASK] = { .type = NLA_U32 }, + [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, }; static const struct nf_queue_handler nfqh = { diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 1f3c305df45d..b6e72af15237 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -876,7 +876,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); - strlcpy(info->name, compat_tmp.name, sizeof(info->name)); + memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; user += sizeof(compat_tmp); } else @@ -889,9 +889,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (copy_from_user(info, user, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - info->name[sizeof(info->name) - 1] = '\0'; user += sizeof(*info); } + info->name[sizeof(info->name) - 1] = '\0'; size = sizeof(struct xt_counters); size *= info->num_counters; diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 4c126cd18469..b0f4f1bca61f 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -283,6 +283,22 @@ static int idletimer_resume(struct 
notifier_block *notifier, return NOTIFY_DONE; } +static int idletimer_check_sysfs_name(const char *name, unsigned int size) +{ + int ret; + + ret = xt_check_proc_name(name, size); + if (ret < 0) + return ret; + + if (!strcmp(name, "power") || + !strcmp(name, "subsystem") || + !strcmp(name, "uevent")) + return -EINVAL; + + return 0; +} + static int idletimer_tg_create(struct idletimer_tg_info *info) { int ret; @@ -293,6 +309,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) goto out; } + ret = idletimer_check_sysfs_name(info->label, sizeof(info->label)); + if (ret < 0) + goto out_free_timer; + sysfs_attr_init(&info->timer->attr.attr); info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); if (!info->timer->attr.attr.name) { diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 3f33ec44bd28..9f4ec16abfcf 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -787,7 +787,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info, { u32 addr_len; - if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { + if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && + info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); if (addr_len != sizeof(struct in_addr) && addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0fb27debd4fa..6e5c14309fb8 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -62,6 +62,7 @@ #include <asm/cacheflush.h> #include <linux/hash.h> #include <linux/genetlink.h> +#include <linux/nospec.h> #include <net/net_namespace.h> #include <net/sock.h> @@ -639,6 +640,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; + protocol = array_index_nospec(protocol, MAX_LINKS); netlink_lock_table(); #ifdef CONFIG_MODULES @@ -964,6 +966,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } + if (nlk->ngroups == 0) + groups = 0; + else if (nlk->ngroups < 8*sizeof(groups)) + groups &= (1UL << nlk->ngroups) - 1; + bound = nlk->bound; if (bound) { /* Ensure nlk->portid is up-to-date. 
*/ diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index d25212b135ea..04f060488686 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -754,11 +754,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); - pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, + pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, frag_len + LLCP_HEADER_SIZE, &err); if (pdu == NULL) { - pr_err("Could not allocate PDU\n"); - continue; + pr_err("Could not allocate PDU (error=%d)\n", err); + len -= remaining_len; + if (len == 0) + len = err; + break; } pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3a63f33698d3..07668f152a3a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2780,6 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } else if (reserve) { skb_reserve(skb, -reserve); + if (len < reserve) + skb_reset_network_header(skb); } /* Returns -EFAULT on error */ @@ -4174,6 +4176,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } if (req->tp_block_nr) { + unsigned int min_frame_size; + /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) @@ -4196,12 +4200,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; + min_frame_size = po->tp_hdrlen + po->tp_reserve; if (po->tp_version >= TPACKET_V3 && - req->tp_block_size <= - BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr)) + req->tp_block_size < + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size) goto out; - if (unlikely(req->tp_frame_size < po->tp_hdrlen + - po->tp_reserve)) + if (unlikely(req->tp_frame_size < min_frame_size)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; diff --git a/net/rds/loop.c b/net/rds/loop.c index 6b12b68541ae..05cab8c5a379 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -191,4 +191,5 @@ struct rds_transport rds_loop_transport = { .inc_copy_to_user = rds_message_inc_copy_to_user, .inc_free = rds_loop_inc_free, .t_name = "loopback", + .t_type = RDS_TRANS_LOOP, }; diff --git a/net/rds/rds.h b/net/rds/rds.h index 4588860f4c3b..254f1345cf7e 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -401,6 +401,11 @@ struct rds_notifier { int n_status; }; +/* Available as part of RDS core, so doesn't need to participate + * in get_preferred transport etc + */ +#define RDS_TRANS_LOOP 3 + /** * struct rds_transport - transport specific behavioural hooks * diff --git a/net/rds/recv.c b/net/rds/recv.c index 0514af3ab378..6275de19689c 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -76,6 +76,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, return; rs->rs_rcv_bytes += delta; + + /* loop transport doesn't send/recv congestion updates */ + if (rs->rs_transport->t_type == RDS_TRANS_LOOP) + return; + now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d " diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 403746b20263..040d853f48b9 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -382,22 +382,20 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, tcf_bind_filter(tp, &cr.res, base); } - if (old_r) - tcf_exts_change(tp, 
&r->exts, &e); - else - tcf_exts_change(tp, &cr.exts, &e); - if (old_r && old_r != r) tcindex_filter_result_init(old_r); oldp = p; r->res = cr.res; + tcf_exts_change(tp, &r->exts, &e); + rcu_assign_pointer(tp->root, cp); if (r == &new_filter_result) { struct tcindex_filter *nfp; struct tcindex_filter __rcu **fp; + f->result.res = r->res; tcf_exts_change(tp, &f->result.exts, &r->exts); fp = cp->h + (handle % cp->hash); diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 3fee70d9814f..562edd50fa94 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -20,7 +20,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) { qdisc_drop(skb, sch); - return NET_XMIT_SUCCESS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index f9e8deeeac96..a5745cb2d014 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -444,7 +444,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) { if (tb[TCA_GRED_LIMIT] != NULL) sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); - return gred_change_table_def(sch, opt); + return gred_change_table_def(sch, tb[TCA_GRED_DPS]); } if (tb[TCA_GRED_PARMS] == NULL || diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 45d4b2f22f62..aff2a1b46f7f 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -501,6 +501,9 @@ static void hhf_destroy(struct Qdisc *sch) hhf_free(q->hhf_valid_bits[i]); } + if (!q->hh_flows) + return; + for (i = 0; i < HH_FLOWS_CNT; i++) { struct hh_flow_state *flow, *next; struct list_head *head = &q->hh_flows[i]; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 87b02ed3d5f2..daa01d5604c2 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1025,6 +1025,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) int err; int i; + qdisc_watchdog_init(&q->watchdog, sch); + INIT_WORK(&q->work, htb_work_func); + if (!opt) return -EINVAL; @@ -1045,8 +1048,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < TC_HTB_NUMPRIO; i++) INIT_LIST_HEAD(q->drops + i); - qdisc_watchdog_init(&q->watchdog, sch); - INIT_WORK(&q->work, htb_work_func); __skb_queue_head_init(&q->direct_queue); if (tb[TCA_HTB_DIRECT_QLEN]) diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index bcdd54bb101c..cef36ad691dd 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -254,7 +254,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) static int multiq_init(struct Qdisc *sch, struct nlattr *opt) { struct multiq_sched_data *q = qdisc_priv(sch); - int i, err; + int i; q->queues = NULL; @@ -269,12 +269,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < q->max_bands; i++) q->queues[i] = &noop_qdisc; - err = multiq_tune(sch, opt); - - if (err) - kfree(q->queues); - - return err; + return multiq_tune(sch, opt); } static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index b7c29d5b6f04..7acf1f2b8dfc 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -432,6 +432,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) int count = 1; int rc = NET_XMIT_SUCCESS; + /* Do not fool qdisc_drop_all() */ + skb->prev = NULL; + /* Random duplication */ if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) ++count; @@ -943,11 
+946,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt) struct netem_sched_data *q = qdisc_priv(sch); int ret; + qdisc_watchdog_init(&q->watchdog, sch); + if (!opt) return -EINVAL; - qdisc_watchdog_init(&q->watchdog, sch); - q->loss_model = CLG_RANDOM; ret = netem_change(sch, opt); if (ret) diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 0505b8408c8b..4bf2b599ef98 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, child); if (likely(ret == NET_XMIT_SUCCESS)) { + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; @@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch) skb = child->dequeue(child); if (skb) { qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; } else { if (!red_is_idling(&q->vars)) @@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch) if (child->ops->drop && (len = child->ops->drop(child)) > 0) { q->stats.other++; qdisc_qstats_drop(sch); + sch->qstats.backlog -= len; sch->q.qlen--; return len; } @@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch) struct red_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); + sch->qstats.backlog = 0; sch->q.qlen = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index c2fbde742f37..a06c9d6bfc9c 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -432,12 +432,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt) { struct tbf_sched_data *q = qdisc_priv(sch); + qdisc_watchdog_init(&q->watchdog, sch); + q->qdisc = &noop_qdisc; + if (opt == NULL) return -EINVAL; q->t_c = ktime_get_ns(); - qdisc_watchdog_init(&q->watchdog, sch); - q->qdisc = &noop_qdisc; return tbf_change(sch, opt); } diff --git a/net/sctp/associola.c b/net/sctp/associola.c index a40b8b0ef0d5..f085b01b6603 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -486,8 +486,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer) { - struct list_head *pos; - struct sctp_transport *transport; + struct sctp_transport *transport; + struct list_head *pos; + struct sctp_chunk *ch; pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, &peer->ipaddr.sa); @@ -543,7 +544,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, */ if (!list_empty(&peer->transmitted)) { struct sctp_transport *active = asoc->peer.active_path; - struct sctp_chunk *ch; /* Reset the transport of each chunk on this list */ list_for_each_entry(ch, &peer->transmitted, @@ -565,6 +565,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, sctp_transport_hold(active); } + list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) + if (ch->transport == peer) + ch->transport = NULL; + asoc->peer.transport_count--; sctp_transport_free(peer); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 13c7f42b7040..53f1b33bca4e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -248,11 +248,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); + if (asoc && (asoc->base.sk != sk || asoc->base.dead)) + asoc = NULL; spin_unlock_bh(&sctp_assocs_id_lock); - if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) - return NULL; - return asoc; } diff --git 
a/net/socket.c b/net/socket.c index 00a275ee227a..3647dc580398 100644 --- a/net/socket.c +++ b/net/socket.c @@ -91,6 +91,7 @@ #include <linux/xattr.h> #include <linux/seemp_api.h> #include <linux/seemp_instrumentation.h> +#include <linux/nospec.h> #include <asm/uaccess.h> #include <asm/unistd.h> @@ -540,7 +541,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) if (!err && (iattr->ia_valid & ATTR_UID)) { struct socket *sock = SOCKET_I(d_inode(dentry)); - sock->sk->sk_uid = iattr->ia_uid; + if (sock->sk) + sock->sk->sk_uid = iattr->ia_uid; + else + err = -ENOENT; } return err; @@ -591,12 +595,16 @@ static struct socket *sock_alloc(void) * an inode not a file. */ -void sock_release(struct socket *sock) +static void __sock_release(struct socket *sock, struct inode *inode) { if (sock->ops) { struct module *owner = sock->ops->owner; + if (inode) + inode_lock(inode); sock->ops->release(sock); + if (inode) + inode_unlock(inode); sock->ops = NULL; module_put(owner); } @@ -611,6 +619,11 @@ void sock_release(struct socket *sock) } sock->file = NULL; } + +void sock_release(struct socket *sock) +{ + __sock_release(sock, NULL); +} EXPORT_SYMBOL(sock_release); void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) @@ -1047,7 +1060,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma) static int sock_close(struct inode *inode, struct file *filp) { - sock_release(SOCKET_I(inode)); + __sock_release(SOCKET_I(inode), inode); return 0; } @@ -2384,6 +2397,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; + call = array_index_nospec(call, SYS_SENDMMSG + 1); len = nargs[call]; if (len > sizeof(a)) @@ -2818,9 +2832,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void __user *)(&rxnfc->fs.location + 1) - - (void __user *)&rxnfc->fs.ring_cookie) || - copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, - sizeof(rxnfc->rule_cnt))) + (void __user *)&rxnfc->fs.ring_cookie)) + return -EFAULT; + if (ethcmd == ETHTOOL_GRXCLSRLALL) { + if (put_user(rule_cnt, &rxnfc->rule_cnt)) + return -EFAULT; + } else if (copy_in_user(&rxnfc->rule_cnt, + &compat_rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) return -EFAULT; } diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 41248b1820c7..cc27e38392ea 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c @@ -272,13 +272,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred) { struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred; - bool ret; - - get_rpccred(cred); - ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); - put_rpccred(cred); - - return ret; + return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); } static const struct rpc_credops generic_credops = { diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 1f0687d8e3d7..62fca77bf3c7 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1722,6 +1722,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp) for (i=0; i < rqstp->rq_enc_pages_num; i++) __free_page(rqstp->rq_enc_pages[i]); kfree(rqstp->rq_enc_pages); + rqstp->rq_release_snd_buf = NULL; } static int @@ -1730,6 +1731,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp) struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; int first, last, i; + if (rqstp->rq_release_snd_buf) + rqstp->rq_release_snd_buf(rqstp); + if 
(snd_buf->page_len == 0) { rqstp->rq_enc_pages_num = 0; return 0; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index a6cbb2104667..71f15da72f02 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -945,7 +945,7 @@ static void call_xpt_users(struct svc_xprt *xprt) spin_lock(&xprt->xpt_lock); while (!list_empty(&xprt->xpt_users)) { u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list); - list_del(&u->list); + list_del_init(&u->list); u->callback(u); } spin_unlock(&xprt->xpt_lock); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 4439ac4c1b53..ed9bbd383f7d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -512,7 +512,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode); static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, size_t nbytes) { - static __be32 *p; + __be32 *p; int space_left; int frag1bytes, frag2bytes; @@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) WARN_ON_ONCE(xdr->iov); return; } - if (fraglen) { + if (fraglen) xdr->end = head->iov_base + head->iov_len; - xdr->page_ptr--; - } /* (otherwise assume xdr->end is already set) */ + xdr->page_ptr--; head->iov_len = len; buf->len = len; xdr->p = head->iov_base + head->iov_len; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2e98f4a243e5..112c191b8336 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -758,8 +758,15 @@ void xprt_connect(struct rpc_task *task) return; if (xprt_test_and_set_connecting(xprt)) return; - xprt->stat.connect_start = jiffies; - xprt->ops->connect(xprt, task); + /* Race breaker */ + if (!xprt_connected(xprt)) { + xprt->stat.connect_start = jiffies; + xprt->ops->connect(xprt, task); + } else { + xprt_clear_connecting(xprt); + task->tk_status = 0; + rpc_wake_up_queued_task(&xprt->pending, task); + } } xprt_release_write(xprt, task); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index decd8b8d751f..e824fe1c3fe5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1531,7 +1531,6 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; unsigned char max_level = 0; - int unix_sock_count = 0; if (too_many_unix_fds(current)) return -ETOOMANYREFS; @@ -1539,11 +1538,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) for (i = scm->fp->count - 1; i >= 0; i--) { struct sock *sk = unix_get_socket(scm->fp->fp[i]); - if (sk) { - unix_sock_count++; + if (sk) max_level = max(max_level, unix_sk(sk)->recursion_level); - } } if (unlikely(max_level > MAX_RECURSION_LEVEL)) return -ETOOMANYREFS; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 60324f7c72bd..7f1d166ce612 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -430,14 +430,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode) return transport->shutdown(vsock_sk(sk), mode); } -void vsock_pending_work(struct work_struct *work) +static void vsock_pending_work(struct work_struct *work) { struct sock *sk; struct sock *listener; struct vsock_sock *vsk; bool cleanup; - vsk = container_of(work, struct vsock_sock, dwork.work); + vsk = container_of(work, struct vsock_sock, pending_work.work); sk = sk_vsock(vsk); listener = vsk->listener; cleanup = true; @@ -477,7 +477,6 @@ out: sock_put(sk); sock_put(listener); } -EXPORT_SYMBOL_GPL(vsock_pending_work); /**** SOCKET OPERATIONS ****/ @@ -576,6 +575,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) return retval; } +static void vsock_connect_timeout(struct work_struct *work); + struct sock 
*__vsock_create(struct net *net, struct socket *sock, struct sock *parent, @@ -618,6 +619,8 @@ struct sock *__vsock_create(struct net *net, vsk->sent_request = false; vsk->ignore_connecting_rst = false; vsk->peer_shutdown = 0; + INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); + INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); psk = parent ? vsock_sk(parent) : NULL; if (parent) { @@ -1094,7 +1097,7 @@ static void vsock_connect_timeout(struct work_struct *work) struct sock *sk; struct vsock_sock *vsk; - vsk = container_of(work, struct vsock_sock, dwork.work); + vsk = container_of(work, struct vsock_sock, connect_work.work); sk = sk_vsock(vsk); lock_sock(sk); @@ -1195,9 +1198,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, * timeout fires. */ sock_hold(sk); - INIT_DELAYED_WORK(&vsk->dwork, - vsock_connect_timeout); - schedule_delayed_work(&vsk->dwork, timeout); + schedule_delayed_work(&vsk->connect_work, timeout); /* Skip ahead to preserve error code set above. */ goto out_wait; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 662bdd20a748..589c8b9908a5 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1099,8 +1099,7 @@ static int vmci_transport_recv_listen(struct sock *sk, vpending->listener = sk; sock_hold(sk); sock_hold(pending); - INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work); - schedule_delayed_work(&vpending->dwork, HZ); + schedule_delayed_work(&vpending->pending_work, HZ); out: return err; diff --git a/net/wireless/core.c b/net/wireless/core.c index a08fb95ddaa2..27d5fadddf97 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1081,6 +1081,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) dev->priv_flags |= IFF_DONT_BRIDGE; + INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); @@ -1166,6 +1167,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, #ifdef CONFIG_CFG80211_WEXT kzfree(wdev->wext.keys); #endif + flush_work(&wdev->disconnect_wk); } /* * synchronise (so that we won't find this netdev diff --git a/net/wireless/core.h b/net/wireless/core.h index 55c64d08db31..6046df0914a6 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -378,6 +378,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *resp_ie, size_t resp_ie_len); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +void cfg80211_autodisconnect_wk(struct work_struct *work); /* SME implementation */ void cfg80211_conn_work(struct work_struct *work); diff --git a/net/wireless/db.txt b/net/wireless/db.txt index 499d98745d44..ce03af7baf68 100644 --- a/net/wireless/db.txt +++ b/net/wireless/db.txt @@ -25,21 +25,22 @@ country AE: DFS-ETSI country AF: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country AI: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country AL: DFS-ETSI (2402 - 2482 @ 40), (20) - (5150 - 5250 @ 80), 
(23), AUTO-BW - (5250 - 5350 @ 80), (23), DFS, AUTO-BW - (5470 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country AM: DFS-ETSI (2402 - 2482 @ 40), (20) @@ -48,9 +49,10 @@ country AM: DFS-ETSI country AN: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country AR: (2402 - 2482 @ 40), (36) @@ -71,18 +73,10 @@ country AS: DFS-FCC country AT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -98,8 +92,8 @@ country AU: DFS-FCC country AW: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country AZ: DFS-ETSI @@ -109,9 +103,10 @@ country AZ: DFS-ETSI country BA: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country BB: DFS-FCC (2402 - 2482 @ 40), (20) @@ -125,18 +120,10 @@ country BD: country BE: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -149,18 +136,10 @@ country BF: DFS-FCC country BG: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 
80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -171,8 +150,8 @@ country BH: country BL: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country BM: DFS-FCC @@ -213,14 +192,14 @@ country BS: DFS-FCC country BT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country BY: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country BZ: @@ -247,18 +226,10 @@ country CF: DFS-FCC country CH: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -271,8 +242,9 @@ country CI: DFS-FCC country CL: (2402 - 2482 @ 40), (20) - (5170 - 5330 @ 160), (20) - (5735 - 5835 @ 80), (20) + (5170 - 5330 @ 160), (24) + (5490 - 5730 @ 160), (24) + (5735 - 5835 @ 80), (30) # 60 gHz band channels 1-3 (57240 - 63720 @ 2160), (50), NO-OUTDOOR @@ -293,10 +265,10 @@ country CO: DFS-FCC country CR: DFS-FCC (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 20), (24) - (5250 - 5330 @ 20), (24), DFS - (5490 - 5730 @ 20), (24), DFS - (5735 - 5835 @ 20), (30) + (5170 - 5250 @ 80), (24), AUTO-BW + (5250 - 5330 @ 80), (24), DFS, AUTO-BW + (5490 - 5730 @ 160), (24), DFS + (5735 - 5835 @ 80), (30) # 60 gHz band channels 1-3 (57240 - 63720 @ 2160), (30) @@ -309,18 +281,10 @@ country CX: DFS-FCC country CY: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -328,18 +292,10 @@ country CY: DFS-ETSI # and http://www.ctu.eu/164/download/VOR/VOR-12-05-2007-6-AN.pdf country CZ: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - 
(5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -351,35 +307,19 @@ country CZ: DFS-ETSI country DE: DFS-ETSI # entries 279004 and 280006 (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country DK: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -412,18 +352,10 @@ country EC: DFS-FCC country EE: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -434,41 +366,25 @@ country EG: DFS-ETSI country ES: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country ET: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + 
(5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country FI: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -481,35 +397,19 @@ country FM: DFS-FCC country FR: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country GB: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -527,9 +427,10 @@ country GE: DFS-ETSI country GF: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country GH: DFS-FCC (2402 - 2482 @ 40), (20) @@ -540,43 +441,35 @@ country GH: DFS-FCC country GI: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country GL: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country GP: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country GR: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 
80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country GT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country GU: DFS-FCC @@ -611,18 +504,10 @@ country HN: country HR: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -635,18 +520,10 @@ country HT: DFS-FCC country HU: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -657,18 +534,10 @@ country ID: country IE: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -679,48 +548,34 @@ country IL: DFS-ETSI # 60 gHz band channels 1-4, base on Etsi En 302 567 (57000 - 66000 @ 2160), (40) -country IN: - (2402 - 2482 @ 40), (20) - (5170 - 5330 @ 160), (23) - (5735 - 5835 @ 80), (33) +country IN: DFS-ETSI + (2402 - 2482 @ 40), (30) + (5170 - 5250 @ 80), (30), AUTO-BW + (5250 - 5330 @ 80), (24), DFS, AUTO-BW + (5490 - 5730 @ 160), (24), DFS + (5735 - 5875 @ 80), (30) country IQ: DFS-ETSI (2402 - 2482 @ 
40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country IS: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country IT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -741,10 +596,10 @@ country JO: (57000 - 66000 @ 2160), (40) country JP: DFS-JP - (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR - (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR - (5490 - 5710 @ 160), (20), DFS + (2402 - 2482 @ 40), (23) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5710 @ 160), (23), DFS # 60 gHz band channels 1-4 (57240 - 65880 @ 2160), (40) @@ -756,8 +611,8 @@ country KE: DFS-ETSI country KH: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country KN: DFS-FCC @@ -808,18 +663,10 @@ country LC: DFS-FCC country LI: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -832,58 +679,34 @@ country LK: DFS-FCC country LS: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country LT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 
80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country LU: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country LV: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -896,26 +719,29 @@ country MA: DFS-ETSI country MC: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country MD: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country ME: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country MF: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country MH: DFS-FCC @@ -927,9 +753,14 @@ country MH: DFS-FCC country MK: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), 
(23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) + +country MM: DFS-ETSI + (2402 - 2482 @ 40), (20) + (5735 - 5835 @ 80), (30) country MN: DFS-FCC (2402 - 2482 @ 40), (20) @@ -954,39 +785,32 @@ country MP: DFS-FCC country MQ: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country MR: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country MT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) -country MU: DFS-FCC +country MU: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (24), AUTO-BW - (5250 - 5330 @ 80), (24), DFS, AUTO-BW - (5490 - 5730 @ 160), (24), DFS - (5735 - 5835 @ 80), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country MV: DFS-ETSI (2402 - 2482 @ 40), (20) @@ -996,8 +820,8 @@ country MV: DFS-ETSI country MW: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country MX: DFS-FCC @@ -1041,35 +865,19 @@ country NI: DFS-FCC country NL: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country NO: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), 
DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1089,8 +897,8 @@ country NZ: DFS-FCC country OM: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country PA: @@ -1108,9 +916,10 @@ country PE: DFS-FCC country PF: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country PG: DFS-FCC (2402 - 2482 @ 40), (20) @@ -1134,26 +943,19 @@ country PK: country PL: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country PM: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country PR: DFS-FCC (2402 - 2472 @ 40), (30) @@ -1162,27 +964,12 @@ country PR: DFS-FCC (5490 - 5730 @ 160), (24), DFS (5735 - 5835 @ 80), (30) -# Public Safety FCCA, FCC4 -# 27dBm [4.9GHz 1/4 rate], 30dBm [1/2 rate], 33dBm [full rate], and 5GHz same as FCC1 -# db.txt cannot express the limitation on 5G so disable all 5G channels for FCC4 -country PS: DFS-FCC - (2402 - 2472 @ 40), (30) - (4940 - 4990 @ 40), (33) - country PT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1208,24 +995,16 @@ country QA: country RE: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country RO: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: 
http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1233,9 +1012,10 @@ country RO: DFS-ETSI # http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf country RS: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1256,24 +1036,16 @@ country RW: DFS-FCC country SA: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5330 @ 160), (23) + (5490 - 5710 @ 160), (30) + (5735 - 5835 @ 80), (30), DFS country SE: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1288,35 +1060,19 @@ country SG: DFS-FCC country SI: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) country SK: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS - # 5.9ghz band - # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) # 60 gHz band channels 1-4, ref: Etsi En 302 567 (57000 - 66000 @ 2160), (40) @@ -1328,8 +1084,8 @@ country SN: country SR: DFS-ETSI (2402 - 2482 
@ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country SV: DFS-FCC @@ -1347,14 +1103,14 @@ country TC: DFS-FCC country TD: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country TG: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 40), (23) - (5250 - 5330 @ 40), (23), DFS + (5170 - 5250 @ 40), (23), NO-OUTDOOR + (5250 - 5330 @ 40), (23), DFS, NO-OUTDOOR (5490 - 5710 @ 40), (30), DFS country TH: DFS-FCC @@ -1373,9 +1129,10 @@ country TN: DFS-ETSI country TR: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country TT: (2402 - 2482 @ 40), (20) @@ -1427,18 +1184,6 @@ country US: DFS-FCC (5250 - 5330 @ 80), (24), DFS, AUTO-BW (5490 - 5730 @ 160), (24), DFS (5735 - 5835 @ 80), (30) - # 5.9ghz band - # reference: https://apps.fcc.gov/edocs_public/attachmatch/FCC-03-324A1.pdf - (5842 - 5863 @ 5), (30) - (5850 - 5870 @ 10), (30) - (5860 - 5880 @ 10), (30) - (5865 - 5885 @ 20), (30) - (5870 - 5890 @ 10), (30) - (5880 - 5900 @ 10), (30) - (5890 - 5910 @ 10), (30) - (5895 - 5915 @ 20), (30) - (5900 - 5920 @ 10), (30) - (5910 - 5930 @ 10), (30) # 60g band # reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255 # channels 1,2,3,4,5,6 EIRP=40dBm(43dBm peak) @@ -1459,9 +1204,10 @@ country UZ: DFS-ETSI country VC: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW - (5490 - 5710 @ 160), (30), DFS + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR + (5490 - 5730 @ 160), (30), DFS + (5735 - 5875 @ 80), (14) country VE: DFS-FCC (2402 - 2482 @ 40), (30) @@ -1494,14 +1240,14 @@ country VU: DFS-FCC country WF: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country WS: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 40), (23) - (5250 - 5330 @ 40), (23), DFS + (5170 - 5250 @ 40), (23), NO-OUTDOOR + (5250 - 5330 @ 40), (23), DFS, NO-OUTDOOR (5490 - 5710 @ 40), (30), DFS country XA: DFS-JP @@ -1516,8 +1262,8 @@ country YE: country YT: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS country ZA: DFS-FCC @@ -1531,6 +1277,6 @@ country ZA: DFS-FCC country ZW: DFS-ETSI (2402 - 2482 @ 40), (20) - (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR + (5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (30), DFS diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 465e0d31229d..22536248bf67 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -350,6 +350,11 @@ int cfg80211_mlme_deauth(struct 
cfg80211_registered_device *rdev, !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return 0; + if (ether_addr_equal(wdev->disconnect_bssid, bssid) || + (wdev->current_bss && + ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) + wdev->conn_owner_nlportid = 0; + return rdev_deauth(rdev, dev, &req); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4490dec28f50..b2d90f48227d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -422,6 +422,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, + [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -3677,8 +3678,8 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev, return false; return true; case NL80211_CMD_CONNECT: - /* SAE not supported yet */ - if (auth_type == NL80211_AUTHTYPE_SAE) + if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) && + auth_type == NL80211_AUTHTYPE_SAE) return false; /* FILS with SK PFS or PK not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || @@ -3998,6 +3999,7 @@ static int parse_station_flags(struct genl_info *info, params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED); + break; default: return -EINVAL; } @@ -7686,6 +7688,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, if (settings->n_ciphers_pairwise > cipher_limit) return -EINVAL; + if (len > sizeof(u32) * NL80211_MAX_NR_CIPHER_SUITES) + return -EINVAL; + memcpy(settings->ciphers_pairwise, data, len); for (i = 0; i < settings->n_ciphers_pairwise; i++) @@ -7833,8 +7838,17 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); if (!err) { wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, &req); + + if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + dev->ieee80211_ptr->conn_owner_nlportid = + info->snd_portid; + memcpy(dev->ieee80211_ptr->disconnect_bssid, + bssid, ETH_ALEN); + } + wdev_unlock(dev->ieee80211_ptr); } @@ -8578,11 +8592,31 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { + if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + kzfree(connkeys); + return -EINVAL; + } + connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT; + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); - wdev_unlock(dev->ieee80211_ptr); if (err) kzfree(connkeys); + + if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; + if (connect.bssid) + memcpy(dev->ieee80211_ptr->disconnect_bssid, + connect.bssid, ETH_ALEN); + else + memset(dev->ieee80211_ptr->disconnect_bssid, + 0, ETH_ALEN); + } + + wdev_unlock(dev->ieee80211_ptr); + return err; } @@ -10542,6 +10576,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || + !info->attrs[NL80211_ATTR_IE] || !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; @@ -11132,6 +11167,41 @@ static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb, return 0; } +static int nl80211_external_auth(struct sk_buff 
*skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct cfg80211_external_auth_params params;
+
+	if (!rdev->ops->external_auth)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_SSID])
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_BSSID])
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_STATUS_CODE])
+		return -EINVAL;
+
+	memset(&params, 0, sizeof(params));
+
+	params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+	if (params.ssid.ssid_len == 0 ||
+	    params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
+		return -EINVAL;
+	memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]),
+	       params.ssid.ssid_len);
+
+	memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]),
+	       ETH_ALEN);
+
+	params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+
+	return rdev_external_auth(rdev, dev, &params);
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -11966,6 +12036,14 @@ static const struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_EXTERNAL_AUTH,
+		.doit = nl80211_external_auth,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 /* notification functions */
@@ -13753,6 +13831,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
 			if (wdev->owner_nlportid == notify->portid)
 				schedule_destroy_work = true;
+			else if (wdev->conn_owner_nlportid == notify->portid)
+				schedule_work(&wdev->disconnect_wk);
 		}
 
 		spin_lock_bh(&rdev->beacon_registrations_lock);
@@ -13917,6 +13997,47 @@ void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp)
 }
 EXPORT_SYMBOL(cfg80211_ap_stopped);
 
+int cfg80211_external_auth_request(struct net_device *dev,
+				   struct cfg80211_external_auth_params *params,
+				   gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+
+	if (!wdev->conn_owner_nlportid)
+		return -EINVAL;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EXTERNAL_AUTH);
+	if (!hdr)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
+	    nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
+			params->action) ||
+	    nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
+	    nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len,
+		    params->ssid.ssid))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	genlmsg_unicast(wiphy_net(&rdev->wiphy), msg,
+			wdev->conn_owner_nlportid);
+	return 0;
+
+ nla_put_failure:
+	nlmsg_free(msg);
+	return -ENOBUFS;
+}
+EXPORT_SYMBOL(cfg80211_external_auth_request);
+
 /* initialisation/exit functions */
 
 int nl80211_init(void)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 6bde2241bffa..aef08a0f51f3 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1040,4 +1040,18 @@ rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev,
 	trace_rdev_return_void(&rdev->wiphy);
 }
 
+static inline int
+rdev_external_auth(struct cfg80211_registered_device *rdev,
+		   struct net_device *dev,
+		   struct cfg80211_external_auth_params *params)
+{
+	int ret = -EOPNOTSUPP;
+
+	trace_rdev_external_auth(&rdev->wiphy, dev, params);
+	if (rdev->ops->external_auth)
+		ret = rdev->ops->external_auth(&rdev->wiphy, dev, params);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d68c7318805a..b66db3476d3b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1764,7 +1764,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
		 * as some drivers used this to restore its orig_* reg domain.
		 */
		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-		    wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+		    wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+		    !(wiphy->regulatory_flags &
+		      REGULATORY_WIPHY_SELF_MANAGED))
			reg_call_notifier(wiphy, lr);
		return;
	}
@@ -2262,26 +2264,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
	}
 }
 
-static bool reg_only_self_managed_wiphys(void)
-{
-	struct cfg80211_registered_device *rdev;
-	struct wiphy *wiphy;
-	bool self_managed_found = false;
-
-	ASSERT_RTNL();
-
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-		wiphy = &rdev->wiphy;
-		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
-			self_managed_found = true;
-		else
-			return false;
-	}
-
-	/* make sure at least one self-managed wiphy exists */
-	return self_managed_found;
-}
-
 /*
  * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
  * Regulatory hints come on a first come first serve basis and we
@@ -2314,10 +2296,6 @@ static void reg_process_pending_hints(void)
	spin_unlock(&reg_requests_lock);
 
	notify_self_managed_wiphys(reg_request);
-	if (reg_only_self_managed_wiphys()) {
-		reg_free_request(reg_request);
-		return;
-	}
 
	reg_process_hint(reg_request);
 
@@ -2429,6 +2407,7 @@ static int regulatory_hint_core(const char *alpha2)
	request->alpha2[0] = alpha2[0];
	request->alpha2[1] = alpha2[1];
	request->initiator = NL80211_REGDOM_SET_BY_CORE;
+	request->wiphy_idx = WIPHY_IDX_INVALID;
 
	queue_regulatory_request(request);
 
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 85c12c7d0ed1..5527bbb80f71 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -498,7 +498,7 @@ static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
	if (!buf)
		return -ENOMEM;
 
-	if (ies_len) {
+	if (ies_len && ies) {
		static const u8 before_extcapa[] = {
			/* not listing IEs expected to be created by driver */
			WLAN_EID_RSN,
@@ -757,6 +757,7 @@ void __cfg80211_connect_result(struct net_device *dev,
		kzfree(wdev->connect_keys);
		wdev->connect_keys = NULL;
		wdev->ssid_len = 0;
+		wdev->conn_owner_nlportid = 0;
		if (cr->bss) {
			cfg80211_unhold_bss(bss_from_pub(cr->bss));
			cfg80211_put_bss(wdev->wiphy, cr->bss);
@@ -1017,6 +1018,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 
	wdev->current_bss = NULL;
	wdev->ssid_len = 0;
+	wdev->conn_owner_nlportid = 0;
 
	nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
 
@@ -1148,6 +1150,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
	kzfree(wdev->connect_keys);
	wdev->connect_keys = NULL;
 
+	wdev->conn_owner_nlportid = 0;
+
	if (wdev->conn)
		err = cfg80211_sme_disconnect(wdev, reason);
	else if (!rdev->ops->disconnect)
@@ -1157,3 +1161,32 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
 
	return err;
 }
+
+/*
+ * Used to clean up after the connection / connection attempt owner socket
+ * disconnects
+ */
+void cfg80211_autodisconnect_wk(struct work_struct *work)
+{
+	struct wireless_dev *wdev =
container_of(work, struct wireless_dev, disconnect_wk); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + + wdev_lock(wdev); + + if (wdev->conn_owner_nlportid) { + /* + * Use disconnect_bssid if still connecting and ops->disconnect + * not implemented. Otherwise we can use cfg80211_disconnect. + */ + if (rdev->ops->disconnect || wdev->current_bss) + cfg80211_disconnect(rdev, wdev->netdev, + WLAN_REASON_DEAUTH_LEAVING, true); + else + cfg80211_mlme_deauth(rdev, wdev->netdev, + wdev->disconnect_bssid, NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, false); + } + + wdev_unlock(wdev); +} diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7bf5ba63555..03a76e86793a 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2102,6 +2102,29 @@ TRACE_EVENT(rdev_tdls_cancel_channel_switch, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(addr)) ); +TRACE_EVENT(rdev_external_auth, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_external_auth_params *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry(WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(bssid) + __array(u8, ssid, IEEE80211_MAX_SSID_LEN + 1) + __field(u16, status) + ), + TP_fast_assign(WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(bssid, params->bssid); + memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); + memcpy(__entry->ssid, params->ssid.ssid, + params->ssid.ssid_len); + __entry->status = params->status; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT + ", ssid: %s, status: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->bssid, __entry->ssid, __entry->status) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ diff --git a/net/wireless/util.c b/net/wireless/util.c index afdbc1200a1b..4d3719d7113d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -591,6 +591,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; + eth_zero_addr(hdr.addr4); skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { @@ -1355,7 +1356,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class) { u8 vht_opclass; - u16 freq = chandef->center_freq1; + u32 freq = chandef->center_freq1; if (freq >= 2412 && freq <= 2472) { if (chandef->width > NL80211_CHAN_WIDTH_40) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6173a55af214..e098ca928538 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1846,7 +1846,10 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, /* Try to instantiate a bundle */ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); if (err <= 0) { - if (err != 0 && err != -EAGAIN) + if (err == 0) + return NULL; + + if (err != -EAGAIN) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); return ERR_PTR(err); } @@ -2327,6 +2330,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) return make_blackhole(net, dst_orig->ops->family, dst_orig); + if (IS_ERR(dst)) + dst_release(dst_orig); + return dst; } EXPORT_SYMBOL(xfrm_lookup_route); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9b6e51450fc5..99e013f3a88d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1357,6 +1357,15 @@ out: if (x1->curlft.use_time) xfrm_state_check_expire(x1); + if 
(x->props.output_mark) { + spin_lock_bh(&net->xfrm.xfrm_state_lock); + + x1->props.output_mark = x->props.output_mark; + + __xfrm_state_bump_genids(x1); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + } + err = 0; x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 45f368b1eadf..634b4f85090a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, err = -EINVAL; switch (p->family) { case AF_INET: + if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) + goto out; + break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) + if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) + goto out; + break; #else err = -EAFNOSUPPORT; @@ -988,10 +994,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, { struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); - if (nlsk) - return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); - else - return -1; + if (!nlsk) { + kfree_skb(skb); + return -EPIPE; + } + + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); } static inline size_t xfrm_spdinfo_msgsize(void) @@ -1318,10 +1326,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) switch (p->sel.family) { case AF_INET: + if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) + return -EINVAL; + break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) + if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) + return -EINVAL; + break; #else return -EAFNOSUPPORT; @@ -1402,6 +1416,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) (ut[i].family != prev_family)) return -EINVAL; + if (ut[i].mode >= XFRM_MODE_MAX) + return -EINVAL; + prev_family = ut[i].family; switch (ut[i].family) { @@ -1632,9 +1649,11 @@ static inline size_t userpolicy_type_attrsize(void) #ifdef CONFIG_XFRM_SUB_POLICY static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { - struct xfrm_userpolicy_type upt = { - .type = type, - }; + struct xfrm_userpolicy_type upt; + + /* Sadly there are two holes in struct xfrm_userpolicy_type */ + memset(&upt, 0, sizeof(upt)); + upt.type = type; return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); } |
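
Note (editor's illustration, not part of the diff above): the nl80211 changes introduce NL80211_CMD_EXTERNAL_AUTH so a userspace SAE supplicant can perform the authentication and report the result back to the kernel. The sketch below shows, purely as an assumption-laden example, how such a supplicant might send that status over generic netlink with libnl-3. The interface name, SSID, BSSID and status value are placeholders; a real supplicant would take them from the NL80211_CMD_EXTERNAL_AUTH request delivered by cfg80211_external_auth_request(), and error handling is kept minimal.

/* Hypothetical userspace sketch: report an SAE authentication result via
 * NL80211_CMD_EXTERNAL_AUTH, supplying the attributes nl80211_external_auth()
 * requires (SSID, BSSID, STATUS_CODE). Build against libnl-genl-3 and a
 * <linux/nl80211.h> that defines NL80211_CMD_EXTERNAL_AUTH. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <net/if.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* Placeholder values for illustration only. */
	const char *ifname = "wlan0";
	const char *ssid = "example-ssid";
	const unsigned char bssid[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned short status = 0; /* 0 == success per IEEE 802.11 status codes */

	struct nl_sock *sock = nl_socket_alloc();
	if (!sock || genl_connect(sock))
		return 1;

	int family = genl_ctrl_resolve(sock, "nl80211");
	if (family < 0)
		return 1;

	struct nl_msg *msg = nlmsg_alloc();
	if (!msg)
		return 1;

	/* Generic netlink header, command NL80211_CMD_EXTERNAL_AUTH. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_EXTERNAL_AUTH, 0);

	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	nla_put(msg, NL80211_ATTR_BSSID, sizeof(bssid), bssid);
	nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status);

	/* The handler is registered with GENL_ADMIN_PERM, so this needs
	 * CAP_NET_ADMIN, and the netdev must be up. */
	if (nl_send_auto(sock, msg) < 0)
		fprintf(stderr, "failed to send NL80211_CMD_EXTERNAL_AUTH\n");

	nlmsg_free(msg);
	nl_socket_free(sock);
	return 0;
}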
