author     Mark Brown <broonie@kernel.org>  2016-03-18 09:45:54 +0000
committer  Mark Brown <broonie@kernel.org>  2016-03-18 09:45:54 +0000
commit     ddbcfcba5fdc56f30e4d02c3bac8cf965502cece (patch)
tree       235f05605003ed8dc119ead980cf86157eb695e8  /net/tipc/node.c
parent     dfabba9c3737ce7a2574dcf58940f038e536e233 (diff)
parent     62e21959dc6f25c5fce0c1a0934e4a9d982bf99b (diff)
Merge tag 'v4.4.5' into linux-linaro-lsk-v4.4
This is the 4.4.5 stable release

# gpg: Signature made Wed 09 Mar 2016 23:36:03 GMT using RSA key ID 6092693E
# gpg: Good signature from "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 647F 2865 4894 E3BD 4571 99BE 38DB BDC8 6092 693E
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec0a43c..3926b561f873 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
skb_queue_head_init(&n_ptr->bc_entry.inputq1);
__skb_queue_head_init(&n_ptr->bc_entry.arrvq);
skb_queue_head_init(&n_ptr->bc_entry.inputq2);
- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
- if (n_ptr->addr < temp_node->addr)
- break;
- }
- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
n_ptr->state = SELF_DOWN_PEER_LEAVING;
n_ptr->signature = INVALID_NODE_SIG;
n_ptr->active_links[0] = INVALID_BEARER_ID;
@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
tipc_node_get(n_ptr);
setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
n_ptr->keepalive_intv = U32_MAX;
+ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n_ptr->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
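
For context, the hunks above move the hash-table and sorted-list insertion of the new node to the end of tipc_node_create(), after every field has been initialised, so a lockless reader can never find a half-built node. Below is a minimal, hypothetical sketch of that publish-after-init pattern; struct item, item_create() and the single-bucket table are illustrative stand-ins, not code from the patch.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical object standing in for struct tipc_node. */
struct item {
	u32 addr;
	struct hlist_node hash;	/* RCU hash-table linkage */
	struct list_head list;	/* RCU sorted-list linkage */
};

static DEFINE_SPINLOCK(item_lock);	/* writer-side lock */
static HLIST_HEAD(item_htable);		/* single bucket, for brevity */
static LIST_HEAD(item_list);		/* list kept sorted by ->addr */

static struct item *item_create(u32 addr)
{
	struct item *it, *tmp;

	it = kzalloc(sizeof(*it), GFP_KERNEL);
	if (!it)
		return NULL;

	spin_lock_bh(&item_lock);
	/* Initialise every field a lockless reader may touch... */
	it->addr = addr;

	/* ...and only then publish, so a reader walking the hash table
	 * or the sorted list under rcu_read_lock() never observes a
	 * half-initialised entry -- the ordering the patch restores. */
	hlist_add_head_rcu(&it->hash, &item_htable);
	list_for_each_entry_rcu(tmp, &item_list, list) {
		if (it->addr < tmp->addr)
			break;
	}
	list_add_tail_rcu(&it->list, &tmp->list);
	spin_unlock_bh(&item_lock);
	return it;
}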