author     Ralf Baechle <ralf@linux-mips.org>              2006-07-12 13:25:23 -0700
committer  David S. Miller <davem@sunset.davemloft.net>    2006-07-12 13:58:57 -0700
commit     c19c4b9c9acb4ab6f5477ae9ca2c0a8619f19c7a (patch)
tree       3aaf3c6e4bc9dd797af434b6767c3da5732a6ba1  /net/ax25/af_ax25.c
parent     da952315c9c625bd513c6162613fd3fd01d91aae (diff)
[AX.25]: Optimize AX.25 socket list lock
Right now all uses of ax25_list_lock are _bh locks, but since some of this code is only ever invoked from _bh (softirq) context, we can do better there and use the plain spin_lock()/spin_unlock() variants.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
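To illustrate the reasoning behind the change, here is a minimal standalone sketch using a hypothetical example_list_lock (not the actual ax25_list_lock code): a function that is only ever invoked from softirq (_bh) context already runs with bottom halves disabled, so the plain spin_lock()/spin_unlock() variants are enough, while process-context callers must keep the _bh variants so the softirq path cannot deadlock against them.

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(example_list_lock);  /* hypothetical, stands in for ax25_list_lock */
    static LIST_HEAD(example_list);

    /* Only ever called from the receive path, which runs in softirq (_bh)
     * context, so bottom halves are already disabled here: the plain
     * lock/unlock pair is sufficient and slightly cheaper. */
    static void example_rx_walk(void)
    {
            spin_lock(&example_list_lock);
            /* ... walk example_list ... */
            spin_unlock(&example_list_lock);
    }

    /* Called from process context (e.g. a socket system call): the _bh
     * variants are still required so the softirq path cannot interrupt
     * the lock holder on this CPU and deadlock on the same lock. */
    static void example_user_walk(void)
    {
            spin_lock_bh(&example_list_lock);
            /* ... walk example_list ... */
            spin_unlock_bh(&example_list_lock);
    }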
Diffstat (limited to 'net/ax25/af_ax25.c')
-rw-r--r--  net/ax25/af_ax25.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f12be2acf9b..000695c4858 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
ax25_cb *s;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
@@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
sock_hold(s->sk);
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return s->sk;
}
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return NULL;
}
@@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
ax25_cb *s;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
return sk;
}
@@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
struct sk_buff *copy;
struct hlist_node *node;
- spin_lock_bh(&ax25_list_lock);
+ spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
kfree_skb(copy);
}
}
- spin_unlock_bh(&ax25_list_lock);
+ spin_unlock(&ax25_list_lock);
}
/*