path: root/net/sunrpc/cache.c
author	NeilBrown <neilb@suse.de>	2009-09-09 16:32:54 +1000
committer	J. Bruce Fields <bfields@citi.umich.edu>	2009-09-18 11:47:49 -0400
commit	67e7328f1577230ef3a1430c1a7e5c07978c6e51 (patch)
tree	6b160d46c8bfbc5b04b4d532f0ad99067feab49e /net/sunrpc/cache.c
parent	c0826574ddc0df486ecfc2d655e08904c6513209 (diff)
sunrpc/cache: use list_del_init for the list_head entries in cache_deferred_req
Using list_del_init is generally safer than list_del, and it will allow us, in a subsequent patch, to see if an entry has already been processed or not.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
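The point of the change is that list_del_init() leaves the removed list_head re-initialised (pointing at itself), whereas list_del() poisons its pointers; only the former lets later code use list_empty() to ask whether an entry has already been taken off a list. A minimal userspace sketch of that distinction (simplified re-implementations for illustration, not the kernel's <linux/list.h>):

	/* Userspace sketch (not kernel code) of why list_del_init() is
	 * preferred here: it leaves the entry re-initialised, so a later
	 * list_empty() check can tell whether it has already been removed. */
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h;
		h->prev = h;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static int list_empty(const struct list_head *h)
	{
		return h->next == h;
	}

	/* Kernel-style list_del() poisons the pointers after unlinking... */
	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = NULL;	/* stand-in for LIST_POISON */
	}

	/* ...while list_del_init() re-initialises the entry, so list_empty()
	 * on it remains well defined afterwards. */
	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		INIT_LIST_HEAD(entry);
	}

	int main(void)
	{
		struct list_head head, entry;

		INIT_LIST_HEAD(&head);
		list_add(&entry, &head);

		list_del_init(&entry);
		/* Safe: the entry is now an empty list, so we can ask whether it
		 * has already been processed - the property the commit message
		 * wants for a subsequent patch. */
		printf("already removed: %s\n", list_empty(&entry) ? "yes" : "no");

		/* With plain list_del() the pointers are poisoned, so list_empty()
		 * could no longer distinguish a removed entry from a linked one. */
		return 0;
	}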
Diffstat (limited to 'net/sunrpc/cache.c')
-rw-r--r--	net/sunrpc/cache.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f2895d0a5f8..4a32a30a03e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -529,8 +529,8 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
if (++cache_defer_cnt > DFR_MAX) {
dreq = list_entry(cache_defer_list.prev,
struct cache_deferred_req, recent);
- list_del(&dreq->recent);
- list_del(&dreq->hash);
+ list_del_init(&dreq->recent);
+ list_del_init(&dreq->hash);
cache_defer_cnt--;
}
spin_unlock(&cache_defer_lock);
@@ -564,7 +564,7 @@ static void cache_revisit_request(struct cache_head *item)
dreq = list_entry(lp, struct cache_deferred_req, hash);
lp = lp->next;
if (dreq->item == item) {
- list_del(&dreq->hash);
+ list_del_init(&dreq->hash);
list_move(&dreq->recent, &pending);
cache_defer_cnt--;
}
@@ -590,7 +590,7 @@ void cache_clean_deferred(void *owner)
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
- list_del(&dreq->hash);
+ list_del_init(&dreq->hash);
list_move(&dreq->recent, &pending);
cache_defer_cnt--;
}