path: root/lib/genalloc.c
author	Alex Shi <alex.shi@linaro.org>	2018-01-18 15:26:28 +0800
committer	Alex Shi <alex.shi@linaro.org>	2018-01-18 15:26:28 +0800
commit	2ba51c6d36ee1b43c1f898c6c48ddf3d33e9ab30 (patch)
tree	ff3302dc45a91d7bfcaabd37bfa8fa391a954180 /lib/genalloc.c
parent	28610abf4a574c33ca70e3d7b0e523fdede488d1 (diff)
parent	90816cc1d4a1d23efe37b74866c6174dd5eab6b5 (diff)
Merge remote-tracking branch 'rt-stable/v4.9-rt' into linux-linaro-lsk-v4.9-rt (lsk-v4.9-18.02-rt)
Conflicts: arch/arm64/mm/init.c
Diffstat (limited to 'lib/genalloc.c')
-rw-r--r--	lib/genalloc.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b1a03e..ca06adc4f445 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size - 1;
-	atomic_set(&chunk->avail, size);
+	atomic_long_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (size > atomic_read(&chunk->avail))
+		if (size > atomic_long_read(&chunk->avail))
 			continue;
 
 		start_bit = 0;
@@ -324,7 +324,7 @@ retry:
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
 		size = nbits << order;
-		atomic_sub(size, &chunk->avail);
+		atomic_long_sub(size, &chunk->avail);
 		break;
 	}
 	rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 		remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 		BUG_ON(remain);
 		size = nbits << order;
-		atomic_add(size, &chunk->avail);
+		atomic_long_add(size, &chunk->avail);
 		rcu_read_unlock();
 		return;
 	}
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		avail += atomic_read(&chunk->avail);
+		avail += atomic_long_read(&chunk->avail);
 	rcu_read_unlock();
 	return avail;
 }
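
The hunks above switch the per-chunk chunk->avail counter from the int-sized atomic_t helpers to atomic_long_t, so the available-bytes bookkeeping uses a long like the size_t values passed into gen_pool_add_virt(), gen_pool_alloc_algo() and gen_pool_free(). As context only, here is a minimal usage sketch of the gen_pool API these functions back; it is not part of this commit, and demo_pool, demo_region_init(), the allocation order (5) and the sizes are illustrative assumptions.

/*
 * Minimal usage sketch for the gen_pool API touched by the hunks above.
 * Not from this commit: demo_pool, demo_region_init(), the order value
 * and the sizes are made-up example parameters.
 */
#include <linux/genalloc.h>
#include <linux/kernel.h>

static struct gen_pool *demo_pool;

static int demo_region_init(unsigned long region, size_t region_size)
{
	unsigned long buf;

	/* Allocation granularity is 1 << 5 = 32 bytes; -1 = any NUMA node. */
	demo_pool = gen_pool_create(5, -1);
	if (!demo_pool)
		return -ENOMEM;

	/* gen_pool_add() wraps gen_pool_add_virt(), which seeds chunk->avail. */
	if (gen_pool_add(demo_pool, region, region_size, -1)) {
		gen_pool_destroy(demo_pool);
		return -ENOMEM;
	}

	/* Alloc/free adjust chunk->avail via the atomic_long_* helpers above. */
	buf = gen_pool_alloc(demo_pool, 256);
	if (buf)
		gen_pool_free(demo_pool, buf, 256);

	/* gen_pool_avail() sums chunk->avail across all chunks in the pool. */
	pr_info("demo pool: %zu bytes still available\n",
		gen_pool_avail(demo_pool));
	return 0;
}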