Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile          2
-rw-r--r--  lib/debugobjects.c   21
-rw-r--r--  lib/devres.c          2
-rw-r--r--  lib/idr.c           446
-rw-r--r--  lib/kfifo.c         607
-rw-r--r--  lib/lru_cache.c       3
-rw-r--r--  lib/scatterlist.c    86
7 files changed, 981 insertions(+), 186 deletions(-)
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7..d7946ff75b2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
- bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o
+ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc..37061ede8b8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
*/
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
- struct hlist_node *node;
struct debug_obj *obj;
int cnt = 0;
- hlist_for_each_entry(obj, node, &b->list, node) {
+ hlist_for_each_entry(obj, &b->list, node) {
cnt++;
if (obj->object == addr)
return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
- hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
raw_spin_lock_irqsave(&db->lock, flags);
- hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
- hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
static int __init debug_objects_replace_static_objects(void)
{
struct debug_bucket *db = obj_hash;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
struct debug_obj *obj, *new;
HLIST_HEAD(objects);
int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
local_irq_disable();
/* Remove the statically allocated objects from the pool */
- hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+ hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
hlist_del(&obj->node);
/* Move the allocated objects to the pool */
hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
hlist_move_list(&db->list, &objects);
- hlist_for_each_entry(obj, node, &objects, node) {
+ hlist_for_each_entry(obj, &objects, node) {
new = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&new->node);
/* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
obj_pool_used);
return 0;
free:
- hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+ hlist_for_each_entry_safe(obj, tmp, &objects, node) {
hlist_del(&obj->node);
kmem_cache_free(obj_cache, obj);
}
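
The debugobjects.c hunks above (like the lru_cache.c hunk later in this diff) follow the hlist iterator API change that drops the separate struct hlist_node cursor: hlist_for_each_entry() now takes only the entry, the list head and the member name, and only the _safe variant keeps a temporary node pointer. A minimal sketch of the new calling convention, using a hypothetical struct item, might look like:

#include <linux/list.h>
#include <linux/slab.h>

/* hypothetical entry type, for illustration only */
struct item {
	int key;
	struct hlist_node node;
};

static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	/* new form: no separate struct hlist_node *pos cursor argument */
	hlist_for_each_entry(it, head, node) {
		if (it->key == key)
			return it;
	}
	return NULL;
}

static void free_items(struct hlist_head *head)
{
	struct item *it;
	struct hlist_node *tmp;	/* the _safe variant still needs a temporary */

	hlist_for_each_entry_safe(it, tmp, head, node) {
		hlist_del(&it->node);
		kfree(it);
	}
}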
diff --git a/lib/devres.c b/lib/devres.c
index 88ad75952a7..823533138fa 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
#ifdef CONFIG_PCI
/*
@@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */
-#endif /* CONFIG_HAS_IOPORT */
diff --git a/lib/idr.c b/lib/idr.c
index 648239079dd..73f4d53c02f 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,10 +35,41 @@
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+
+/* Leave the possibility of an incomplete final layer */
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+/* Number of id_layer structs to leave in free list */
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
static struct kmem_cache *idr_layer_cache;
+static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
+/* the maximum ID which can be allocated given idr->layers */
+static int idr_max(int layers)
+{
+ int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
+
+ return (1 << bits) - 1;
+}
+
+/*
+ * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
+ * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
+ * so on.
+ */
+static int idr_layer_prefix_mask(int layer)
+{
+ return ~idr_max(layer + 1);
+}
+
static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
return(p);
}
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
+ * an idr_layer from @idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with the per-pool preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+ struct idr_layer *new;
+
+ /* this is the old path, bypass to get_from_free_list() */
+ if (layer_idr)
+ return get_from_free_list(layer_idr);
+
+ /* try to allocate directly from kmem_cache */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ if (new)
+ return new;
+
+ /*
+ * Try to fetch one from the per-cpu preload buffer if in process
+ * context. See idr_preload() for details.
+ */
+ if (in_interrupt())
+ return NULL;
+
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ return new;
+}
+
static void idr_layer_rcu_free(struct rcu_head *head)
{
struct idr_layer *layer;
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head)
kmem_cache_free(idr_layer_cache, layer);
}
-static inline void free_layer(struct idr_layer *p)
+static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
+ if (idr->hint && idr->hint == p)
+ RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id)
struct idr_layer *p = pa[0];
int l = 0;
- __set_bit(id & IDR_MASK, &p->bitmap);
+ __set_bit(id & IDR_MASK, p->bitmap);
/*
* If this layer is full mark the bit in the layer above to
* show that this part of the radix tree is full. This may
* complete the layer above and require walking up the radix
* tree.
*/
- while (p->bitmap == IDR_FULL) {
+ while (bitmap_full(p->bitmap, IDR_SIZE)) {
if (!(p = pa[++l]))
break;
id = id >> IDR_BITS;
- __set_bit((id & IDR_MASK), &p->bitmap);
+ __set_bit((id & IDR_MASK), p->bitmap);
}
}
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_pre_get);
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth. Returns
+ *
+ * the allocated id >= 0 if successful,
+ * -EAGAIN if the tree needs to grow for allocation to succeed,
+ * -ENOSPC if the id space is exhausted,
+ * -ENOMEM if more idr_layers need to be allocated.
+ */
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+ gfp_t gfp_mask, struct idr *layer_idr)
{
int n, m, sh;
struct idr_layer *p, *new;
int l, id, oid;
- unsigned long bm;
id = *starting_id;
restart:
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
* We run around this while until we reach the leaf node...
*/
n = (id >> (IDR_BITS*l)) & IDR_MASK;
- bm = ~p->bitmap;
- m = find_next_bit(&bm, IDR_SIZE, n);
+ m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
if (m == IDR_SIZE) {
/* no space available go back to previous layer. */
l++;
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
/* if already at the top layer, we need to grow */
if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
- return IDR_NEED_TO_GROW;
+ return -EAGAIN;
}
p = pa[l];
BUG_ON(!p);
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_IDR_BIT) || (id < 0))
- return IDR_NOMORE_SPACE;
+ return -ENOSPC;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- new = get_from_free_list(idp);
+ new = idr_layer_alloc(gfp_mask, layer_idr);
if (!new)
- return -1;
+ return -ENOMEM;
new->layer = l-1;
+ new->prefix = id & idr_layer_prefix_mask(new->layer);
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
- struct idr_layer **pa)
+ struct idr_layer **pa, gfp_t gfp_mask,
+ struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
@@ -214,8 +309,8 @@ build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = get_from_free_list(idp)))
- return -1;
+ if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
+ return -ENOMEM;
p->layer = 0;
layers = 1;
}
@@ -223,7 +318,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while (id > idr_max(layers)) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -231,9 +326,10 @@ build_up:
* upwards.
*/
p->layer++;
+ WARN_ON_ONCE(p->prefix);
continue;
}
- if (!(new = get_from_free_list(idp))) {
+ if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
@@ -242,45 +338,42 @@ build_up:
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
- new->bitmap = new->count = 0;
+ new->count = 0;
+ bitmap_clear(new->bitmap, 0, IDR_SIZE);
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
- return -1;
+ return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
new->layer = layers-1;
- if (p->bitmap == IDR_FULL)
- __set_bit(0, &new->bitmap);
+ new->prefix = id & idr_layer_prefix_mask(new->layer);
+ if (bitmap_full(p->bitmap, IDR_SIZE))
+ __set_bit(0, new->bitmap);
p = new;
}
rcu_assign_pointer(idp->top, p);
idp->layers = layers;
- v = sub_alloc(idp, &id, pa);
- if (v == IDR_NEED_TO_GROW)
+ v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
+ if (v == -EAGAIN)
goto build_up;
return(v);
}
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+/*
+ * @id and @pa are from a successful allocation from idr_get_empty_slot().
+ * Install the user pointer @ptr and mark the slot full.
+ */
+static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+ struct idr_layer **pa)
{
- struct idr_layer *pa[MAX_IDR_LEVEL];
- int id;
-
- id = idr_get_empty_slot(idp, starting_id, pa);
- if (id >= 0) {
- /*
- * Successfully found an empty slot. Install the user
- * pointer and mark the slot full.
- */
- rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
- (struct idr_layer *)ptr);
- pa[0]->count++;
- idr_mark_full(pa, id);
- }
+ /* update hint used for lookup, cleared from free_layer() */
+ rcu_assign_pointer(idr->hint, pa[0]);
- return id;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
+ pa[0]->count++;
+ idr_mark_full(pa, id);
}
/**
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
int rv;
- rv = idr_get_new_above_int(idp, ptr, starting_id);
- /*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
- */
+ rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
if (rv < 0)
- return _idr_rc_to_errno(rv);
+ return rv == -ENOMEM ? -EAGAIN : rv;
+
+ idr_fill_slot(idp, ptr, rv, pa);
*id = rv;
return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
*
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
+ * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end(). Note that preemption is disabled while preloaded.
*
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading. This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
+ *
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ * idr_preload(GFP_KERNEL);
+ * spin_lock(lock);
+ *
+ * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
*
- * @id returns a value in the range %0 ... %0x7fffffff
+ * spin_unlock(lock);
+ * idr_preload_end();
+ * if (id < 0)
+ * error;
*/
-int idr_get_new(struct idr *idp, void *ptr, int *id)
+void idr_preload(gfp_t gfp_mask)
{
- int rv;
+ /*
+ * Consuming preload buffer from non-process context breaks preload
+ * allocation guarantee. Disallow usage from those contexts.
+ */
+ WARN_ON_ONCE(in_interrupt());
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ preempt_disable();
- rv = idr_get_new_above_int(idp, ptr, 0);
/*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
+ * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+ * return value from idr_alloc() needs to be checked for failure
+ * anyway. Silently give up if allocation fails. The caller can
+ * treat failures from idr_alloc() as if idr_alloc() were called
+ * with @gfp_mask which should be enough.
*/
- if (rv < 0)
- return _idr_rc_to_errno(rv);
- *id = rv;
- return 0;
+ while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+ struct idr_layer *new;
+
+ preempt_enable();
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ preempt_disable();
+ if (!new)
+ break;
+
+ /* link the new one to per-cpu preload list */
+ new->ary[0] = __this_cpu_read(idr_preload_head);
+ __this_cpu_write(idr_preload_head, new);
+ __this_cpu_inc(idr_preload_cnt);
+ }
}
-EXPORT_SYMBOL(idr_get_new);
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr. If no ID is
+ * available in the specified range, returns -ENOSPC. On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0. This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr. However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in RCU-safe way after removal from idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+ int id;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ /* sanity checks */
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+ if (unlikely(max < start))
+ return -ENOSPC;
+
+ /* allocate id */
+ id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+ if (unlikely(id < 0))
+ return id;
+ if (unlikely(id > max))
+ return -ENOSPC;
+
+ idr_fill_slot(idr, ptr, id, pa);
+ return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
static void idr_remove_warning(int id)
{
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_IDR_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL + 1];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id)
while ((shift > 0) && p) {
n = (id >> shift) & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
*++paa = &p->ary[n];
p = p->ary[n];
shift -= IDR_BITS;
}
n = id & IDR_MASK;
- if (likely(p != NULL && test_bit(n, &p->bitmap))){
- __clear_bit(n, &p->bitmap);
+ if (likely(p != NULL && test_bit(n, p->bitmap))) {
+ __clear_bit(n, p->bitmap);
rcu_assign_pointer(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
- free_layer(to_free);
+ free_layer(idp, to_free);
to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
if (to_free)
- free_layer(to_free);
+ free_layer(idp, to_free);
} else
idr_remove_warning(id);
}
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *p;
struct idr_layer *to_free;
- /* Mask off upper bits we don't use for the search. */
- id &= MAX_IDR_MASK;
+ /* see comment in idr_find_slowpath() */
+ if (WARN_ON_ONCE(id < 0))
+ return;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id)
p = idp->top->ary[0];
rcu_assign_pointer(idp->top, p);
--idp->layers;
- to_free->bitmap = to_free->count = 0;
- free_layer(to_free);
+ to_free->count = 0;
+ bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
+ free_layer(idp, to_free);
}
while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id)
}
EXPORT_SYMBOL(idr_remove);
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * idr_destroy() only frees up unused, cached idp_layers, but this
- * function will remove all id mappings and leave all idp_layers
- * unused.
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
rcu_assign_pointer(idp->top, NULL);
- max = 1 << n;
+ max = idr_max(idp->layers);
id = 0;
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp)
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
if (p)
- free_layer(p);
+ free_layer(idp, p);
n += IDR_BITS;
p = *--paa;
}
}
idp->layers = 0;
}
-EXPORT_SYMBOL(idr_remove_all);
+EXPORT_SYMBOL(__idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers. After this function, @idp is
+ * completely unused and can be freed / recycled. The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
{
+ __idr_remove_all(idp);
+
while (idp->id_free_cnt) {
struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp)
}
EXPORT_SYMBOL(idr_destroy);
-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
{
int n;
struct idr_layer *p;
+ /*
+ * If @id is negative, idr_find() used to ignore the sign bit and
+ * performed lookup with the rest of bits, which is weird and can
+ * lead to very obscure bugs. We're now returning NULL for all
+ * negative IDs but just in case somebody was depending on the sign
+ * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
+ * be detected and fixed. WARN_ON_ONCE() can later be removed.
+ */
+ if (WARN_ON_ONCE(id < 0))
+ return NULL;
+
p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer+1) * IDR_BITS;
- /* Mask off upper bits we don't use for the search. */
- id &= MAX_IDR_MASK;
-
- if (id >= (1 << n))
+ if (id > idr_max(p->layer + 1))
return NULL;
BUG_ON(n == 0);
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id)
}
return((void *)p);
}
-EXPORT_SYMBOL(idr_find);
+EXPORT_SYMBOL(idr_find_slowpath);
/**
* idr_for_each - iterate through all stored pointers
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = rcu_dereference_raw(idp->top);
- max = 1 << n;
+ max = idr_max(idp->layers);
id = 0;
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_IDR_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp)
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
- max = 1 << n;
+ max = idr_max(p->layer + 1);
- while (id < max) {
+ while (id >= 0 && id <= max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
return p;
}
- id += 1 << n;
+ /*
+ * Proceed to the next layer at the current level. Unlike
+ * idr_for_each(), @id isn't guaranteed to be aligned to
+ * layer boundary at this point and adding 1 << n may
+ * incorrectly skip IDs. Make sure we jump to the
+ * beginning of the next layer using round_up().
+ */
+ id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
int n;
struct idr_layer *p, *old_p;
+ /* see comment in idr_find_slowpath() */
+ if (WARN_ON_ONCE(id < 0))
+ return ERR_PTR(-EINVAL);
+
p = idp->top;
if (!p)
return ERR_PTR(-EINVAL);
n = (p->layer+1) * IDR_BITS;
- id &= MAX_IDR_MASK;
-
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
}
n = id & IDR_MASK;
- if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_IDR_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
restart:
/* get vacant slot */
- t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+ t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0)
- return _idr_rc_to_errno(t);
+ return t == -ENOMEM ? -EAGAIN : t;
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
@@ -852,25 +1025,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
EXPORT_SYMBOL(ida_get_new_above);
/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
- *
- * Allocate new ID. It should be called with any required locks.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return %-ENOSPC.
- *
- * @p_id returns a value in the range %0 ... %0x7fffffff.
- */
-int ida_get_new(struct ida *ida, int *p_id)
-{
- return ida_get_new_above(ida, 0, p_id);
-}
-EXPORT_SYMBOL(ida_get_new);
-
-/**
* ida_remove - remove the given ID
* @ida: ida handle
* @id: ID to free
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id)
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
p = p->ary[n];
shift -= IDR_BITS;
}
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id)
goto err;
n = idr_id & IDR_MASK;
- __clear_bit(n, &p->bitmap);
+ __clear_bit(n, p->bitmap);
bitmap = (void *)p->ary[n];
if (!test_bit(offset, bitmap->bitmap))
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id)
/* update bitmap and remove it if empty */
__clear_bit(offset, bitmap->bitmap);
if (--bitmap->nr_busy == 0) {
- __set_bit(n, &p->bitmap); /* to please idr_remove() */
+ __set_bit(n, p->bitmap); /* to please idr_remove() */
idr_remove(&ida->idr, idr_id);
free_bitmap(ida, bitmap);
}
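
The idr.c changes above replace the idr_pre_get()/idr_get_new*() pair with the idr_preload()/idr_alloc() interface backed by a per-cpu layer preload buffer. Following the usage pattern spelled out in the idr_preload() kernel-doc, a hedged sketch of a caller allocating IDs under a spinlock (the lock, the idr and the my_obj type are illustrative, not part of this patch) could be:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

struct my_obj {
	int id;			/* hypothetical object being registered */
};

static int register_obj(struct my_obj *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preload layers; disables preemption */
	spin_lock(&my_lock);

	/* may fall back to the preloaded layers, so GFP_NOWAIT is enough */
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	if (id >= 0)
		obj->id = id;

	spin_unlock(&my_lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}

idr_alloc() returns the allocated ID or a negative errno (-ENOSPC, -ENOMEM), and passing 0 as @end requests the maximum range, as documented in the function's comment above.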
diff --git a/lib/kfifo.c b/lib/kfifo.c
new file mode 100644
index 00000000000..7b7f83027b7
--- /dev/null
+++ b/lib/kfifo.c
@@ -0,0 +1,607 @@
+/*
+ * A generic kernel FIFO implementation
+ *
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+
+/*
+ * internal helper to calculate the unused elements in a fifo
+ */
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
+{
+ return (fifo->mask + 1) - (fifo->in - fifo->out);
+}
+
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
+{
+ /*
+ * round up to the next power of 2, since our 'let the indices
+ * wrap' technique works only in this case.
+ */
+ size = roundup_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+
+ if (size < 2) {
+ fifo->data = NULL;
+ fifo->mask = 0;
+ return -EINVAL;
+ }
+
+ fifo->data = kmalloc(size * esize, gfp_mask);
+
+ if (!fifo->data) {
+ fifo->mask = 0;
+ return -ENOMEM;
+ }
+ fifo->mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_alloc);
+
+void __kfifo_free(struct __kfifo *fifo)
+{
+ kfree(fifo->data);
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = 0;
+ fifo->data = NULL;
+ fifo->mask = 0;
+}
+EXPORT_SYMBOL(__kfifo_free);
+
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+ unsigned int size, size_t esize)
+{
+ size /= esize;
+
+ size = roundup_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+ fifo->data = buffer;
+
+ if (size < 2) {
+ fifo->mask = 0;
+ return -EINVAL;
+ }
+ fifo->mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_init);
+
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+ unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(fifo->data + off, src, l);
+ memcpy(fifo->data, src + l, len - l);
+ /*
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
+ */
+ smp_wmb();
+}
+
+unsigned int __kfifo_in(struct __kfifo *fifo,
+ const void *buf, unsigned int len)
+{
+ unsigned int l;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ kfifo_copy_in(fifo, buf, len, fifo->in);
+ fifo->in += len;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_in);
+
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+ unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(dst, fifo->data + off, l);
+ memcpy(dst + l, fifo->data, len - l);
+ /*
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
+ */
+ smp_wmb();
+}
+
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ unsigned int l;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+
+ kfifo_copy_out(fifo, buf, len, fifo->out);
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
+
+unsigned int __kfifo_out(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ len = __kfifo_out_peek(fifo, buf, len);
+ fifo->out += len;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out);
+
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off,
+ unsigned int *copied)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+ unsigned long ret;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_from_user(fifo->data + off, from, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_from_user(fifo->data, from + l, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
+ /*
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
+ */
+ smp_wmb();
+ *copied = len - ret;
+ /* return the number of elements which are not copied */
+ return ret;
+}
+
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
+
+ if (esize != 1)
+ len /= esize;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+ if (unlikely(ret)) {
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->in += len;
+ return err;
+}
+EXPORT_SYMBOL(__kfifo_from_user);
+
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned int len, unsigned int off, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_to_user(to, fifo->data + off, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_to_user(to + l, fifo->data, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
+ /*
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
+ */
+ smp_wmb();
+ *copied = len - ret;
+ /* return the number of elements which are not copied */
+ return ret;
+}
+
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
+
+ if (esize != 1)
+ len /= esize;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
+ if (unlikely(ret)) {
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->out += len;
+ return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
+
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+ int nents, unsigned int len)
+{
+ int n;
+ unsigned int l;
+ unsigned int off;
+ struct page *page;
+
+ if (!nents)
+ return 0;
+
+ if (!len)
+ return 0;
+
+ n = 0;
+ page = virt_to_page(buf);
+ off = offset_in_page(buf);
+ l = 0;
+
+ while (len >= l + PAGE_SIZE - off) {
+ struct page *npage;
+
+ l += PAGE_SIZE;
+ buf += PAGE_SIZE;
+ npage = virt_to_page(buf);
+ if (page_to_phys(page) != page_to_phys(npage) - l) {
+ sg_set_page(sgl, page, l - off, off);
+ sgl = sg_next(sgl);
+ if (++n == nents || sgl == NULL)
+ return n;
+ page = npage;
+ len -= l - off;
+ l = off = 0;
+ }
+ }
+ sg_set_page(sgl, page, len, off);
+ return n + 1;
+}
+
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+ int nents, unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+ unsigned int n;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+ n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+ return n;
+}
+
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
+{
+ unsigned int l;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
+
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
+{
+ unsigned int l;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
+
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
+{
+ unsigned int max = (1 << (recsize << 3)) - 1;
+
+ if (len > max)
+ return max;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_max_r);
+
+#define __KFIFO_PEEK(data, out, mask) \
+ ((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int l;
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
+
+ l = __KFIFO_PEEK(data, fifo->out, mask);
+
+ if (--recsize)
+ l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+ return l;
+}
+
+#define __KFIFO_POKE(data, in, mask, val) \
+ ( \
+ (data)[(in) & (mask)] = (unsigned char)(val) \
+ )
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the record into the fifo
+ */
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
+{
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
+
+ __KFIFO_POKE(data, fifo->in, mask, n);
+
+ if (recsize > 1)
+ __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
+}
+
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
+{
+ return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_len_r);
+
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+ unsigned int len, size_t recsize)
+{
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
+
+ __kfifo_poke_n(fifo, len, recsize);
+
+ kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+ fifo->in += len + recsize;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_in_r);
+
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+ void *buf, unsigned int len, size_t recsize, unsigned int *n)
+{
+ *n = __kfifo_peek_n(fifo, recsize);
+
+ if (len > *n)
+ len = *n;
+
+ kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+ return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
+{
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+}
+EXPORT_SYMBOL(__kfifo_out_peek_r);
+
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
+{
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+ fifo->out += n + recsize;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out_r);
+
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int n;
+
+ n = __kfifo_peek_n(fifo, recsize);
+ fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied, size_t recsize)
+{
+ unsigned long ret;
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > kfifo_unused(fifo)) {
+ *copied = 0;
+ return 0;
+ }
+
+ __kfifo_poke_n(fifo, len, recsize);
+
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->in += len + recsize;
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_from_user_r);
+
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied, size_t recsize)
+{
+ unsigned long ret;
+ unsigned int n;
+
+ if (fifo->in == fifo->out) {
+ *copied = 0;
+ return 0;
+ }
+
+ n = __kfifo_peek_n(fifo, recsize);
+ if (len > n)
+ len = n;
+
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->out += n + recsize;
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_to_user_r);
+
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+ if (!nents)
+ BUG();
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
+
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+ unsigned int len, size_t recsize)
+{
+ len = __kfifo_max_r(len, recsize);
+ __kfifo_poke_n(fifo, len, recsize);
+ fifo->in += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
+
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+ if (!nents)
+ BUG();
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > fifo->in - fifo->out)
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int len;
+
+ len = __kfifo_peek_n(fifo, recsize);
+ fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
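
The new lib/kfifo.c (now built unconditionally via the Makefile hunk at the top) provides the out-of-line __kfifo_* backend used by the macros in <linux/kfifo.h>. A small usage sketch, assuming the standard kfifo.h macro API (the fifo name and element type are illustrative):

#include <linux/kernel.h>
#include <linux/kfifo.h>

/* a statically allocated fifo holding 16 ints; size must be a power of 2 */
static DEFINE_KFIFO(my_fifo, int, 16);

static void fifo_demo(void)
{
	int val = 42, out;

	/* kfifo_in() returns the number of elements actually stored */
	if (kfifo_in(&my_fifo, &val, 1) != 1)
		return;		/* fifo was full */

	/* kfifo_out() returns the number of elements actually copied */
	if (kfifo_out(&my_fifo, &out, 1) == 1)
		pr_info("got %d\n", out);
}

The __kfifo_*_r variants above implement the record-mode fifos, and setup_sgl() backs the kfifo_dma_*() prepare/finish interfaces declared in the same header.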
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index d71d8949894..8335d39d2cc 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
bool include_changing)
{
- struct hlist_node *n;
struct lc_element *e;
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
- hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
/* "about to be changed" elements, pending transaction commit,
* are hashed by their "new number". "Normal" elements have
* lc_number == lc_new_number. */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7874b01e816..b83c144d731 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset)
+{
+ piter->__pg_advance = 0;
+ piter->__nents = nents;
+
+ piter->page = NULL;
+ piter->sg = sglist;
+ piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+ piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
{
memset(miter, 0, sizeof(struct sg_mapping_iter));
- miter->__sg = sgl;
- miter->__nents = nents;
- miter->__offset = 0;
+ __sg_page_iter_start(&miter->piter, sgl, nents, 0);
WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
miter->__flags = flags;
}
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start);
*/
bool sg_miter_next(struct sg_mapping_iter *miter)
{
- unsigned int off, len;
-
- /* check for end and drop resources from the last iteration */
- if (!miter->__nents)
- return false;
-
sg_miter_stop(miter);
- /* get to the next sg if necessary. __offset is adjusted by stop */
- while (miter->__offset == miter->__sg->length) {
- if (--miter->__nents) {
- miter->__sg = sg_next(miter->__sg);
- miter->__offset = 0;
- } else
+ /*
+ * Get to the next page if necessary.
+ * __remaining and __offset are adjusted by sg_miter_stop()
+ */
+ if (!miter->__remaining) {
+ struct scatterlist *sg;
+ unsigned long pgoffset;
+
+ if (!__sg_page_iter_next(&miter->piter))
return false;
- }
- /* map the next page */
- off = miter->__sg->offset + miter->__offset;
- len = miter->__sg->length - miter->__offset;
+ sg = miter->piter.sg;
+ pgoffset = miter->piter.sg_pgoffset;
- miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
- off &= ~PAGE_MASK;
- miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
- miter->consumed = miter->length;
+ miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__remaining = sg->offset + sg->length -
+ (pgoffset << PAGE_SHIFT) - miter->__offset;
+ miter->__remaining = min_t(unsigned long, miter->__remaining,
+ PAGE_SIZE - miter->__offset);
+ }
+ miter->page = miter->piter.page;
+ miter->consumed = miter->length = miter->__remaining;
if (miter->__flags & SG_MITER_ATOMIC)
- miter->addr = kmap_atomic(miter->page) + off;
+ miter->addr = kmap_atomic(miter->page) + miter->__offset;
else
- miter->addr = kmap(miter->page) + off;
+ miter->addr = kmap(miter->page) + miter->__offset;
return true;
}
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
/* drop resources from the last iteration */
if (miter->addr) {
miter->__offset += miter->consumed;
+ miter->__remaining -= miter->consumed;
if (miter->__flags & SG_MITER_TO_SG)
flush_kernel_dcache_page(miter->page);
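
The scatterlist.c changes introduce a low-level page iterator (__sg_page_iter_start()/__sg_page_iter_next()) and rebuild sg_miter_next() on top of it, tracking the bytes left in the current page via __remaining. A hedged sketch of walking every page of an sg list with the iterator added here (the surrounding setup is illustrative):

#include <linux/scatterlist.h>

static void walk_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	/* start at page offset 0 of the first sg entry */
	__sg_page_iter_start(&piter, sgl, nents, 0);

	while (__sg_page_iter_next(&piter)) {
		struct page *page = piter.page;	/* current page, set by _next() */

		/* per-page work would go here, e.g. collecting pages */
		(void)page;
	}
}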