Diffstat (limited to 'lib')
 lib/bitrev.c       |   3
 lib/bug.c          |   2
 lib/debugobjects.c |  15
 lib/div64.c        |  10
 lib/radix-tree.c   | 122
 lib/ts_bm.c        |   2
 6 files changed, 84 insertions(+), 70 deletions(-)
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f88..3956203456d 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
};
EXPORT_SYMBOL_GPL(byte_rev_table);
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
{
return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}
+EXPORT_SYMBOL(bitrev16);
/**
* bitrev32 - reverse the order of bits in a u32 value
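The bitrev.c hunk drops the __always_inline so bitrev16 becomes a real, exported symbol that modules can call. For context, the file builds each wider reversal out of the next-narrower one; a minimal sketch of the same pattern one level up (assuming bitrev32 composes bitrev16 this way, mirroring the bitrev16 body shown above):

u32 bitrev32(u32 x)
{
	/* reverse each 16-bit half with bitrev16, then swap the halves */
	return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
}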
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f5578..bfeafd60ee9 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
(void *)bugaddr);
show_regs(regs);
+ add_taint(TAINT_WARN);
return BUG_TRAP_TYPE_WARN;
}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae..85b18d79be8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
struct debug_obj *new;
+ unsigned long flags;
if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- int retry = 0;
-repeat:
spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
}
spin_unlock(&pool_lock);
- if (fill_pool() && !obj && !retry++)
- goto repeat;
-
return obj;
}
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
+ fill_pool();
+
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
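Read together, the debugobjects.c hunks change when and how the object pool is refilled: alloc_object() loses its fill_pool()-and-retry loop, __debug_object_init() now refills the pool up front, and pool_lock switches to the irqsave/irqrestore variants, presumably because the same lock is also taken with interrupts disabled elsewhere in debugobjects. A small sketch of the resulting entry sequence, restricted to the lines the hunks actually show (everything else elided):

	fill_pool();				/* top up obj_pool before any bucket lock is held */

	db = get_bucket((unsigned long) addr);	/* hash the address to its bucket */

	spin_lock_irqsave(&db->lock, flags);	/* bucket lock; lookup/alloc happen under it as before */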
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f03..a111eb8de9c 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
#endif
#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
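iter_div_u64_rem() is a thin exported wrapper around __iter_div_u64_rem() (presumably a static inline defined elsewhere), so modules get an out-of-line symbol. The "iterative" part is division by repeated subtraction, which is cheap precisely when the quotient is small; a minimal sketch of that idea (illustrative only, hypothetical name, not the kernel's inline):

u32 iter_div_sketch(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 quotient = 0;

	/* peel off one divisor per iteration; fast when the dividend is
	 * only a small multiple of the divisor, as the comment above says */
	while (dividend >= divisor) {
		dividend -= divisor;
		quotient++;
	}

	*remainder = dividend;
	return quotient;
}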
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1..56ec21a7f73 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
*
* This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
return root->gfp_mask & __GFP_BITS_MASK;
}
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+ int idx;
+ for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+ if (node->tags[tag][idx])
+ return 1;
+ }
+ return 0;
+}
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
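The block of tag helpers added above (and deleted from its old location further down, i.e. the hunks simply move it earlier in the file so radix_tree_node_rcu_free() can call tag_clear()) also shows how per-tree "root tags" are stored: in the spare high bits of root->gfp_mask, above __GFP_BITS_SHIFT, so the root needs no separate tag word. An illustrative use (these helpers are static to radix-tree.c; the snippet only makes the bit layout concrete):

	root_tag_set(root, 0);		/* gfp_mask |= 1 << (0 + __GFP_BITS_SHIFT) */
	root_tag_set(root, 1);		/* gfp_mask |= 1 << (1 + __GFP_BITS_SHIFT) */
	root_tag_clear_all(root);	/* gfp_mask &= __GFP_BITS_MASK, dropping both tag bits */
	root_gfp_mask(root);		/* still returns only the real GFP flags */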
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
+
+ /*
+ * must only free zeroed nodes into the slab. radix_tree_shrink
+ * can leave us with a non-NULL entry in the first slot, so clear
+ * that here to make sure.
+ */
+ tag_clear(node, 0, 0);
+ tag_clear(node, 1, 0);
+ node->slots[0] = NULL;
+ node->count = 0;
+
kmem_cache_free(radix_tree_node_cachep, node);
}
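The scrubbing that used to happen in radix_tree_shrink() now happens in the RCU callback instead. The reason for scrubbing at all is stated in the comment: nodes must go back to the slab fully zeroed (first slot, tags, count). Doing it here rather than in radix_tree_shrink() presumably matters because lockless readers may still be walking the detached node until the grace period ends, and only this callback runs after that point. A sketch of how the free path reaches this callback (radix_tree_node_free() is named in the shrink hunk below; its body here is assumed):

static inline void radix_tree_node_free(struct radix_tree_node *node)
{
	/* defer the real kmem_cache_free(), done in radix_tree_node_rcu_free()
	 * above, until after a grace period, so concurrent lockless lookups
	 * can finish with the node first */
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}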
@@ -165,59 +227,6 @@ out:
}
EXPORT_SYMBOL(radix_tree_preload);
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
- root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
- return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
- int idx;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (node->tags[tag][idx])
- return 1;
- }
- return 0;
-}
-
/*
 * Return the maximum key which can be stored into a
* radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
newptr = radix_tree_ptr_to_indirect(newptr);
root->rnode = newptr;
root->height--;
- /* must only free zeroed nodes into the slab */
- tag_clear(to_free, 0, 0);
- tag_clear(to_free, 1, 0);
- to_free->slots[0] = NULL;
- to_free->count = 0;
radix_tree_node_free(to_free);
}
}
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a..4a7fce72898 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen, bs;
+ int shift = bm->patlen - 1, bs;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
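The ts_bm.c change is an off-by-one fix in the initial alignment. In bm_find(), shift is the index of the text byte lined up with the last byte of the pattern, so the first candidate alignment is patlen - 1; starting at patlen begins the scan one byte too far and can miss a match at the very start of the text. A minimal standalone sketch of the same right-to-left scan (illustrative names, assumes patlen >= 1; the real ts_bm jumps ahead using Boyer-Moore shift tables instead of the plain shift++ here):

#include <stddef.h>

static long bm_sketch_find(const unsigned char *text, size_t text_len,
			   const unsigned char *pat, size_t patlen)
{
	size_t shift = patlen - 1;	/* text index aligned with the pattern's last byte */

	while (shift < text_len) {
		size_t i;

		/* compare right-to-left from the alignment point */
		for (i = 0; i < patlen; i++)
			if (text[shift - i] != pat[patlen - 1 - i])
				break;
		if (i == patlen)
			return (long)(shift - (patlen - 1));	/* match offset */

		/* Boyer-Moore would consult its bad-character/good-suffix
		 * tables here; a plain +1 keeps the sketch minimal */
		shift++;
	}
	return -1;	/* no match */
}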