Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile                        3
-rw-r--r--  lib/assoc_array.c                   6
-rw-r--r--  lib/bitmap.c                       25
-rw-r--r--  lib/btree.c                         1
-rw-r--r--  lib/checksum.c                     12
-rw-r--r--  lib/decompress_bunzip2.c            2
-rw-r--r--  lib/fdt_empty_tree.c                2
-rw-r--r--  lib/idr.c                           8
-rw-r--r--  lib/lz4/lz4_decompress.c           15
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c    43
-rw-r--r--  lib/nlattr.c                       14
-rw-r--r--  lib/percpu_counter.c                2
-rw-r--r--  lib/plist.c                        52
-rw-r--r--  lib/radix-tree.c                  106
-rw-r--r--  lib/string.c                       16
-rw-r--r--  lib/strnlen_user.c                  3
16 files changed, 196 insertions, 114 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 48140e3ba73f..befe5554a34f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -147,7 +147,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
-libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
+ fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06f7e4fe8d2d..c0634aa923a6 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
lower = src[off + k];
if (left && off + k == lim - 1)
lower &= mask;
- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+ dst[k] = lower >> rem;
+ if (rem)
+ dst[k] |= upper << (BITS_PER_LONG - rem);
if (left && k == lim - 1)
dst[k] &= mask;
}
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+ dst[k + off] = upper << rem;
+ if (rem)
+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
}
@@ -599,12 +603,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
unsigned a, b;
int c, old_c, totaldigits;
const char __user __force *ubuf = (const char __user __force *)buf;
- int exp_digit, in_range;
+ int at_start, in_range;
totaldigits = c = 0;
bitmap_zero(maskp, nmaskbits);
do {
- exp_digit = 1;
+ at_start = 1;
in_range = 0;
a = b = 0;
@@ -633,11 +637,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
break;
if (c == '-') {
- if (exp_digit || in_range)
+ if (at_start || in_range)
return -EINVAL;
b = 0;
in_range = 1;
- exp_digit = 1;
continue;
}
@@ -647,16 +650,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
b = b * 10 + (c - '0');
if (!in_range)
a = b;
- exp_digit = 0;
+ at_start = 0;
totaldigits++;
}
if (!(a <= b))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
- while (a <= b) {
- set_bit(a, maskp);
- a++;
+ if (!at_start) {
+ while (a <= b) {
+ set_bit(a, maskp);
+ a++;
+ }
}
} while (buflen && c == ',');
return 0;
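
Two independent fixes above: shifting by 'BITS_PER_LONG - rem' is undefined behavior in C whenever rem == 0, and an empty or digit-free list no longer sets bit 0, since the at_start test skips the set_bit() loop when no digit was parsed. A minimal standalone sketch of the shift hazard and the fixed pattern (not kernel code):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
        unsigned long upper = 0xdeadbeefUL, dst;
        int rem = 0;    /* shift amount that is a multiple of the word size */

        /*
         * Old pattern: 'upper << (BITS_PER_LONG - rem)' shifts by the
         * full word width when rem == 0, which is undefined in C; on
         * x86 the count is taken mod 64, so 'upper' leaks into dst.
         */
        dst = 0;
        if (rem)    /* fixed pattern: shift only when rem != 0 */
            dst |= upper << (BITS_PER_LONG - rem);

        printf("dst = %lx\n", dst);    /* 0, as intended */
        return 0;
    }
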
diff --git a/lib/btree.c b/lib/btree.c
index f9a484676cb6..4264871ea1a0 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
+ mempool_free(head->node, head->mempool);
mempool_destroy(head->mempool);
head->mempool = NULL;
}
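
The one-liner plugs a leak: btree_destroy() tore down the mempool but never returned head->node, the root node still checked out of it. A standalone sketch of the ownership rule, with plain malloc/free standing in for the mempool API (illustrative names, assumed structure):

    #include <stdlib.h>

    struct head {
        void *node;    /* root node still owned by the head */
        void *pool;    /* backing allocator */
    };

    static void head_destroy(struct head *h)
    {
        free(h->node);    /* the fix: return the outstanding node first */
        h->node = NULL;
        free(h->pool);    /* then tear down the pool itself */
        h->pool = NULL;
    }

    int main(void)
    {
        struct head h = { malloc(16), malloc(16) };

        head_destroy(&h);    /* no leak: node released before pool */
        return 0;
    }
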
diff --git a/lib/checksum.c b/lib/checksum.c
index 129775eb6de6..8b39e86dbab5 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
EXPORT_SYMBOL(csum_partial_copy);
#ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
#else
s += (proto + len) << 8;
#endif
- s += (s >> 32);
- return (__force __wsum)s;
+ return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
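
The old fold, 's += (s >> 32)' followed by truncation, loses a carry out of bit 31; from64to32() folds twice so the end-around carry is preserved. A standalone demonstration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    /* Same folding as the patch: add the 32-bit halves, then fold the
     * carry back in; two rounds suffice since the first sum fits in
     * 33 bits at most. */
    static uint32_t from64to32(uint64_t x)
    {
        x = (x & 0xffffffff) + (x >> 32);
        x = (x & 0xffffffff) + (x >> 32);
        return (uint32_t)x;
    }

    int main(void)
    {
        uint64_t s = 0x1ffffffffULL;    /* low = 0xffffffff, high = 1 */

        /* old code: s += s >> 32 gives 0x200000000, truncation -> 0,
         * the carry is lost */
        printf("%u\n", from64to32(s));  /* prints 1: carry folded back */
        return 0;
    }
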
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 31c5f7675fbf..f504027d66a8 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
- if (origPtr > dbufSize)
+ if (origPtr >= dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
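
origPtr is later used as an index into the dbufSize-entry dbuf[] array, so dbufSize itself is already one past the end; only '>=' rejects it. A trivial standalone illustration of the off-by-one:

    #include <stdio.h>

    /* An index into buf[size] is valid only for idx < size; rejecting
     * just 'idx > size' misses idx == size, one element past the end,
     * which is exactly the case the '>=' in the patch now catches. */
    static int origptr_ok(unsigned idx, unsigned size)
    {
        return idx < size;    /* 1 = usable, 0 = out of bounds */
    }

    int main(void)
    {
        printf("%d\n", origptr_ok(99, 100));     /* 1: last slot */
        printf("%d\n", origptr_ok(100, 100));    /* 0: one past the end */
        return 0;
    }
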
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000000..5d30c58150ad
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/idr.c b/lib/idr.c
index bfe4db4e165f..674c30bc2ed0 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -250,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (id >= 1 << (idp->layers * IDR_BITS)) {
+ if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
@@ -827,12 +827,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
if (!p)
return ERR_PTR(-EINVAL);
- n = (p->layer+1) * IDR_BITS;
-
- if (id >= (1 << n))
+ if (id > idr_max(p->layer + 1))
return ERR_PTR(-EINVAL);
- n -= IDR_BITS;
+ n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
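
Both hunks replace open-coded '1 << (layers * IDR_BITS)' limits with idr_max(), which is defined elsewhere in idr.c and not shown in this diff. A sketch of the clamped computation such a helper plausibly performs (assumed shape, not copied from the kernel):

    #include <stdio.h>

    #define IDR_BITS      8
    #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)

    /* The open-coded '1 << (layers * IDR_BITS)' overflows int once the
     * shift reaches the width of int; clamping the shift keeps the
     * comparison well defined. */
    static int idr_max(int layers)
    {
        int bits = layers * IDR_BITS;

        if (bits > (int)MAX_IDR_SHIFT)
            bits = (int)MAX_IDR_SHIFT;
        return (int)((1U << bits) - 1);
    }

    int main(void)
    {
        printf("%d\n", idr_max(1));    /* 255 */
        printf("%d\n", idr_max(4));    /* clamped: INT_MAX, no overflow */
        return 0;
    }
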
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..f0f5c5c3de12 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
+ if (unlikely(length > (size_t)(length + len)))
+ goto _output_error;
length += len;
}
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
+ if (unlikely(length > (size_t)(length + *ip)))
+ goto _output_error;
length += *ip++;
}
@@ -135,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
+ if ((ref + COPYLENGTH) > oend ||
+ (op + COPYLENGTH) > oend)
+ goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
@@ -155,7 +162,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
/* write overflow error detected */
_output_error:
- return (int) (-(((char *)ip) - source));
+ return -1;
}
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +195,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
}
}
@@ -228,6 +237,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
if (length == ML_MASK) {
while (ip < iend) {
int s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
if (s == 255)
continue;
@@ -280,7 +291,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
/* write overflow error detected */
_output_error:
- return (int) (-(((char *) ip) - source));
+ return -1;
}
int lz4_decompress(const unsigned char *src, size_t *src_len,
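
Each added test catches wraparound of the unsigned length accumulator before the bogus value can size a copy, and both error paths now return a plain -1 rather than a negative input offset. For unsigned a and b, a + b wraps exactly when the result is smaller than either operand; a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap detection for unsigned addition: the same test as the
     * patch's 'length > (size_t)(length + len)'. */
    static int add_overflows(size_t a, size_t b)
    {
        return a > (size_t)(a + b);
    }

    int main(void)
    {
        printf("%d\n", add_overflows((size_t)-1, 1));  /* 1: wraps to 0 */
        printf("%d\n", add_overflows(100, 200));       /* 0: fine */
        return 0;
    }
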
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -25,6 +25,16 @@
#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
{
@@ -55,12 +65,19 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
@@ -116,12 +133,19 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 31 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
NEED_IP(2);
}
m_pos = op - 1;
@@ -134,12 +158,19 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
NEED_IP(1);
}
- t += 7 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
NEED_IP(2);
}
next = get_unaligned_le16(ip);
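
All three hunks apply the same hardening: instead of accumulating 't += 255' per zero byte, which can overflow, the run of zero bytes is counted first, rejected if it exceeds MAX_255_COUNT, and scaled once; '(offset << 8) - offset' is offset * 255 without a multiply. A standalone sketch of the fixed decode step (input-bounds checks such as NEED_IP omitted here; the kernel keeps them):

    #include <stdio.h>

    #define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

    static int decode_run(const unsigned char **pip, size_t *t, size_t base)
    {
        const unsigned char *ip = *pip, *ip_last = ip;
        size_t offset;

        while (*ip == 0)                   /* count the zero bytes */
            ip++;
        offset = (size_t)(ip - ip_last);
        if (offset > MAX_255_COUNT)        /* would overflow t */
            return -1;
        *t += (offset << 8) - offset + base + *ip++;  /* offset * 255 */
        *pip = ip;
        return 0;
    }

    int main(void)
    {
        const unsigned char buf[] = { 0, 0, 5 };  /* two zeroes, then 5 */
        const unsigned char *ip = buf;
        size_t t = 0;

        if (decode_run(&ip, &t, 15) == 0)
            printf("t = %zu\n", t);    /* 2*255 + 15 + 5 = 530 */
        return 0;
    }
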
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca7809b08..10ad042d01be 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
}
if (unlikely(rem > 0))
- printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
- "attributes.\n", rem);
+ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+ rem, current->comm);
err = 0;
errout:
@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
- int len = strlen(str) + 1;
- int d = nla_len(nla) - len;
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+ if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
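
The reworked nla_strcmp() strips one trailing NUL from the attribute payload before comparing, so terminated and unterminated attributes carrying the same text both match; previously the length comparison included str's terminator, and an unterminated attribute always compared unequal. A standalone sketch of the fixed comparison:

    #include <stdio.h>
    #include <string.h>

    /* "foo" with attrlen 3 and "foo\0" with attrlen 4 both compare
     * equal to str "foo". */
    static int attr_strcmp(const char *buf, int attrlen, const char *str)
    {
        int len = strlen(str);
        int d;

        if (attrlen > 0 && buf[attrlen - 1] == '\0')
            attrlen--;                 /* trailing NUL is not payload */
        d = attrlen - len;
        return d ? d : memcmp(buf, str, len);
    }

    int main(void)
    {
        printf("%d %d\n", attr_strcmp("foo", 3, "foo") == 0,
                          attr_strcmp("foo\0", 4, "foo") == 0);  /* 1 1 */
        return 0;
    }
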
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8280a5dd1727..7dd33577b905 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
struct percpu_counter *fbc;
compute_batch_value();
- if (action != CPU_DEAD)
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f7a46f..0f2084d30798 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
plist_check_head(head);
}
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add(). It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node: &struct plist_node pointer - entry to be moved
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ BUG_ON(plist_head_empty(head));
+ BUG_ON(plist_node_empty(node));
+
+ if (node == plist_last(head))
+ return;
+
+ iter = plist_next(node);
+
+ if (node->prio != iter->prio)
+ return;
+
+ plist_del(node, head);
+
+ plist_for_each_continue(iter, head) {
+ if (node->prio != iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+ }
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
+
#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
@@ -170,6 +210,14 @@ static void __init plist_test_check(int nr_expect)
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
+static void __init plist_test_requeue(struct plist_node *node)
+{
+ plist_requeue(node, &test_head);
+
+ if (node != plist_last(&test_head))
+ BUG_ON(node->prio == plist_next(node)->prio);
+}
+
static int __init plist_test(void)
{
int nr_expect = 0, i, loop;
@@ -193,6 +241,10 @@ static int __init plist_test(void)
nr_expect--;
}
plist_test_check(nr_expect);
+ if (!plist_node_empty(test_node + i)) {
+ plist_test_requeue(test_node + i);
+ plist_test_check(nr_expect);
+ }
}
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
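
plist_requeue() gives callers round-robin behaviour among entries sharing a priority: the serviced entry is moved behind its equal-priority peers. A standalone miniature of the same semantics on a priority-sorted array (an analogy only, not the kernel's list implementation):

    #include <stdio.h>

    struct ent { int prio; char name; };

    /* Move entry i behind the last entry with the same priority. */
    static void requeue(struct ent *v, int n, int i)
    {
        struct ent tmp = v[i];
        int j = i;

        while (j + 1 < n && v[j + 1].prio == tmp.prio) {
            v[j] = v[j + 1];    /* shift same-prio peers forward */
            j++;
        }
        v[j] = tmp;
    }

    int main(void)
    {
        struct ent v[] = { {1, 'a'}, {1, 'b'}, {1, 'c'}, {2, 'd'} };

        requeue(v, 4, 0);    /* 'a' goes behind 'b' and 'c', not 'd' */
        for (int i = 0; i < 4; i++)
            printf("%c", v[i].name);    /* prints "bcad" */
        printf("\n");
        return 0;
    }
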
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd4a8dfdf0b8..7e30d2a7f346 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -946,81 +946,6 @@ next:
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
-
-/**
- * radix_tree_next_hole - find the next hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
- * indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true). In rare cases of index wrap-around, 0 will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 5, then subsequently a hole is created at index 10,
- * radix_tree_next_hole covering both indexes may return 10 if called
- * under rcu_read_lock.
- */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index++;
- if (index == 0)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_next_hole);
-
-/**
- * radix_tree_prev_hole - find the prev hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search backwards in the range [max(index-max_scan+1, 0), index]
- * for the first hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >= max_scan'
- * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 10, then subsequently a hole is created at index 5,
- * radix_tree_prev_hole covering both indexes may return 5 if called under
- * rcu_read_lock.
- */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index--;
- if (index == ULONG_MAX)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_prev_hole);
-
/**
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
* @root: radix tree root
@@ -1337,15 +1262,18 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
}
/**
- * radix_tree_delete - delete an item from a radix tree
+ * radix_tree_delete_item - delete an item from a radix tree
* @root: radix tree root
* @index: index key
+ * @item: expected item
*
- * Remove the item at @index from the radix tree rooted at @root.
+ * Remove @item at @index from the radix tree rooted at @root.
*
- * Returns the address of the deleted item, or NULL if it was not present.
+ * Returns the address of the deleted item, or NULL if it was not present
+ * or the entry at the given @index was not @item.
*/
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_delete_item(struct radix_tree_root *root,
+ unsigned long index, void *item)
{
struct radix_tree_node *node = NULL;
struct radix_tree_node *slot = NULL;
@@ -1380,6 +1308,11 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (slot == NULL)
goto out;
+ if (item && slot != item) {
+ slot = NULL;
+ goto out;
+ }
+
/*
* Clear all tags associated with the item to be deleted.
* This way of doing it would be inefficient, but seldom is any set.
@@ -1424,6 +1357,21 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
out:
return slot;
}
+EXPORT_SYMBOL(radix_tree_delete_item);
+
+/**
+ * radix_tree_delete - delete an item from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Remove the item at @index from the radix tree rooted at @root.
+ *
+ * Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+ return radix_tree_delete_item(root, index, NULL);
+}
EXPORT_SYMBOL(radix_tree_delete);
/**
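
radix_tree_delete_item() makes deletion conditional: the slot is cleared only if it still holds the expected item, so a caller racing with a replacement cannot delete the newer entry, and plain radix_tree_delete() becomes the unconditional special case (item == NULL). A standalone sketch of the compare-and-delete semantics on a flat slot array (hypothetical, for illustration):

    #include <stdio.h>

    /* Returns the deleted pointer, or NULL if the slot was empty or
     * held something other than the expected item. */
    static void *delete_item(void **slots, unsigned long index, void *item)
    {
        void *old = slots[index];

        if (!old || (item && old != item))
            return NULL;
        slots[index] = NULL;
        return old;
    }

    int main(void)
    {
        int a = 1, b = 2;
        void *slots[4] = { &a, &b, NULL, NULL };

        printf("%p\n", delete_item(slots, 0, &b));  /* NULL: mismatch */
        printf("%p\n", delete_item(slots, 0, &a));  /* &a: deleted */
        return 0;
    }
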
diff --git a/lib/string.c b/lib/string.c
index e5878de4f101..cb9ea2181557 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
EXPORT_SYMBOL(memset);
#endif
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ * keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it just invokes the one of memset() implicitly.
+ */
+void memzero_explicit(void *s, size_t count)
+{
+ memset(s, 0, count);
+ barrier();
+}
+EXPORT_SYMBOL(memzero_explicit);
+
#ifndef __HAVE_ARCH_MEMCPY
/**
* memcpy - Copy one area of memory to another
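
memset() on memory that is provably never read again is a dead store the optimizer may delete, which is exactly wrong when scrubbing key material; the barrier() after the memset makes the store observable. A standalone sketch using the GCC/Clang-style memory clobber that the kernel's barrier() expands to (assumed toolchain):

    #include <string.h>

    /* The empty asm clobbers "memory", so the compiler cannot prove
     * the zeroed bytes are never read and must keep the memset. */
    #define barrier() __asm__ __volatile__("" : : : "memory")

    static void scrub(void *s, size_t n)
    {
        memset(s, 0, n);    /* without the barrier, a dead store */
        barrier();
    }

    int main(void)
    {
        char key[32] = "secret";

        scrub(key, sizeof(key));    /* survives -O2 dead-store elimination */
        return 0;
    }
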
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..11649615c505 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
return res + find_zero(data) + 1 - align;
}
res += sizeof(unsigned long);
- if (unlikely(max < sizeof(unsigned long)))
+ /* We already handled 'unsigned long' bytes. Did we do it all ? */
+ if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
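
The off-by-one: after the word just examined is counted into res, a remaining budget of exactly one word means everything allowed has been scanned, but the old '<' test let the loop run once more and read past the limit. A standalone simulation of the loop bound:

    #include <stdio.h>

    int main(void)
    {
        unsigned long max = 2 * sizeof(unsigned long), res = 0;

        for (;;) {
            res += sizeof(unsigned long);        /* word just handled */
            if (max <= sizeof(unsigned long))    /* old code used '<' */
                break;
            max -= sizeof(unsigned long);
        }
        /* 16 on LP64 with '<='; the old '<' test would scan 24 bytes,
         * one word beyond the limit. */
        printf("bytes scanned: %lu\n", res);
        return 0;
    }
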