Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      17
-rw-r--r--  lib/Makefile            2
-rw-r--r--  lib/bitmap.c           28
-rw-r--r--  lib/bug.c              28
-rw-r--r--  lib/cmdline.c          57
-rw-r--r--  lib/iov_iter.c         69
-rw-r--r--  lib/kobject.c           5
-rw-r--r--  lib/refcount.c        169
-rw-r--r--  lib/sbitmap.c          75
-rw-r--r--  lib/string.c            2
-rw-r--r--  lib/syscall.c           1
-rw-r--r--  lib/test_kasan.c       10
-rw-r--r--  lib/test_user_copy.c    1
-rw-r--r--  lib/usercopy.c         26
-rw-r--r--  lib/vsprintf.c          6
15 files changed, 408 insertions, 88 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..e2a617e09ab7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -130,7 +130,8 @@ config DYNAMIC_DEBUG
nullarbor:~ # echo -n 'func svc_process -p' >
<debugfs>/dynamic_debug/control
- See Documentation/dynamic-debug-howto.txt for additional information.
+ See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+ information.
endmenu # "printk and dmesg options"
@@ -356,7 +357,7 @@ config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
(CRIS || M68K || FRV || UML || \
- AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+ SUPERH || BLACKFIN || MN10300 || METAG) || \
ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
help
@@ -404,8 +405,8 @@ config MAGIC_SYSRQ
by pressing various keys while holding SysRq (Alt+PrintScreen). It
also works on a serial console (on PC hardware at least), if you
send a BREAK and then within 5 seconds a command keypress. The
- keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
- unless you really know what this hack does.
+ keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+ Don't say Y unless you really know what this hack does.
config MAGIC_SYSRQ_DEFAULT_ENABLE
hex "Enable magic SysRq key functions by default"
@@ -414,7 +415,7 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
help
Specifies which SysRq key functions are enabled by default.
This may be set to 1 or 0 to enable or disable them all, or
- to a bitmask as described in Documentation/sysrq.txt.
+ to a bitmask as described in Documentation/admin-guide/sysrq.rst.
config MAGIC_SYSRQ_SERIAL
bool "Enable magic SysRq key over serial"
@@ -1103,9 +1104,6 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
-config PROVE_LOCKING_SMALL
- bool
-
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1112,9 @@ config LOCKDEP
select KALLSYMS
select KALLSYMS_ALL
+config LOCKDEP_SMALL
+ bool
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index acbc16bed9af..a155c73e3437 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o
+ once.o refcount.o usercopy.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..08c6ef3a2b6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -502,11 +502,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
* Syntax: range:used_size/group_size
* Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
*
- * Returns 0 on success, -errno on invalid input strings.
- * Error values:
- * %-EINVAL: second number in range smaller than first
- * %-EINVAL: invalid character in string
- * %-ERANGE: bit number specified too large for mask
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: second number in range smaller than first
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
*/
static int __bitmap_parselist(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
@@ -864,14 +864,16 @@ EXPORT_SYMBOL(bitmap_bitremap);
11 was set in @orig had no effect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
- * Let's say @relmap has these ten bits set:
+ * Let's say @relmap has these ten bits set::
+ *
* 40 41 42 43 45 48 53 61 74 95
+ *
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
* Further, let's say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possibility of an empty @dst result:
+ * avoid the possibility of an empty @dst result::
*
* unsigned long *tmp; // a temporary bitmap's bits
*
@@ -882,22 +884,26 @@ EXPORT_SYMBOL(bitmap_bitremap);
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
- * (the weight of @relmap).
+ * (the weight of @relmap):
*
+ * =============== ============== =================
* @orig           tmp            @dst
* 0               0              40
* 1               1              41
* 9               9              95
- * 10              0              40 (*)
+ * 10              0              40 [#f1]_
* 1 3 5 7         1 3 5 7        41 43 48 61
* 0 1 2 3 4       0 1 2 3 4      40 41 42 43 45
* 0 9 18 27       0 9 8 7        40 61 74 95
* 0 10 20 30      0              40
* 0 11 22 33      0 1 2 3        40 41 42 43
* 0 12 24 36      0 2 4 6        40 42 45 53
- * 78 102 211      1 2 8          41 42 74 (*)
+ * 78 102 211      1 2 8          41 42 74 [#f1]_
+ * =============== ============== =================
+ *
+ * .. [#f1]
*
- * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ * For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
diff --git a/lib/bug.c b/lib/bug.c
index 06edbbef0623..a6a1137d06db 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
#include <linux/sched.h>
#include <linux/rculist.h>
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
- const struct bug_entry *bug = NULL;
+ struct bug_entry *bug = NULL;
rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
#else
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
return NULL;
}
#endif
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
{
- const struct bug_entry *bug;
+ struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
- const struct bug_entry *bug;
+ struct bug_entry *bug;
const char *file;
- unsigned line, warning;
+ unsigned line, warning, once, done;
if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
+ }
}
if (warning) {
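As a hedged illustration of the BUGFLAG_ONCE/BUGFLAG_DONE handling added above (relevant on architectures that report WARN through the bug table; do_poll_step() is a hypothetical helper, not from this patch): a warn-once site in a loop traps only on its first hit, and every later hit takes the early BUG_TRAP_TYPE_WARN return.

extern int do_poll_step(int i);	/* hypothetical */

static void poll_loop_sketch(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		/* Prints a backtrace once; afterwards BUGFLAG_DONE is set
		 * and report_bug() returns before printing anything. */
		WARN_ONCE(do_poll_step(i) < 0, "poll step %d failed\n", i);
	}
}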
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..3c6432df7e63 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/ctype.h>
/*
* If a hyphen was found in get_option, this will handle the
@@ -189,3 +190,59 @@ bool parse_option_str(const char *str, const char *option)
return false;
}
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores are equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+ unsigned int i, equals = 0;
+ int in_quote = 0, quoted = 0;
+ char *next;
+
+ if (*args == '"') {
+ args++;
+ in_quote = 1;
+ quoted = 1;
+ }
+
+ for (i = 0; args[i]; i++) {
+ if (isspace(args[i]) && !in_quote)
+ break;
+ if (equals == 0) {
+ if (args[i] == '=')
+ equals = i;
+ }
+ if (args[i] == '"')
+ in_quote = !in_quote;
+ }
+
+ *param = args;
+ if (!equals)
+ *val = NULL;
+ else {
+ args[equals] = '\0';
+ *val = args + equals + 1;
+
+ /* Don't include quotes in value. */
+ if (**val == '"') {
+ (*val)++;
+ if (args[i-1] == '"')
+ args[i-1] = '\0';
+ }
+ }
+ if (quoted && args[i-1] == '"')
+ args[i-1] = '\0';
+
+ if (args[i]) {
+ args[i] = '\0';
+ next = args + i + 1;
+ } else
+ next = args + i;
+
+ /* Chew up trailing spaces. */
+ return skip_spaces(next);
+}
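For context, a minimal caller sketch of the loop next_arg() is built for, modeled on parse_args() in kernel/params.c (walk_args_sketch() and the pr_info() output are illustrative, not part of this patch). Note that next_arg() writes NUL terminators into the buffer, so the command line must be writable:

static void walk_args_sketch(char *cmdline)
{
	char *param, *val;

	cmdline = skip_spaces(cmdline);
	while (*cmdline) {
		cmdline = next_arg(cmdline, &param, &val);
		if (val)
			pr_info("option '%s' = '%s'\n", param, val);
		else
			pr_info("flag '%s'\n", param);
	}
}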
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
size_t count)
{
/* It will get better. Eventually... */
- if (segment_eq(get_fs(), KERNEL_DS)) {
+ if (uaccess_kernel()) {
direction |= ITER_KVEC;
i->type = direction;
i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return 0;
}
iterate_and_advance(i, bytes, v,
- __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+ if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
}
EXPORT_SYMBOL(iov_iter_advance);
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= unroll;
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logic for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
/*
* Return the count of just the current iov_iter segment.
*/
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
+ i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
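A hedged sketch of what iov_iter_revert() is for (copy_or_rollback() is a hypothetical wrapper): if an operation consumes part of the iterator and then fails, the caller can rewind by exactly the consumed amount, including across pipe buffers via the new start_idx bookkeeping:

static size_t copy_or_rollback(void *buf, size_t len, struct iov_iter *iter)
{
	size_t copied = copy_from_iter(buf, len, iter);

	if (copied != len) {
		/* Rewind by what was consumed; the iterator returns to
		 * its position before the copy_from_iter() call. */
		iov_iter_revert(iter, copied);
		return 0;
	}
	return copied;
}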
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..763d70a18941 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
}
EXPORT_SYMBOL(kobject_get);
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
{
+ if (!kobj)
+ return NULL;
if (!kref_get_unless_zero(&kobj->kref))
kobj = NULL;
return kobj;
}
+EXPORT_SYMBOL(kobject_get_unless_zero);
/*
* kobject_cleanup - free kobject resources.
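A hedged usage sketch for the newly exported kobject_get_unless_zero() (the table lock and find_in_table() are hypothetical): with the added NULL check, a missing entry and a dying object share one failure path.

static struct kobject *lookup_sketch(const char *name)
{
	struct kobject *kobj;

	spin_lock(&example_table_lock);
	/* find_in_table() may return NULL; that is now safe to pass on. */
	kobj = kobject_get_unless_zero(find_in_table(name));
	spin_unlock(&example_table_lock);

	return kobj;	/* NULL if absent or already being torn down */
}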
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..f42124ccf295 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
#include <linux/refcount.h>
#include <linux/bug.h>
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (!val)
return false;
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
new = val + i;
if (new < val)
new = UINT_MAX;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
void refcount_add(unsigned int i, refcount_t *r)
{
WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
*/
bool refcount_inc_not_zero(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
new = val + 1;
if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
if (unlikely(!new))
return true;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
*/
void refcount_inc(refcount_t *r)
{
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_inc);
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return false;
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return false;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_and_test(refcount_t *r)
{
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-
void refcount_dec(refcount_t *r)
{
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
* It can be used like a try-delete operator; this explicit case is provided
* and not cmpxchg in generic, because that would allow implementing unsafe
* operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_if_one(refcount_t *r)
{
- return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+ int val = 1;
+
+ return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
* No atomic_t counterpart, it decrements unless the value is 1, in which case
* it will return false.
*
* Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
*/
bool refcount_dec_not_one(refcount_t *r)
{
- unsigned int old, new, val = atomic_read(&r->refs);
+ unsigned int new, val = atomic_read(&r->refs);
- for (;;) {
+ do {
if (unlikely(val == UINT_MAX))
return true;
@@ -208,24 +278,27 @@ bool refcount_dec_not_one(refcount_t *r)
return true;
}
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
* to decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
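To ground the kernel-doc added above, a minimal object lifecycle using the documented calls (struct cache_entry and its helpers are hypothetical):

struct cache_entry {
	refcount_t refs;
	struct rcu_head rcu;
	/* ... payload ... */
};

/* Lookup under RCU: take a reference only if the entry is still live. */
static struct cache_entry *entry_get(struct cache_entry *e)
{
	return refcount_inc_not_zero(&e->refs) ? e : NULL;
}

static void entry_put(struct cache_entry *e)
{
	/* Release ordering: prior stores complete before the free. */
	if (refcount_dec_and_test(&e->refs))
		kfree_rcu(e, rcu);
}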
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 60e800e0b5a0..80aa8d5463fa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
- bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+ unsigned int hint, bool wrap)
{
unsigned int orig_hint = hint;
int nr;
while (1) {
- nr = find_next_zero_bit(&word->word, word->depth, hint);
- if (unlikely(nr >= word->depth)) {
+ nr = find_next_zero_bit(word, depth, hint);
+ if (unlikely(nr >= depth)) {
/*
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
return -1;
}
- if (!test_and_set_bit(nr, &word->word))
+ if (!test_and_set_bit(nr, word))
break;
hint = nr + 1;
- if (hint >= word->depth - 1)
+ if (hint >= depth - 1)
hint = 0;
}
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
index = SB_NR_TO_INDEX(sb, alloc_hint);
for (i = 0; i < sb->map_nr; i++) {
- nr = __sbitmap_get_word(&sb->map[index],
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ sb->map[index].depth,
SB_NR_TO_BIT(sb, alloc_hint),
!round_robin);
if (nr != -1) {
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
}
EXPORT_SYMBOL_GPL(sbitmap_get);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth)
+{
+ unsigned int i, index;
+ int nr = -1;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ min(sb->map[index].depth, shallow_depth),
+ SB_NR_TO_BIT(sb, alloc_hint), true);
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ index++;
+ alloc_hint = index << sb->shift;
+
+ if (index >= sb->map_nr) {
+ index = 0;
+ alloc_hint = 0;
+ }
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
unsigned int i;
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
+{
+ unsigned int hint, depth;
+ int nr;
+
+ hint = this_cpu_read(*sbq->alloc_hint);
+ depth = READ_ONCE(sbq->sb.depth);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? prandom_u32() % depth : 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+ nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sbq->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sbq->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sbq->alloc_hint, hint);
+ }
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
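A hedged sketch of what the "shallow" variants buy (the priority split below is illustrative): capping shallow_depth keeps the upper bits of each word free, so full-depth callers can still allocate when shallow callers have saturated their share.

/* Low-priority callers may only use the first 4 bits of each word. */
static int get_low_prio_tag(struct sbitmap_queue *sbq)
{
	return __sbitmap_queue_get_shallow(sbq, 4);
}

/* High-priority callers keep the whole depth available. */
static int get_high_prio_tag(struct sbitmap_queue *sbq)
{
	return __sbitmap_queue_get(sbq);
}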
diff --git a/lib/string.c b/lib/string.c
index ed83562a53ae..b5c9a1168d3a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(strncpy);
* @src: Where to copy the string from
* @size: size of destination buffer
*
- * Compatible with *BSD: the result is always a valid
+ * Compatible with ``*BSD``: the result is always a valid
* NUL-terminated string that fits in the buffer (unless,
* of course, the buffer size is zero). It does not pad
* out the result like strncpy() does.
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5fa6a3..2c6cd1b5c3ea 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
+ *sp = *pc = 0;
*callno = -1;
return 0;
}
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d3140fbb8..a25c9763fce1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
static int __init kmalloc_tests_init(void)
{
+ /*
+ * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+ * report for the first case.
+ */
+ bool multishot = kasan_save_enable_multi_shot();
+
kmalloc_oob_right();
kmalloc_oob_left();
kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
ksize_unpoisons_memory();
copy_user_test();
use_after_scope_test();
+
+ kasan_restore_multi_shot(multishot);
+
return -EAGAIN;
}
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 1a8d71a68531..4621db801b23 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,7 +31,6 @@
* their capability at compile-time, we just have to opt-out certain archs.
*/
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_AVR32) && \
!defined(CONFIG_BLACKFIN) && \
!defined(CONFIG_M32R) && \
!defined(CONFIG_M68K) && \
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..1b6010a3beb8
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+{
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
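A hedged caller-side sketch of why _copy_from_user() zeroes the uncopied tail (struct req and fetch_req_sketch() are illustrative): even if a caller mishandles the error path, the destination never exposes stale kernel memory.

struct req {
	u32 flags;
	u64 addr;
};

static int fetch_req_sketch(struct req *r, const void __user *ubuf)
{
	if (copy_from_user(r, ubuf, sizeof(*r)))
		return -EFAULT;	/* short copy: the unread tail of *r was zeroed */
	return 0;
}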
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e3bf4e0f10b5..176641cc549d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1954,13 +1954,13 @@ set_precision(struct printf_spec *spec, int prec)
* This function generally follows C99 vsnprintf, but has some
* extensions and a few limitations:
*
- * %n is unsupported
- * %p* is handled by pointer()
+ * - ``%n`` is unsupported
+ * - ``%p*`` is handled by pointer()
*
* See pointer() or Documentation/printk-formats.txt for more
* extensive description.
*
- * ** Please update the documentation in both places when making changes **
+ * **Please update the documentation in both places when making changes**
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing