Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                24
-rw-r--r--  fs/autofs4/autofs_i.h    1
-rw-r--r--  fs/autofs4/expire.c      2
-rw-r--r--  fs/btrfs/ctree.c         4
-rw-r--r--  fs/btrfs/extent-tree.c   8
-rw-r--r--  fs/buffer.c             21
-rw-r--r--  fs/dcache.c              5
-rw-r--r--  fs/eventpoll.c           4
-rw-r--r--  fs/exec.c                2
-rw-r--r--  fs/jbd/checkpoint.c      2
-rw-r--r--  fs/jbd2/checkpoint.c     2
-rw-r--r--  fs/namespace.c           8
-rw-r--r--  fs/ntfs/aops.c          14
-rw-r--r--  fs/timerfd.c             5
14 files changed, 54 insertions, 48 deletions
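The hunks below are the fs/ portion of what appears to be the PREEMPT_RT patch queue (the CONFIG_PREEMPT_RT_BASE guards, cpu_chill(), and the swork API are all -rt series constructs): bit spinlocks and busy-wait loops that become unsafe or wasteful once spinlocks can sleep are replaced with sleeping primitives. Short notes, with hedged sketches of the -rt helpers where useful, follow each file's diff.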
diff --git a/fs/aio.c b/fs/aio.c
index 3241659491b1..9aadc21d6525 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
+#include <linux/work-simple.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -110,7 +111,7 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
- struct work_struct free_work;
+ struct swork_event free_work;
/*
* signals when all in-flight requests are done
@@ -227,6 +228,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
+ BUG_ON(swork_get());
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
@@ -506,9 +508,9 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
return cancel(kiocb);
}
-static void free_ioctx(struct work_struct *work)
+static void free_ioctx(struct swork_event *sev)
{
- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
pr_debug("freeing %p\n", ctx);
@@ -525,8 +527,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
if (ctx->requests_done)
complete(ctx->requests_done);
- INIT_WORK(&ctx->free_work, free_ioctx);
- schedule_work(&ctx->free_work);
+ INIT_SWORK(&ctx->free_work, free_ioctx);
+ swork_queue(&ctx->free_work);
}
/*
@@ -534,9 +536,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
-static void free_ioctx_users(struct percpu_ref *ref)
+static void free_ioctx_users_work(struct swork_event *sev)
{
- struct kioctx *ctx = container_of(ref, struct kioctx, users);
+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
struct kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
@@ -555,6 +557,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
percpu_ref_put(&ctx->reqs);
}
+static void free_ioctx_users(struct percpu_ref *ref)
+{
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
+ swork_queue(&ctx->free_work);
+}
+
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
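The percpu_ref release callbacks (free_ioctx_users(), free_ioctx_reqs()) can fire from atomic context, where ctx->ctx_lock, a sleeping spinlock on RT, must not be taken. The patch therefore defers the teardown to the simple-work ("swork") kthread, and BUG_ON(swork_get()) in aio_setup() brings that kthread up before anything can be queued. A minimal usage sketch of the API, inferred from the hunks above; linux/work-simple.h exists only in the -rt series, so the exact signatures are an assumption:

#include <linux/work-simple.h>	/* -rt only; see the include added above */

static struct swork_event my_ev;

static void my_handler(struct swork_event *sev)
{
	/* runs in the swork kthread, so sleeping locks are fine here */
}

static int __init my_init(void)
{
	BUG_ON(swork_get());		/* ensure the swork kthread is up */
	INIT_SWORK(&my_ev, my_handler);
	swork_queue(&my_ev);		/* callable from atomic context */
	return 0;
}

Reusing the single free_work field for both teardown stages is safe because free_ioctx() is only queued from free_ioctx_reqs(), after free_ioctx_users_work() has completed.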
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index acf32054edd8..0b1130ec3e72 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index edb46e67d5ca..5ae590df109e 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -157,7 +157,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
spin_unlock(&p->d_lock);
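cpu_relax() busy-waits, which is fine when the lock holder runs on another CPU, but on RT the holder may be a preempted task on this very CPU, and spinning at real-time priority can then livelock. cpu_chill() sleeps briefly instead, which is why autofs_i.h gains linux/delay.h above. A sketch of the helper; the -rt definition has varied between a plain msleep(1) and an hrtimer-based sleep, so treat the body as an assumption:

/* -rt sketch, include/linux/delay.h */
#ifdef CONFIG_PREEMPT_RT_FULL
# define cpu_chill()	msleep(1)	/* later -rt releases use an hrtimer sleep */
#else
# define cpu_chill()	cpu_relax()
#endif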
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 93de3ba994e7..47bc9e613245 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,7 +80,7 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
{
int i;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if (defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT_BASE))
/* lockdep really cares that we take all of these spinlocks
* in the right order. If any of the locks in the path are not
* currently blocking, it is going to complain. So, make really
@@ -107,7 +107,7 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
}
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if (defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT_BASE))
if (held)
btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
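Mainline only needs to force all path locks into blocking mode in lockdep builds, where the acquisition order is checked. On RT the btrfs tree locks are sleeping locks anyway, so the same ordering discipline is needed for correctness rather than just annotation; hence the guard widens to CONFIG_DEBUG_LOCK_ALLOC || CONFIG_PREEMPT_RT_BASE.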
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 794d7c6ea9ee..cb80d02edfa2 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6939,14 +6939,6 @@ again:
goto again;
}
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
- static DEFINE_RATELIMIT_STATE(_rs,
- DEFAULT_RATELIMIT_INTERVAL * 10,
- /*DEFAULT_RATELIMIT_BURST*/ 1);
- if (__ratelimit(&_rs))
- WARN(1, KERN_DEBUG
- "BTRFS: block rsv returned %d\n", ret);
- }
try_reserve:
ret = reserve_metadata_bytes(root, block_rsv, blocksize,
BTRFS_RESERVE_NO_FLUSH);
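This hunk is a plain deletion: the ratelimited WARN that fired when a block reservation failed with the ENOSPC_DEBUG mount option goes away, and callers fall straight through to try_reserve.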
diff --git a/fs/buffer.c b/fs/buffer.c
index eef21c69f2d7..8a330540a184 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -322,8 +322,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -336,8 +335,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors and they are all
@@ -349,9 +347,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/*
@@ -385,8 +381,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -398,15 +393,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
end_page_writeback(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3358,6 +3350,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
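BH_Uptodate_Lock is a bit spinlock taken with interrupts disabled, an unbounded non-preemptible section that RT cannot tolerate in the end_io paths. The new helpers keep the original sequence on mainline builds but switch to a real per-buffer_head spinlock on RT, which buffer_head_init_locks() initializes in alloc_buffer_head(). A sketch of the helpers as the -rt series defines them in include/linux/buffer_head.h; the b_uptodate_lock field name is an assumption:

static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
	return flags;
}

static inline void
bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}

As a side effect the conversion drops the redundant trailing return in the still_busy paths.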
diff --git a/fs/dcache.c b/fs/dcache.c
index d3c2fedada14..c9e43f9ec995 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -516,7 +517,7 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
relock:
if (unlock_on_failure) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
}
return dentry; /* try again with same dentry */
}
@@ -2378,7 +2379,7 @@ again:
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
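Same substitution as in autofs4 above (see the cpu_chill() sketch there): both trylock-retry loops, in dentry_kill() and in d_delete(), would otherwise spin against a possibly preempted lock holder on RT.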
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f50d79eb52db..f698fd2ba3d4 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
- put_cpu();
+ put_cpu_light();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
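get_cpu() disables preemption across ep_call_nested(), which acquires locks that sleep on RT. get_cpu_light() only disables migration, so the CPU number used for the wakeup-nesting bookkeeping stays stable while the section remains preemptible. A sketch of the -rt variants; the exact form is an assumption:

/* -rt sketch */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif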
diff --git a/fs/exec.c b/fs/exec.c
index 05f1942d7edb..e5ffa0627fd9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -843,12 +843,14 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
+ preempt_disable_rt();
active_mm = tsk->active_mm;
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
+ preempt_enable_rt();
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
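exec_mmap() swaps the task's mm and calls activate_mm(); the -rt series brackets that window with preempt_disable_rt()/preempt_enable_rt() so the architecture's mm switch cannot be preempted half-way on RT, while non-RT builds are unaffected. Sketch of the helpers, assumed from the -rt series:

/* -rt sketch, include/linux/preempt.h */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	barrier()
# define preempt_enable_rt()	barrier()
#endif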
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 08c03044abdd..95debd71e5fa 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *journal)
if (journal->j_flags & JFS_ABORT)
return;
spin_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 7f34f4716165..8c404001d3a1 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -125,6 +125,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
if (journal->j_flags & JBD2_ABORT)
return;
write_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
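In both jbd and jbd2 the caller may hold queued writes in its block plug. Blocking on j_checkpoint_mutex, a sleeping rtmutex on RT, does not flush the plug the way the usual I/O-wait paths do, so the journal I/O needed to free log space could sit unsubmitted while the task sleeps; calling io_schedule() first pulls the plug. (This is the usual -rt rationale for the change; the hunks themselves only show the flush.)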
diff --git a/fs/namespace.c b/fs/namespace.c
index 1e06f2df10c8..f48d3cacabbd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h> /* acct_auto_close_mnt */
@@ -343,8 +344,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
- cpu_relax();
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+ preempt_enable();
+ cpu_chill();
+ preempt_disable();
+ }
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
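__mnt_want_write() runs with preemption disabled around the per-cpu writer count, and cpu_chill() sleeps, so preemption has to be re-enabled around the chill and re-disabled before MNT_WRITE_HOLD is re-tested.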
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index d267ea6aa1a0..33f0d869e9ff 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
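Beyond the same BH_Uptodate_Lock conversion as in fs/buffer.c above, the mst-fixup loop only disabled interrupts to protect the kmap_atomic section; on RT kmap_atomic does not require interrupts off, so the _nort variants keep the mainline behaviour and compile to almost nothing on RT. Sketch, with the exact form an assumption:

/* -rt sketch of the "_nort" variants */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif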
/**
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 929312180dd0..d20e45a456f2 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -380,7 +380,10 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+ if (isalarm(ctx))
+ hrtimer_wait_for_timer(&ctx->t.alarm.timer);
+ else
+ hrtimer_wait_for_timer(&ctx->t.tmr);
}
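If the timer callback is running while do_timerfd_settime() tries to cancel it, the old code dropped wqh.lock and spun with cpu_relax() before retrying. On RT the callback runs in softirq thread context, so a higher-priority spinner can livelock against it; hrtimer_wait_for_timer(), an -rt addition, sleeps until the callback has finished, with separate handling for the alarmtimer and plain-hrtimer cases.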
/*