author     Steven Whitehouse <swhiteho@redhat.com>    2006-09-28 08:29:59 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2006-09-28 08:29:59 -0400
commit     185a257f2f73bcd89050ad02da5bedbc28fc43fa (patch)
tree       5e32586114534ed3f2165614cba3d578f5d87307 /fs
parent     3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699 (diff)
parent     a77c64c1a641950626181b4857abb701d8f38ccc (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'fs')
-rw-r--r-- fs/9p/vfs_inode.c | 4
-rw-r--r-- fs/Kconfig | 19
-rw-r--r-- fs/adfs/inode.c | 1
-rw-r--r-- fs/adfs/super.c | 6
-rw-r--r-- fs/affs/super.c | 6
-rw-r--r-- fs/afs/inode.c | 1
-rw-r--r-- fs/afs/vlocation.c | 3
-rw-r--r-- fs/afs/volume.c | 3
-rw-r--r-- fs/autofs/inode.c | 6
-rw-r--r-- fs/autofs/symlink.c | 2
-rw-r--r-- fs/autofs4/expire.c | 6
-rw-r--r-- fs/autofs4/inode.c | 1
-rw-r--r-- fs/autofs4/root.c | 38
-rw-r--r-- fs/befs/linuxvfs.c | 5
-rw-r--r-- fs/bfs/dir.c | 2
-rw-r--r-- fs/bfs/inode.c | 10
-rw-r--r-- fs/binfmt_elf.c | 13
-rw-r--r-- fs/binfmt_misc.c | 9
-rw-r--r-- fs/buffer.c | 2
-rw-r--r-- fs/char_dev.c | 20
-rw-r--r-- fs/cifs/cifsfs.c | 21
-rw-r--r-- fs/cifs/readdir.c | 5
-rw-r--r-- fs/coda/coda_linux.c | 2
-rw-r--r-- fs/coda/dir.c | 2
-rw-r--r-- fs/coda/inode.c | 3
-rw-r--r-- fs/compat.c | 5
-rw-r--r-- fs/configfs/file.c | 3
-rw-r--r-- fs/configfs/inode.c | 4
-rw-r--r-- fs/cramfs/inode.c | 4
-rw-r--r-- fs/debugfs/file.c | 60
-rw-r--r-- fs/debugfs/inode.c | 20
-rw-r--r-- fs/devpts/inode.c | 6
-rw-r--r-- fs/efs/super.c | 6
-rw-r--r-- fs/eventpoll.c | 1
-rw-r--r-- fs/exec.c | 53
-rw-r--r-- fs/ext2/acl.c | 4
-rw-r--r-- fs/ext2/ialloc.c | 1
-rw-r--r-- fs/ext2/inode.c | 1
-rw-r--r-- fs/ext2/super.c | 38
-rw-r--r-- fs/ext2/xattr.c | 3
-rw-r--r-- fs/ext3/acl.c | 6
-rw-r--r-- fs/ext3/balloc.c | 350
-rw-r--r-- fs/ext3/bitmap.c | 2
-rw-r--r-- fs/ext3/dir.c | 19
-rw-r--r-- fs/ext3/file.c | 2
-rw-r--r-- fs/ext3/fsync.c | 6
-rw-r--r-- fs/ext3/hash.c | 8
-rw-r--r-- fs/ext3/ialloc.c | 55
-rw-r--r-- fs/ext3/inode.c | 77
-rw-r--r-- fs/ext3/namei.c | 50
-rw-r--r-- fs/ext3/resize.c | 42
-rw-r--r-- fs/ext3/super.c | 110
-rw-r--r-- fs/ext3/xattr.c | 16
-rw-r--r-- fs/fat/cache.c | 3
-rw-r--r-- fs/fat/inode.c | 38
-rw-r--r-- fs/file.c | 6
-rw-r--r-- fs/file_table.c | 2
-rw-r--r-- fs/freevxfs/vxfs.h | 2
-rw-r--r-- fs/freevxfs/vxfs_inode.c | 5
-rw-r--r-- fs/fuse/control.c | 6
-rw-r--r-- fs/fuse/inode.c | 1
-rw-r--r-- fs/hfs/bnode.c | 3
-rw-r--r-- fs/hfs/btree.c | 3
-rw-r--r-- fs/hfs/inode.c | 2
-rw-r--r-- fs/hfs/super.c | 6
-rw-r--r-- fs/hfsplus/bnode.c | 3
-rw-r--r-- fs/hfsplus/btree.c | 3
-rw-r--r-- fs/hfsplus/inode.c | 2
-rw-r--r-- fs/hfsplus/super.c | 3
-rw-r--r-- fs/hostfs/hostfs_kern.c | 1
-rw-r--r-- fs/hpfs/buffer.c | 2
-rw-r--r-- fs/hpfs/inode.c | 1
-rw-r--r-- fs/hpfs/super.c | 6
-rw-r--r-- fs/hppfs/hppfs_kern.c | 1
-rw-r--r-- fs/hugetlbfs/inode.c | 1
-rw-r--r-- fs/inode.c | 6
-rw-r--r-- fs/isofs/inode.c | 10
-rw-r--r-- fs/jbd/checkpoint.c | 33
-rw-r--r-- fs/jbd/commit.c | 182
-rw-r--r-- fs/jbd/journal.c | 74
-rw-r--r-- fs/jbd/recovery.c | 56
-rw-r--r-- fs/jbd/revoke.c | 70
-rw-r--r-- fs/jbd/transaction.c | 134
-rw-r--r-- fs/jffs/inode-v23.c | 44
-rw-r--r-- fs/jffs/intrep.c | 11
-rw-r--r-- fs/jffs/jffs_fm.c | 6
-rw-r--r-- fs/jffs2/fs.c | 2
-rw-r--r-- fs/jffs2/super.c | 3
-rw-r--r-- fs/jfs/jfs_extent.c | 2
-rw-r--r-- fs/jfs/jfs_imap.c | 1
-rw-r--r-- fs/jfs/jfs_inode.c | 1
-rw-r--r-- fs/jfs/jfs_metapage.c | 2
-rw-r--r-- fs/jfs/jfs_txnmgr.c | 4
-rw-r--r-- fs/libfs.c | 4
-rw-r--r-- fs/lockd/clntlock.c | 2
-rw-r--r-- fs/lockd/clntproc.c | 2
-rw-r--r-- fs/lockd/host.c | 4
-rw-r--r-- fs/lockd/svcsubs.c | 3
-rw-r--r-- fs/minix/bitmap.c | 2
-rw-r--r-- fs/minix/inode.c | 13
-rw-r--r-- fs/namei.c | 50
-rw-r--r-- fs/namespace.c | 10
-rw-r--r-- fs/ncpfs/inode.c | 7
-rw-r--r-- fs/ncpfs/symlink.c | 4
-rw-r--r-- fs/nfs/delegation.c | 7
-rw-r--r-- fs/nfs/direct.c | 3
-rw-r--r-- fs/nfs/inode.c | 9
-rw-r--r-- fs/nfs/namespace.c | 12
-rw-r--r-- fs/nfs/nfs3proc.c | 2
-rw-r--r-- fs/nfs/pagelist.c | 3
-rw-r--r-- fs/nfs/proc.c | 2
-rw-r--r-- fs/nfs/read.c | 3
-rw-r--r-- fs/nfs/write.c | 3
-rw-r--r-- fs/nfsd/nfs4idmap.c | 3
-rw-r--r-- fs/nfsd/nfs4state.c | 8
-rw-r--r-- fs/ntfs/dir.c | 5
-rw-r--r-- fs/ntfs/inode.c | 6
-rw-r--r-- fs/ntfs/mft.c | 9
-rw-r--r-- fs/ntfs/super.c | 28
-rw-r--r-- fs/ntfs/unistr.c | 4
-rw-r--r-- fs/ocfs2/dlm/dlmfs.c | 6
-rw-r--r-- fs/ocfs2/dlmglue.c | 2
-rw-r--r-- fs/ocfs2/inode.c | 4
-rw-r--r-- fs/partitions/efi.c | 9
-rw-r--r-- fs/pipe.c | 1
-rw-r--r-- fs/proc/internal.h | 1
-rw-r--r-- fs/proc/kcore.c | 6
-rw-r--r-- fs/proc/nommu.c | 20
-rw-r--r-- fs/proc/proc_misc.c | 11
-rw-r--r-- fs/proc/task_mmu.c | 5
-rw-r--r-- fs/proc/task_nommu.c | 74
-rw-r--r-- fs/qnx4/inode.c | 8
-rw-r--r-- fs/ramfs/inode.c | 1
-rw-r--r-- fs/reiserfs/inode.c | 4
-rw-r--r-- fs/reiserfs/super.c | 25
-rw-r--r-- fs/romfs/inode.c | 3
-rw-r--r-- fs/smbfs/inode.c | 5
-rw-r--r-- fs/smbfs/proc.c | 1
-rw-r--r-- fs/smbfs/request.c | 3
-rw-r--r-- fs/stat.c | 3
-rw-r--r-- fs/sysfs/bin.c | 13
-rw-r--r-- fs/sysfs/dir.c | 2
-rw-r--r-- fs/sysfs/inode.c | 12
-rw-r--r-- fs/sysfs/symlink.c | 14
-rw-r--r-- fs/sysfs/sysfs.h | 2
-rw-r--r-- fs/sysv/ialloc.c | 2
-rw-r--r-- fs/sysv/inode.c | 2
-rw-r--r-- fs/sysv/super.c | 6
-rw-r--r-- fs/udf/ialloc.c | 7
-rw-r--r-- fs/udf/inode.c | 2
-rw-r--r-- fs/udf/super.c | 3
-rw-r--r-- fs/ufs/ialloc.c | 1
-rw-r--r-- fs/ufs/inode.c | 1
-rw-r--r-- fs/ufs/super.c | 6
-rw-r--r-- fs/xfs/linux-2.6/kmem.h | 4
-rw-r--r-- fs/xfs/linux-2.6/xfs_file.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_vnode.c | 1
159 files changed, 1281 insertions, 1087 deletions
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index eae50c9d6dc..7a7ec2d1d2f 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -204,7 +204,6 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
- inode->i_blksize = sb->s_blocksize;
inode->i_blocks = 0;
inode->i_rdev = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -950,9 +949,8 @@ v9fs_stat2inode(struct v9fs_stat *stat, struct inode *inode,
inode->i_size = stat->length;
- inode->i_blksize = sb->s_blocksize;
inode->i_blocks =
- (inode->i_size + inode->i_blksize - 1) >> sb->s_blocksize_bits;
+ (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
}
/**
diff --git a/fs/Kconfig b/fs/Kconfig
index ca9affd676a..c968b9c7e58 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -827,6 +827,25 @@ config PROC_VMCORE
help
Exports the dump image of crashed kernel in ELF format.
+config PROC_SYSCTL
+ bool "Sysctl support (/proc/sys)" if EMBEDDED
+ depends on PROC_FS
+ select SYSCTL
+ default y
+ ---help---
+ The sysctl interface provides a means of dynamically changing
+ certain kernel parameters and variables on the fly without requiring
+ a recompile of the kernel or reboot of the system. The primary
+ interface is through /proc/sys. If you say Y here a tree of
+ modifiable sysctl entries will be generated beneath the
+ /proc/sys directory. They are explained in the files
+ in <file:Documentation/sysctl/>. Note that enabling this
+ option will enlarge the kernel by at least 8 KB.
+
+ As it is generally a good thing, you should say Y here unless
+ building a kernel for install/rescue disks or your system is very
+ limited in memory.
+
config SYSFS
bool "sysfs file system support" if EMBEDDED
default y
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 534f3eecc98..7e7a04be127 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -269,7 +269,6 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
inode->i_ino = obj->file_id;
inode->i_size = obj->size;
inode->i_nlink = 2;
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 82011019494..9ade139086f 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -251,8 +251,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(adfs_inode_cachep))
- printk(KERN_INFO "adfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(adfs_inode_cachep);
}
static struct super_operations adfs_sops = {
@@ -339,11 +338,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_NODIRATIME;
- asb = kmalloc(sizeof(*asb), GFP_KERNEL);
+ asb = kzalloc(sizeof(*asb), GFP_KERNEL);
if (!asb)
return -ENOMEM;
sb->s_fs_info = asb;
- memset(asb, 0, sizeof(*asb));
/* set default options */
asb->s_uid = 0;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 17352011ab6..5ea72c3a16c 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -109,8 +109,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(affs_inode_cachep))
- printk(KERN_INFO "affs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(affs_inode_cachep);
}
static struct super_operations affs_sops = {
@@ -280,11 +279,10 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &affs_sops;
sb->s_flags |= MS_NODIRATIME;
- sbi = kmalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(*sbi));
init_MUTEX(&sbi->s_bmlock);
if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4ebb30a50ed..6f37754906c 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -72,7 +72,6 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
inode->i_ctime.tv_sec = vnode->status.mtime_server;
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_version = vnode->fid.unique;
inode->i_mapping->a_ops = &afs_fs_aops;
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 331f730a1fb..782ee7c600c 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -281,11 +281,10 @@ int afs_vlocation_lookup(struct afs_cell *cell,
spin_unlock(&cell->vl_gylock);
/* not in the cell's in-memory lists - create a new record */
- vlocation = kmalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
+ vlocation = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
if (!vlocation)
return -ENOMEM;
- memset(vlocation, 0, sizeof(struct afs_vlocation));
atomic_set(&vlocation->usage, 1);
INIT_LIST_HEAD(&vlocation->link);
rwlock_init(&vlocation->lock);
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 0ff4b86476e..768c6dbd323 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -186,11 +186,10 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
_debug("creating new volume record");
ret = -ENOMEM;
- volume = kmalloc(sizeof(struct afs_volume), GFP_KERNEL);
+ volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL);
if (!volume)
goto error_up;
- memset(volume, 0, sizeof(struct afs_volume));
atomic_set(&volume->usage, 1);
volume->type = type;
volume->type_force = force;
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index af2efbbb5d7..2c9759baad6 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -129,10 +129,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
struct autofs_sb_info *sbi;
int minproto, maxproto;
- sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if ( !sbi )
goto fail_unlock;
- memset(sbi, 0, sizeof(*sbi));
DPRINTK(("autofs: starting up, sbi = %p\n",sbi));
s->s_fs_info = sbi;
@@ -217,7 +216,6 @@ static void autofs_read_inode(struct inode *inode)
inode->i_nlink = 2;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_blocks = 0;
- inode->i_blksize = 1024;
if ( ino == AUTOFS_ROOT_INO ) {
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -242,7 +240,7 @@ static void autofs_read_inode(struct inode *inode)
inode->i_op = &autofs_symlink_inode_operations;
sl = &sbi->symlink[n];
- inode->u.generic_ip = sl;
+ inode->i_private = sl;
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = sl->mtime;
inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
diff --git a/fs/autofs/symlink.c b/fs/autofs/symlink.c
index 52e8772b066..c74f2eb6577 100644
--- a/fs/autofs/symlink.c
+++ b/fs/autofs/symlink.c
@@ -15,7 +15,7 @@
/* Nothing to release.. */
static void *autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
+ char *s=((struct autofs_symlink *)dentry->d_inode->i_private)->data;
nd_set_link(nd, s);
return NULL;
}
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 8dbd44f10e9..d96e5c14a9c 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -32,7 +32,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
if (!do_now) {
/* Too young to die */
- if (time_after(ino->last_used + timeout, now))
+ if (!timeout || time_after(ino->last_used + timeout, now))
return 0;
/* update last_used here :-
@@ -253,7 +253,7 @@ static struct dentry *autofs4_expire_direct(struct super_block *sb,
struct dentry *root = dget(sb->s_root);
int do_now = how & AUTOFS_EXP_IMMEDIATE;
- if (!sbi->exp_timeout || !root)
+ if (!root)
return NULL;
now = jiffies;
@@ -293,7 +293,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
int do_now = how & AUTOFS_EXP_IMMEDIATE;
int exp_leaves = how & AUTOFS_EXP_LEAVES;
- if ( !sbi->exp_timeout || !root )
+ if (!root)
return NULL;
now = jiffies;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 11a6a9ae51b..800ce876cae 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -447,7 +447,6 @@ struct inode *autofs4_get_inode(struct super_block *sb,
inode->i_uid = 0;
inode->i_gid = 0;
}
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 5100f984783..27e17f96cad 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -137,7 +137,9 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
nd.flags = LOOKUP_DIRECTORY;
ret = (dentry->d_op->d_revalidate)(dentry, &nd);
- if (!ret) {
+ if (ret <= 0) {
+ if (ret < 0)
+ status = ret;
dcache_dir_close(inode, file);
goto out;
}
@@ -400,13 +402,23 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
int oz_mode = autofs4_oz_mode(sbi);
int flags = nd ? nd->flags : 0;
- int status = 0;
+ int status = 1;
/* Pending dentry */
if (autofs4_ispending(dentry)) {
- if (!oz_mode)
- status = try_to_fill_dentry(dentry, flags);
- return !status;
+ /* The daemon never causes a mount to trigger */
+ if (oz_mode)
+ return 1;
+
+ /*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+ status = try_to_fill_dentry(dentry, flags);
+ if (status == 0)
+ return 1;
+
+ return status;
}
/* Negative dentry.. invalidate if "old" */
@@ -421,9 +433,19 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
DPRINTK("dentry=%p %.*s, emptydir",
dentry, dentry->d_name.len, dentry->d_name.name);
spin_unlock(&dcache_lock);
- if (!oz_mode)
- status = try_to_fill_dentry(dentry, flags);
- return !status;
+ /* The daemon never causes a mount to trigger */
+ if (oz_mode)
+ return 1;
+
+ /*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+ status = try_to_fill_dentry(dentry, flags);
+ if (status == 0)
+ return 1;
+
+ return status;
}
spin_unlock(&dcache_lock);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 50cfca5c7ef..57020c7a7e6 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -365,7 +365,6 @@ befs_read_inode(struct inode *inode)
inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
inode->i_ctime = inode->i_mtime;
inode->i_atime = inode->i_mtime;
- inode->i_blksize = befs_sb->block_size;
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
@@ -446,9 +445,7 @@ befs_init_inodecache(void)
static void
befs_destroy_inodecache(void)
{
- if (kmem_cache_destroy(befs_inode_cachep))
- printk(KERN_ERR "befs_destroy_inodecache: "
- "not all structures were freed\n");
+ kmem_cache_destroy(befs_inode_cachep);
}
/*
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 26fad962173..dcf04cb1328 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -102,7 +102,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode,
inode->i_uid = current->fsuid;
inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index cf74f3d4d96..ed27ffb3459 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -76,7 +76,6 @@ static void bfs_read_inode(struct inode * inode)
inode->i_size = BFS_FILESIZE(di);
inode->i_blocks = BFS_FILEBLOCKS(di);
if (inode->i_size || inode->i_blocks) dprintf("Registered inode with %lld size, %ld blocks\n", inode->i_size, inode->i_blocks);
- inode->i_blksize = PAGE_SIZE;
inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
inode->i_ctime.tv_sec = le32_to_cpu(di->i_ctime);
@@ -268,8 +267,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(bfs_inode_cachep))
- printk(KERN_INFO "bfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(bfs_inode_cachep);
}
static struct super_operations bfs_sops = {
@@ -311,11 +309,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
unsigned i, imap_len;
struct bfs_sb_info * info;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
s->s_fs_info = info;
- memset(info, 0, sizeof(*info));
sb_set_blocksize(s, BFS_BSIZE);
@@ -338,10 +335,9 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
+ BFS_ROOT_INO - 1;
imap_len = info->si_lasti/8 + 1;
- info->si_imap = kmalloc(imap_len, GFP_KERNEL);
+ info->si_imap = kzalloc(imap_len, GFP_KERNEL);
if (!info->si_imap)
goto out;
- memset(info->si_imap, 0, imap_len);
for (i=0; i<BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 672a3b90bc5..dfd8cfb7fb5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -515,7 +515,8 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
- if (current->flags & PF_RANDOMIZE) {
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
@@ -1262,7 +1263,7 @@ static void fill_elf_header(struct elfhdr *elf, int segs)
return;
}
-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
+static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
@@ -1428,7 +1429,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
int i;
struct vm_area_struct *vma;
struct elfhdr *elf = NULL;
- off_t offset = 0, dataoff;
+ loff_t offset = 0, dataoff;
unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
int numnote;
struct memelfnote *notes = NULL;
@@ -1661,11 +1662,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
ELF_CORE_WRITE_EXTRA_DATA;
#endif
- if ((off_t)file->f_pos != offset) {
+ if (file->f_pos != offset) {
/* Sanity check */
printk(KERN_WARNING
- "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
- (off_t)file->f_pos, offset);
+ "elf_core_dump: file->f_pos (%Ld) != offset (%Ld)\n",
+ file->f_pos, offset);
}
end_coredump:
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 34ebbc191e4..66ba137f866 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -507,7 +507,6 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime =
current_fs_time(inode->i_sb);
@@ -517,7 +516,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
static void bm_clear_inode(struct inode *inode)
{
- kfree(inode->u.generic_ip);
+ kfree(inode->i_private);
}
static void kill_node(Node *e)
@@ -545,7 +544,7 @@ static void kill_node(Node *e)
static ssize_t
bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
{
- Node *e = file->f_dentry->d_inode->u.generic_ip;
+ Node *e = file->f_dentry->d_inode->i_private;
loff_t pos = *ppos;
ssize_t res;
char *page;
@@ -579,7 +578,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct dentry *root;
- Node *e = file->f_dentry->d_inode->u.generic_ip;
+ Node *e = file->f_dentry->d_inode->i_private;
int res = parse_command(buffer, count);
switch (res) {
@@ -646,7 +645,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
}
e->dentry = dget(dentry);
- inode->u.generic_ip = e;
+ inode->i_private = e;
inode->i_fop = &bm_entry_operations;
d_instantiate(dentry, inode);
diff --git a/fs/buffer.c b/fs/buffer.c
index 71649ef9b65..3b6d701073e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
spin_lock(&mapping->private_lock);
ret = drop_buffers(page, &buffers_to_free);
+ spin_unlock(&mapping->private_lock);
if (ret) {
/*
* If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
*/
clear_page_dirty(page);
}
- spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 3483d3cf808..0009346d827 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -19,11 +19,30 @@
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
+#include <linux/backing-dev.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
+/*
+ * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
+ * devices
+ * - permits shared-mmap for read, write and/or exec
+ * - does not permit private mmap in NOMMU mode (can't do COW)
+ * - no readahead or I/O queue unplugging required
+ */
+struct backing_dev_info directly_mappable_cdev_bdi = {
+ .capabilities = (
+#ifdef CONFIG_MMU
+ /* permit private copies of the data to be taken */
+ BDI_CAP_MAP_COPY |
+#endif
+ /* permit direct mmap, for read, write or exec */
+ BDI_CAP_MAP_DIRECT |
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+};
+
static struct kobj_map *cdev_map;
static DEFINE_MUTEX(chrdevs_lock);
@@ -461,3 +480,4 @@ EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
+EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c3ef1c0d0e6..22bcf4d7e7a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -253,7 +253,6 @@ cifs_alloc_inode(struct super_block *sb)
file data or metadata */
cifs_inode->clientCanCacheRead = FALSE;
cifs_inode->clientCanCacheAll = FALSE;
- cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
INIT_LIST_HEAD(&cifs_inode->openFileList);
@@ -699,8 +698,7 @@ cifs_init_inodecache(void)
static void
cifs_destroy_inodecache(void)
{
- if (kmem_cache_destroy(cifs_inode_cachep))
- printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
+ kmem_cache_destroy(cifs_inode_cachep);
}
static int
@@ -778,13 +776,9 @@ static void
cifs_destroy_request_bufs(void)
{
mempool_destroy(cifs_req_poolp);
- if (kmem_cache_destroy(cifs_req_cachep))
- printk(KERN_WARNING
- "cifs_destroy_request_cache: error not all structures were freed\n");
+ kmem_cache_destroy(cifs_req_cachep);
mempool_destroy(cifs_sm_req_poolp);
- if (kmem_cache_destroy(cifs_sm_req_cachep))
- printk(KERN_WARNING
- "cifs_destroy_request_cache: cifs_small_rq free error\n");
+ kmem_cache_destroy(cifs_sm_req_cachep);
}
static int
@@ -819,13 +813,8 @@ static void
cifs_destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp);
- if (kmem_cache_destroy(cifs_mid_cachep))
- printk(KERN_WARNING
- "cifs_destroy_mids: error not all structures were freed\n");
-
- if (kmem_cache_destroy(cifs_oplock_cachep))
- printk(KERN_WARNING
- "error not all oplock structures were freed\n");
+ kmem_cache_destroy(cifs_mid_cachep);
+ kmem_cache_destroy(cifs_oplock_cachep);
}
static int cifs_oplock_thread(void * dummyarg)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 9aeb58a7d36..b27b34537bf 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -216,10 +216,9 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
if (allocation_size < end_of_file)
cFYI(1, ("May be sparse file, allocation less than file size"));
- cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld",
+ cFYI(1, ("File Size %ld and blocks %llu",
(unsigned long)tmp_inode->i_size,
- (unsigned long long)tmp_inode->i_blocks,
- tmp_inode->i_blksize));
+ (unsigned long long)tmp_inode->i_blocks));
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
tmp_inode->i_op = &cifs_file_inode_ops;
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 5597080cb81..95a54253c04 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -110,8 +110,6 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
inode->i_nlink = attr->va_nlink;
if (attr->va_size != -1)
inode->i_size = attr->va_size;
- if (attr->va_blocksize != -1)
- inode->i_blksize = attr->va_blocksize;
if (attr->va_size != -1)
inode->i_blocks = (attr->va_size + 511) >> 9;
if (attr->va_atime.tv_sec != -1)
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 71f2ea632e5..8651ea6a23b 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -513,7 +513,7 @@ static int coda_venus_readdir(struct file *filp, filldir_t filldir,
ino_t ino;
int ret, i;
- vdir = (struct venus_dirent *)kmalloc(sizeof(*vdir), GFP_KERNEL);
+ vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
if (!vdir) return -ENOMEM;
i = filp->f_pos;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 87f1dc8aa24..88d12332116 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -80,8 +80,7 @@ int coda_init_inodecache(void)
void coda_destroy_inodecache(void)
{
- if (kmem_cache_destroy(coda_inode_cachep))
- printk(KERN_INFO "coda_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(coda_inode_cachep);
}
static int coda_remount(struct super_block *sb, int *flags, char *data)
diff --git a/fs/compat.c b/fs/compat.c
index e31e9cf9664..ce982f6e8c8 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1855,7 +1855,7 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
} while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
- if (tsp && !(current->personality & STICKY_TIMEOUTS)) {
+ if (ret == 0 && tsp && !(current->personality & STICKY_TIMEOUTS)) {
struct compat_timespec rts;
rts.tv_sec = timeout / HZ;
@@ -1866,7 +1866,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
}
if (compat_timespec_compare(&rts, &ts) >= 0)
rts = ts;
- copy_to_user(tsp, &rts, sizeof(rts));
+ if (copy_to_user(tsp, &rts, sizeof(rts)))
+ ret = -EFAULT;
}
if (ret == -ERESTARTNOHAND) {
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index f499803743e..85105e50f7d 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -274,9 +274,8 @@ static int check_perm(struct inode * inode, struct file * file)
/* No error? Great, allocate a buffer for the file, and store it
* it in file->private_data for easy access.
*/
- buffer = kmalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
+ buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
if (buffer) {
- memset(buffer,0,sizeof(struct configfs_buffer));
init_MUTEX(&buffer->sem);
buffer->needs_read_fill = 1;
buffer->ops = ops;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index e14488ca641..fb18917954a 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,11 +76,10 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (!sd_iattr) {
/* setting attributes for the first time, allocate now */
- sd_iattr = kmalloc(sizeof(struct iattr), GFP_KERNEL);
+ sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
if (!sd_iattr)
return -ENOMEM;
/* assign default attributes */
- memset(sd_iattr, 0, sizeof(struct iattr));
sd_iattr->ia_mode = sd->s_mode;
sd_iattr->ia_uid = 0;
sd_iattr->ia_gid = 0;
@@ -136,7 +135,6 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
{
struct inode * inode = new_inode(configfs_sb);
if (inode) {
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->a_ops = &configfs_aops;
inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 223c0431042..ad96b699071 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -73,7 +73,6 @@ static int cramfs_iget5_set(struct inode *inode, void *opaque)
inode->i_uid = cramfs_inode->uid;
inode->i_size = cramfs_inode->size;
inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_gid = cramfs_inode->gid;
/* Struct copy intentional */
inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
@@ -242,11 +241,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY;
- sbi = kmalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(struct cramfs_sb_info));
/* Invalidate the read buffers on mount: think disk change.. */
mutex_lock(&read_mutex);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 39640fd0345..bf3901ab174 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -32,8 +32,8 @@ static ssize_t default_write_file(struct file *file, const char __user *buf,
static int default_open(struct inode *inode, struct file *file)
{
- if (inode->u.generic_ip)
- file->private_data = inode->u.generic_ip;
+ if (inode->i_private)
+ file->private_data = inode->i_private;
return 0;
}
@@ -55,12 +55,11 @@ static u64 debugfs_u8_get(void *data)
DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
/**
- * debugfs_create_u8 - create a file in the debugfs filesystem that is used to read and write an unsigned 8 bit value.
- *
+ * debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this paramater is NULL, then the
+ * directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
@@ -72,11 +71,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_u8(const char *name, mode_t mode,
@@ -97,12 +96,11 @@ static u64 debugfs_u16_get(void *data)
DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
/**
- * debugfs_create_u16 - create a file in the debugfs filesystem that is used to read and write an unsigned 16 bit value.
- *
+ * debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this paramater is NULL, then the
+ * directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
@@ -114,11 +112,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_u16(const char *name, mode_t mode,
@@ -139,12 +137,11 @@ static u64 debugfs_u32_get(void *data)
DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
/**
- * debugfs_create_u32 - create a file in the debugfs filesystem that is used to read and write an unsigned 32 bit value.
- *
+ * debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this paramater is NULL, then the
+ * directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
@@ -156,11 +153,11 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_u32(const char *name, mode_t mode,
@@ -219,12 +216,11 @@ static const struct file_operations fops_bool = {
};
/**
- * debugfs_create_bool - create a file in the debugfs filesystem that is used to read and write a boolean value.
- *
+ * debugfs_create_bool - create a debugfs file that is used to read and write a boolean value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this paramater is NULL, then the
+ * directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
@@ -236,11 +232,11 @@ static const struct file_operations fops_bool = {
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_bool(const char *name, mode_t mode,
@@ -264,13 +260,11 @@ static struct file_operations fops_blob = {
};
/**
- * debugfs_create_blob - create a file in the debugfs filesystem that is
- * used to read and write a binary blob.
- *
+ * debugfs_create_blob - create a debugfs file that is used to read and write a binary blob
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
- * directory dentry if set. If this paramater is NULL, then the
+ * directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer
* to the blob data and the size of the data.
@@ -282,11 +276,11 @@ static struct file_operations fops_blob = {
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e8ae3042b80..269e649e6dc 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -40,7 +40,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
@@ -162,14 +161,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
/**
* debugfs_create_file - create a file in the debugfs filesystem
- *
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this paramater is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
- * on. The inode.u.generic_ip pointer will point to this value on
+ * on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
@@ -182,11 +180,11 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_file(const char *name, mode_t mode,
@@ -210,7 +208,7 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode,
if (dentry->d_inode) {
if (data)
- dentry->d_inode->u.generic_ip = data;
+ dentry->d_inode->i_private = data;
if (fops)
dentry->d_inode->i_fop = fops;
}
@@ -221,7 +219,6 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
/**
* debugfs_create_dir - create a directory in the debugfs filesystem
- *
* @name: a pointer to a string containing the name of the directory to
* create.
* @parent: a pointer to the parent dentry for this file. This should be a
@@ -233,11 +230,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, NULL will be returned.
+ * you are responsible here.) If an error occurs, %NULL will be returned.
*
- * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned. It is not wise to check for this value, but rather, check for
- * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
@@ -250,7 +247,6 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir);
/**
* debugfs_remove - removes a file or directory from the debugfs filesystem
- *
* @dentry: a pointer to a the dentry of the file or directory to be
* removed.
*
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index f7aef5bb584..5f7b5a6025b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -113,7 +113,6 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
inode->i_ino = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_blocks = 0;
- inode->i_blksize = 1024;
inode->i_uid = inode->i_gid = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
@@ -172,12 +171,11 @@ int devpts_pty_new(struct tty_struct *tty)
return -ENOMEM;
inode->i_ino = number+2;
- inode->i_blksize = 1024;
inode->i_uid = config.setuid ? config.uid : current->fsuid;
inode->i_gid = config.setgid ? config.gid : current->fsgid;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
init_special_inode(inode, S_IFCHR|config.mode, device);
- inode->u.generic_ip = tty;
+ inode->i_private = tty;
dentry = get_node(number);
if (!IS_ERR(dentry) && !dentry->d_inode)
@@ -196,7 +194,7 @@ struct tty_struct *devpts_get_tty(int number)
tty = NULL;
if (!IS_ERR(dentry)) {
if (dentry->d_inode)
- tty = dentry->d_inode->u.generic_ip;
+ tty = dentry->d_inode->i_private;
dput(dentry);
}
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 8ac2462ae5d..b3f50651eb6 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -90,8 +90,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(efs_inode_cachep))
- printk(KERN_INFO "efs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(efs_inode_cachep);
}
static void efs_put_super(struct super_block *s)
@@ -248,11 +247,10 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
struct buffer_head *bh;
struct inode *root;
- sb = kmalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
+ sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
if (!sb)
return -ENOMEM;
s->s_fs_info = sb;
- memset(sb, 0, sizeof(struct efs_sb_info));
s->s_magic = EFS_SUPER_MAGIC;
if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3a3567433b9..8d544334bcd 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1590,7 +1590,6 @@ static struct inode *ep_eventpoll_inode(void)
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_blksize = PAGE_SIZE;
return inode;
eexit_1:
diff --git a/fs/exec.c b/fs/exec.c
index 54135df2a96..97df6e0aeae 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -595,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
- if (thread_group_empty(current))
+ if (thread_group_empty(tsk))
goto no_thread_group;
/*
@@ -620,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
* Reparenting needs write_lock on tasklist_lock,
* so it is safe to do it under read_lock.
*/
- if (unlikely(current->group_leader == child_reaper))
- child_reaper = current;
+ if (unlikely(tsk->group_leader == child_reaper))
+ child_reaper = tsk;
- zap_other_threads(current);
+ zap_other_threads(tsk);
read_unlock(&tasklist_lock);
/*
* Account for the thread group leader hanging around:
*/
count = 1;
- if (!thread_group_leader(current)) {
+ if (!thread_group_leader(tsk)) {
count = 2;
/*
* The SIGALRM timer survives the exec, but needs to point
@@ -639,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
* synchronize with any firing (by calling del_timer_sync)
* before we can safely let the old group leader die.
*/
- sig->tsk = current;
+ sig->tsk = tsk;
spin_unlock_irq(lock);
if (hrtimer_cancel(&sig->real_timer))
hrtimer_restart(&sig->real_timer);
spin_lock_irq(lock);
}
while (atomic_read(&sig->count) > count) {
- sig->group_exit_task = current;
+ sig->group_exit_task = tsk;
sig->notify_count = count;
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock);
@@ -662,13 +662,13 @@ static int de_thread(struct task_struct *tsk)
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
- if (!thread_group_leader(current)) {
+ if (!thread_group_leader(tsk)) {
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
* of the time.
*/
- leader = current->group_leader;
+ leader = tsk->group_leader;
while (leader->exit_state != EXIT_ZOMBIE)
yield();
@@ -682,12 +682,12 @@ static int de_thread(struct task_struct *tsk)
* When we take on its identity by switching to its PID, we
* also take its birthdate (always earlier than our own).
*/
- current->start_time = leader->start_time;
+ tsk->start_time = leader->start_time;
write_lock_irq(&tasklist_lock);
- BUG_ON(leader->tgid != current->tgid);
- BUG_ON(current->pid == current->tgid);
+ BUG_ON(leader->tgid != tsk->tgid);
+ BUG_ON(tsk->pid == tsk->tgid);
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
@@ -696,24 +696,21 @@ static int de_thread(struct task_struct *tsk)
*/
/* Become a process group leader with the old leader's pid.
- * Note: The old leader also uses thispid until release_task
+ * The old leader becomes a thread of the this thread group.
+ * Note: The old leader also uses this pid until release_task
* is called. Odd but simple and correct.
*/
- detach_pid(current, PIDTYPE_PID);
- current->pid = leader->pid;
- attach_pid(current, PIDTYPE_PID, current->pid);
- attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
- attach_pid(current, PIDTYPE_SID, current->signal->session);
- list_replace_rcu(&leader->tasks, &current->tasks);
+ detach_pid(tsk, PIDTYPE_PID);
+ tsk->pid = leader->pid;
+ attach_pid(tsk, PIDTYPE_PID, tsk->pid);
+ transfer_pid(leader, tsk, PIDTYPE_PGID);
+ transfer_pid(leader, tsk, PIDTYPE_SID);
+ list_replace_rcu(&leader->tasks, &tsk->tasks);
- current->group_leader = current;
- leader->group_leader = current;
+ tsk->group_leader = tsk;
+ leader->group_leader = tsk;
- /* Reduce leader to a thread */
- detach_pid(leader, PIDTYPE_PGID);
- detach_pid(leader, PIDTYPE_SID);
-
- current->exit_signal = SIGCHLD;
+ tsk->exit_signal = SIGCHLD;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
@@ -753,7 +750,7 @@ no_thread_group:
spin_lock(&oldsighand->siglock);
spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
- rcu_assign_pointer(current->sighand, newsighand);
+ rcu_assign_pointer(tsk->sighand, newsighand);
recalc_sigpending();
spin_unlock(&newsighand->siglock);
@@ -764,7 +761,7 @@ no_thread_group:
kmem_cache_free(sighand_cachep, oldsighand);
}
- BUG_ON(!thread_group_leader(current));
+ BUG_ON(!thread_group_leader(tsk));
return 0;
}
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index da52b4a5db6..7c420b800c3 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -89,8 +89,8 @@ ext2_acl_to_disk(const struct posix_acl *acl, size_t *size)
size_t n;
*size = ext2_acl_size(acl->a_count);
- ext_acl = (ext2_acl_header *)kmalloc(sizeof(ext2_acl_header) +
- acl->a_count * sizeof(ext2_acl_entry), GFP_KERNEL);
+ ext_acl = kmalloc(sizeof(ext2_acl_header) + acl->a_count *
+ sizeof(ext2_acl_entry), GFP_KERNEL);
if (!ext_acl)
return ERR_PTR(-ENOMEM);
ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 695f69ccf90..2cb545bf0f3 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -574,7 +574,6 @@ got:
inode->i_mode = mode;
inode->i_ino = ino;
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
memset(ei->i_data, 0, sizeof(ei->i_data));
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fb4d3220eb8..dd4e14c221e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1094,7 +1094,6 @@ void ext2_read_inode (struct inode * inode)
brelse (bh);
goto bad_inode;
}
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 4286ff6330b..513cd421ac0 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -184,8 +184,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(ext2_inode_cachep))
- printk(KERN_INFO "ext2_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(ext2_inode_cachep);
}
static void ext2_clear_inode(struct inode *inode)
@@ -544,17 +543,24 @@ static int ext2_check_descriptors (struct super_block * sb)
int i;
int desc_block = 0;
struct ext2_sb_info *sbi = EXT2_SB(sb);
- unsigned long block = le32_to_cpu(sbi->s_es->s_first_data_block);
+ unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+ unsigned long last_block;
struct ext2_group_desc * gdp = NULL;
ext2_debug ("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++)
{
+ if (i == sbi->s_groups_count - 1)
+ last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
+ else
+ last_block = first_block +
+ (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+
if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
- if (le32_to_cpu(gdp->bg_block_bitmap) < block ||
- le32_to_cpu(gdp->bg_block_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
+ le32_to_cpu(gdp->bg_block_bitmap) > last_block)
{
ext2_error (sb, "ext2_check_descriptors",
"Block bitmap for group %d"
@@ -562,8 +568,8 @@ static int ext2_check_descriptors (struct super_block * sb)
i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
return 0;
}
- if (le32_to_cpu(gdp->bg_inode_bitmap) < block ||
- le32_to_cpu(gdp->bg_inode_bitmap) >= block + EXT2_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
+ le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
{
ext2_error (sb, "ext2_check_descriptors",
"Inode bitmap for group %d"
@@ -571,9 +577,9 @@ static int ext2_check_descriptors (struct super_block * sb)
i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
return 0;
}
- if (le32_to_cpu(gdp->bg_inode_table) < block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >=
- block + EXT2_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ last_block)
{
ext2_error (sb, "ext2_check_descriptors",
"Inode table for group %d"
@@ -581,7 +587,7 @@ static int ext2_check_descriptors (struct super_block * sb)
i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
return 0;
}
- block += EXT2_BLOCKS_PER_GROUP(sb);
+ first_block += EXT2_BLOCKS_PER_GROUP(sb);
gdp++;
}
return 1;
@@ -648,11 +654,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
int i, j;
__le32 features;
- sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(*sbi));
/*
* See what the current blocksize for the device is, and
@@ -861,10 +866,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
- sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) +
- EXT2_BLOCKS_PER_GROUP(sb) - 1) /
- EXT2_BLOCKS_PER_GROUP(sb);
+ sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+ le32_to_cpu(es->s_first_data_block) - 1)
+ / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 86ae8e93adb..af52a7f8b29 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -521,11 +521,10 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
}
} else {
/* Allocate a buffer where we construct the new block. */
- header = kmalloc(sb->s_blocksize, GFP_KERNEL);
+ header = kzalloc(sb->s_blocksize, GFP_KERNEL);
error = -ENOMEM;
if (header == NULL)
goto cleanup;
- memset(header, 0, sb->s_blocksize);
end = (char *)header + sb->s_blocksize;
header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
header->h_blocks = header->h_refcount = cpu_to_le32(1);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 0d21d558b87..1e5038d9a01 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -90,8 +90,8 @@ ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
size_t n;
*size = ext3_acl_size(acl->a_count);
- ext_acl = (ext3_acl_header *)kmalloc(sizeof(ext3_acl_header) +
- acl->a_count * sizeof(ext3_acl_entry), GFP_KERNEL);
+ ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
+ sizeof(ext3_acl_entry), GFP_KERNEL);
if (!ext_acl)
return ERR_PTR(-ENOMEM);
ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
@@ -258,7 +258,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
default:
return -EINVAL;
}
- if (acl) {
+ if (acl) {
value = ext3_acl_to_disk(acl, &size);
if (IS_ERR(value))
return (int)PTR_ERR(value);
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 063d994bda0..b41a7d7e20f 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -38,6 +38,13 @@
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
+/**
+ * ext3_get_group_desc() -- load group descriptor from disk
+ * @sb: super block
+ * @block_group: given block group
+ * @bh: pointer to the buffer head to store the block
+ * group descriptor
+ */
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh)
@@ -73,8 +80,12 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
return desc + offset;
}
-/*
- * Read the bitmap for a given block_group, reading into the specified
+/**
+ * read_block_bitmap()
+ * @sb: super block
+ * @block_group: given block group
+ *
+ * Read the bitmap for a given block_group, reading into the specified
* slot in the superblock's bitmap cache.
*
* Return buffer_head on success or NULL in case of failure.
@@ -103,15 +114,22 @@ error_out:
* Operations include:
* dump, find, add, remove, is_empty, find_next_reservable_window, etc.
*
- * We use sorted double linked list for the per-filesystem reservation
- * window list. (like in vm_region).
+ * We use a red-black tree to represent per-filesystem reservation
+ * windows.
+ *
+ */
+
+/**
+ * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
+ * @rb_root: root of per-filesystem reservation rb tree
+ * @verbose: verbose mode
+ * @fn: function which wishes to dump the reservation map
*
- * Initially, we keep those small operations in the abstract functions,
- * so later if we need a better searching tree than double linked-list,
- * we could easily switch to that without changing too much
- * code.
+ * If verbose is turned on, it will print the whole block reservation
+ * windows (start, end). Otherwise, it will only print out the "bad" windows,
+ * those windows that overlap with their immediate neighbors.
*/
-#if 0
+#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
const char *fn)
{
@@ -129,7 +147,7 @@ restart:
rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
if (verbose)
printk("reservation window 0x%p "
- "start: %d, end: %d\n",
+ "start: %lu, end: %lu\n",
rsv, rsv->rsv_start, rsv->rsv_end);
if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
printk("Bad reservation %p (start >= end)\n",
@@ -161,6 +179,22 @@ restart:
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
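This hunk also flips the guard around __rsv_window_dump() from "#if 0" to "#if 1", so the dump helper is now compiled in and can be called before the BUG() added further down. The dump itself is an in-order walk of the per-filesystem rb tree; a sketch of such a walk using the usual <linux/rbtree.h> helpers (not lifted verbatim from the patch):

    static void dump_windows_sketch(struct rb_root *root)
    {
            struct rb_node *n;
            struct ext3_reserve_window_node *rsv;

            /* visit every reservation window in tree (start-block) order */
            for (n = rb_first(root); n; n = rb_next(n)) {
                    rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                    printk("window %p: start %lu, end %lu\n", rsv,
                           (unsigned long)rsv->rsv_start,
                           (unsigned long)rsv->rsv_end);
            }
    }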
+/**
+ * goal_in_my_reservation()
+ * @rsv: inode's reservation window
+ * @grp_goal: given goal block relative to the allocation block group
+ * @group: the current allocation block group
+ * @sb: filesystem super block
+ *
+ * Test if the given goal block (group relative) is within the file's
+ * own block reservation window range.
+ *
+ * If the reservation window is outside the goal allocation group, return 0;
+ * grp_goal (given goal block) could be -1, which means no specific
+ * goal block. In this case, always return 1.
+ * If the goal block is within the reservation window, return 1;
+ * otherwise, return 0.
+ */
static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
unsigned int group, struct super_block * sb)
@@ -168,7 +202,7 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
ext3_fsblk_t group_first_block, group_last_block;
group_first_block = ext3_group_first_block_no(sb, group);
- group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
+ group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
if ((rsv->_rsv_start > group_last_block) ||
(rsv->_rsv_end < group_first_block))
@@ -179,7 +213,11 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
return 1;
}
-/*
+/**
+ * search_reserve_window()
+ * @rb_root: root of reservation tree
+ * @goal: target allocation block
+ *
* Find the reserved window which includes the goal, or the previous one
* if the goal is not in any window.
* Returns NULL if there are no windows or if all windows start after the goal.
@@ -216,6 +254,13 @@ search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
return rsv;
}
+/**
+ * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
+ * @sb: super block
+ * @rsv: reservation window to add
+ *
+ * Must be called with rsv_lock held.
+ */
void ext3_rsv_window_add(struct super_block *sb,
struct ext3_reserve_window_node *rsv)
{
@@ -236,14 +281,25 @@ void ext3_rsv_window_add(struct super_block *sb,
p = &(*p)->rb_left;
else if (start > this->rsv_end)
p = &(*p)->rb_right;
- else
+ else {
+ rsv_window_dump(root, 1);
BUG();
+ }
}
rb_link_node(node, parent, p);
rb_insert_color(node, root);
}
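For reference, the insertion loop that the added overlap dump sits in follows the standard rbtree pattern: walk down comparing against the new window's start block, remember the parent and link slot, then rb_link_node() and rb_insert_color(). A condensed sketch with the overlap BUG() path omitted (names are illustrative):

    static void rsv_insert_sketch(struct rb_root *root,
                                  struct ext3_reserve_window_node *new)
    {
            struct rb_node **p = &root->rb_node, *parent = NULL;
            struct ext3_reserve_window_node *this;

            while (*p) {
                    parent = *p;
                    this = rb_entry(parent, struct ext3_reserve_window_node,
                                    rsv_node);
                    if (new->rsv_start < this->rsv_start)
                            p = &(*p)->rb_left;
                    else
                            p = &(*p)->rb_right;
            }
            rb_link_node(&new->rsv_node, parent, p);
            rb_insert_color(&new->rsv_node, root);
    }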
+/**
+ * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
+ * @sb: super block
+ * @rsv: reservation window to remove
+ *
+ * Mark the block reservation window as not allocated, and unlink it
+ * from the filesystem reservation window rb tree. Must be called with
+ * rsv_lock held.
+ */
static void rsv_window_remove(struct super_block *sb,
struct ext3_reserve_window_node *rsv)
{
@@ -253,11 +309,39 @@ static void rsv_window_remove(struct super_block *sb,
rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}
+/*
+ * rsv_is_empty() -- Check if the reservation window is unallocated (empty).
+ * @rsv: given reservation window to check
+ *
+ * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
+ */
static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
/* a valid reservation end block could not be 0 */
- return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
+ return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
}
+
+/**
+ * ext3_init_block_alloc_info()
+ * @inode: file inode structure
+ *
+ * Allocate and initialize the reservation window structure, and
+ * then link the window to the ext3 inode structure.
+ *
+ * The reservation window structure is only dynamically allocated
+ * and linked to the ext3 inode the first time the open file
+ * needs a new block. So, before every ext3_new_block(s) call, for
+ * regular files, we should check whether the reservation window
+ * structure exists or not. If it does not, this function is called.
+ * Failure to do so will result in block reservation being turned off
+ * for that open file.
+ *
+ * This function is called from ext3_get_blocks_handle(), and also
+ * when setting the reservation window size through ioctl before the
+ * file is opened for write (which needs block allocation).
+ *
+ * Needs truncate_mutex protection prior to calling this function.
+ */
void ext3_init_block_alloc_info(struct inode *inode)
{
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -271,7 +355,7 @@ void ext3_init_block_alloc_info(struct inode *inode)
rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
- /*
+ /*
* if filesystem is mounted with NORESERVATION, the goal
* reservation window size is set to zero to indicate
* block reservation is off
@@ -287,6 +371,19 @@ void ext3_init_block_alloc_info(struct inode *inode)
ei->i_block_alloc_info = block_i;
}
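The caller-side check that the comment above asks for is small; a sketch of it, in the shape used by ext3_get_blocks_handle() and assumed to run with truncate_mutex held:

    /* lazily create the per-inode reservation state on first use */
    if (S_ISREG(inode->i_mode) && !EXT3_I(inode)->i_block_alloc_info)
            ext3_init_block_alloc_info(inode);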
+/**
+ * ext3_discard_reservation()
+ * @inode: inode
+ *
+ * Discard (free) the block reservation window on last file close,
+ * on truncate, or at the last iput().
+ *
+ * It is called in three cases:
+ * ext3_release_file(): when the last writer closes the file
+ * ext3_clear_inode(): at the last iput(), when nobody links to this file.
+ * ext3_truncate(): when the block indirect map is about to change.
+ *
+ */
void ext3_discard_reservation(struct inode *inode)
{
struct ext3_inode_info *ei = EXT3_I(inode);
@@ -306,7 +403,14 @@ void ext3_discard_reservation(struct inode *inode)
}
}
-/* Free given blocks, update quota and i_blocks field */
+/**
+ * ext3_free_blocks_sb() -- Free given blocks and update quota
+ * @handle: handle to this transaction
+ * @sb: super block
+ * @block: start physical block to free
+ * @count: number of blocks to free
+ * @pdquot_freed_blocks: pointer to quota
+ */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
ext3_fsblk_t block, unsigned long count,
unsigned long *pdquot_freed_blocks)
@@ -419,8 +523,8 @@ do_more:
}
/* @@@ This prevents newly-allocated data from being
* freed and then reallocated within the same
- * transaction.
- *
+ * transaction.
+ *
* Ideally we would want to allow that to happen, but to
* do so requires making journal_forget() capable of
* revoking the queued write of a data block, which
@@ -433,7 +537,7 @@ do_more:
* safe not to set the allocation bit in the committed
* bitmap, because we know that there is no outstanding
* activity on the buffer any more and so it is safe to
- * reallocate it.
+ * reallocate it.
*/
BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
J_ASSERT_BH(bitmap_bh,
@@ -490,7 +594,13 @@ error_return:
return;
}
-/* Free given blocks, update quota and i_blocks field */
+/**
+ * ext3_free_blocks() -- Free given blocks and update quota
+ * @handle: handle for this transaction
+ * @inode: inode
+ * @block: start physical block to free
+ * @count: number of blocks to free
+ */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
ext3_fsblk_t block, unsigned long count)
{
@@ -508,7 +618,11 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
return;
}
-/*
+/**
+ * ext3_test_allocatable()
+ * @nr: given allocation block (group relative)
+ * @bh: bufferhead contains the bitmap of the given block group
+ *
* For ext3 allocations, we must not reuse any blocks which are
* allocated in the bitmap buffer's "last committed data" copy. This
* prevents deletes from freeing up the page for reuse until we have
@@ -518,7 +632,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
* data would allow the old block to be overwritten before the
* transaction committed (because we force data to disk before commit).
* This would lead to corruption if we crashed between overwriting the
- * data and committing the delete.
+ * data and committing the delete.
*
* @@@ We may want to make this allocation behaviour conditional on
* data-writes at some point, and disable it for metadata allocations or
@@ -541,6 +655,16 @@ static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
return ret;
}
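The diff only shows the tail of ext3_test_allocatable(); the "check both copies" rule described above amounts to testing the bit in the live bitmap and, when a committed copy exists, in the journal's b_committed_data as well. A sketch of that logic, assuming the usual ext3/JBD helpers (not copied from the patch):

    static int test_allocatable_sketch(ext3_grpblk_t nr, struct buffer_head *bh)
    {
            struct journal_head *jh = bh2jh(bh);
            int ret;

            if (ext3_test_bit(nr, bh->b_data))
                    return 0;                       /* busy in the live bitmap */
            jbd_lock_bh_state(bh);
            if (!jh->b_committed_data)
                    ret = 1;                        /* nothing pending in the journal */
            else
                    ret = !ext3_test_bit(nr, jh->b_committed_data);
            jbd_unlock_bh_state(bh);
            return ret;
    }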
+/**
+ * bitmap_search_next_usable_block()
+ * @start: the starting block (group relative) of the search
+ * @bh: bufferhead contains the block group bitmap
+ * @maxblocks: the ending block (group relative) of the reservation
+ *
+ * The bitmap search --- search forward alternately through the actual
+ * bitmap on disk and the last-committed copy in journal, until we find a
+ * bit free in both bitmaps.
+ */
static ext3_grpblk_t
bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
ext3_grpblk_t maxblocks)
@@ -548,11 +672,6 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
ext3_grpblk_t next;
struct journal_head *jh = bh2jh(bh);
- /*
- * The bitmap search --- search forward alternately through the actual
- * bitmap and the last-committed copy until we find a bit free in
- * both
- */
while (start < maxblocks) {
next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
if (next >= maxblocks)
@@ -562,14 +681,20 @@ bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
jbd_lock_bh_state(bh);
if (jh->b_committed_data)
start = ext3_find_next_zero_bit(jh->b_committed_data,
- maxblocks, next);
+ maxblocks, next);
jbd_unlock_bh_state(bh);
}
return -1;
}
-/*
- * Find an allocatable block in a bitmap. We honour both the bitmap and
+/**
+ * find_next_usable_block()
+ * @start: the starting block (group relative) to find next
+ * allocatable block in bitmap.
+ * @bh: bufferhead contains the block group bitmap
+ * @maxblocks: the ending block (group relative) for the search
+ *
+ * Find an allocatable block in a bitmap. We honor both the bitmap and
* its last-committed copy (if that exists), and perform the "most
* appropriate allocation" algorithm of looking for a free block near
* the initial goal; then for a free byte somewhere in the bitmap; then
@@ -584,7 +709,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
if (start > 0) {
/*
- * The goal was occupied; search forward for a free
+ * The goal was occupied; search forward for a free
* block within the next XX blocks.
*
* end_goal is more or less random, but it has to be
@@ -620,7 +745,11 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
return here;
}
-/*
+/**
+ * claim_block()
+ * @block: the free block (group relative) to allocate
+ * @bh: the bufferhead contains the block group bitmap
+ *
* We think we can allocate this block in this bitmap. Try to set the bit.
* If that succeeds then check that nobody has allocated and then freed the
* block since we saw that is was not marked in b_committed_data. If it _was_
@@ -646,7 +775,26 @@ claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
return ret;
}
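claim_block(), shown only in part here, implements the comment above with an atomic test-and-set followed by a possible rollback. A sketch of the idea (structure and names are illustrative, not taken from the patch):

    static int claim_block_sketch(spinlock_t *lock, ext3_grpblk_t block,
                                  struct buffer_head *bh)
    {
            struct journal_head *jh = bh2jh(bh);

            if (ext3_set_bit_atomic(lock, block, bh->b_data))
                    return 0;                       /* already in use */
            jbd_lock_bh_state(bh);
            if (jh->b_committed_data &&
                ext3_test_bit(block, jh->b_committed_data)) {
                    /* allocated and freed within the committing transaction:
                     * back the claim out and report failure */
                    ext3_clear_bit_atomic(lock, block, bh->b_data);
                    jbd_unlock_bh_state(bh);
                    return 0;
            }
            jbd_unlock_bh_state(bh);
            return 1;
    }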
-/*
+/**
+ * ext3_try_to_allocate()
+ * @sb: superblock
+ * @handle: handle to this transaction
+ * @group: given allocation block group
+ * @bitmap_bh: bufferhead holds the block bitmap
+ * @grp_goal: given target block within the group
+ * @count: target number of blocks to allocate
+ * @my_rsv: reservation window
+ *
+ * Attempt to allocate blocks within a given range. Set the range of allocation
+ * first, then find the first free bit(s) from the bitmap (within the range),
+ * and finally allocate the blocks by claiming the found free bits as allocated.
+ *
+ * To set the range of this allocation:
+ * if there is a reservation window, only try to allocate block(s) from the
+ * file's own reservation window;
+ * otherwise, the allocation range starts from the given goal block and ends at
+ * the block group's last block.
+ *
* If we failed to allocate the desired block then we may end up crossing to a
* new bitmap. In that case we must release write access to the old one via
* ext3_journal_release_buffer(), else we'll run out of credits.
@@ -703,7 +851,8 @@ repeat:
}
start = grp_goal;
- if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) {
+ if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
+ grp_goal, bitmap_bh)) {
/*
* The block was allocated by another thread, or it was
* allocated and then freed by another thread
@@ -718,7 +867,8 @@ repeat:
grp_goal++;
while (num < *count && grp_goal < end
&& ext3_test_allocatable(grp_goal, bitmap_bh)
- && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) {
+ && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
+ grp_goal, bitmap_bh)) {
num++;
grp_goal++;
}
@@ -730,12 +880,12 @@ fail_access:
}
/**
- * find_next_reservable_window():
+ * find_next_reservable_window():
* find a reservable space within the given range.
* It does not allocate the reservation window for now:
* alloc_new_reservation() will do the work later.
*
- * @search_head: the head of the searching list;
+ * @search_head: the head of the searching list;
* This is not necessarily the list head of the whole filesystem
*
* We have both head and start_block to assist the search
@@ -743,12 +893,12 @@ fail_access:
* but we will shift to the place where start_block is,
* then start from there, when looking for a reservable space.
*
- * @size: the target new reservation window size
+ * @size: the target new reservation window size
*
- * @group_first_block: the first block we consider to start
+ * @group_first_block: the first block we consider to start
* the real search from
*
- * @last_block:
+ * @last_block:
* the maximum block number that our goal reservable space
* could start from. This is normally the last block in this
* group. The search will end when we found the start of next
@@ -756,10 +906,10 @@ fail_access:
* This could handle the cross boundary reservation window
* request.
*
- * basically we search from the given range, rather than the whole
- * reservation double linked list, (start_block, last_block)
- * to find a free region that is of my size and has not
- * been reserved.
+ * basically we search from the given range, rather than the whole
+ * reservation double linked list, (start_block, last_block)
+ * to find a free region that is of my size and has not
+ * been reserved.
*
*/
static int find_next_reservable_window(
@@ -812,7 +962,7 @@ static int find_next_reservable_window(
/*
* Found a reserveable space big enough. We could
* have a reservation across the group boundary here
- */
+ */
break;
}
}
@@ -848,7 +998,7 @@ static int find_next_reservable_window(
}
/**
- * alloc_new_reservation()--allocate a new reservation window
+ * alloc_new_reservation()--allocate a new reservation window
*
* To make a new reservation, we search part of the filesystem
* reservation list (the list that inside the group). We try to
@@ -897,7 +1047,7 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
group_first_block = ext3_group_first_block_no(sb, group);
- group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
+ group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
if (grp_goal < 0)
start_block = group_first_block;
@@ -929,9 +1079,10 @@ static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
if ((my_rsv->rsv_alloc_hit >
(my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
/*
- * if we previously allocation hit ration is greater than half
- * we double the size of reservation window next time
- * otherwise keep the same
+ * if the previous allocation hit ratio is
+ * greater than 1/2, then we double the size of
+ * the reservation window the next time;
+ * otherwise we keep the same size window.
*/
size = size * 2;
if (size > EXT3_MAX_RESERVE_BLOCKS)
@@ -1010,6 +1161,23 @@ retry:
goto retry;
}
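The doubling heuristic in the hunk above is easiest to see with numbers: starting from the default 8-block window, a file that keeps hitting its reservation grows it 8, 16, 32, ... up to the cap. A sketch of the sizing rule (the cap name matches ext3's EXT3_MAX_RESERVE_BLOCKS; the helper itself is made up for illustration):

    static unsigned int next_window_size(unsigned int cur_size,
                                         unsigned int alloc_hit,
                                         unsigned int window_blocks)
    {
            if (alloc_hit > window_blocks / 2) {    /* hit ratio above 1/2 */
                    cur_size *= 2;
                    if (cur_size > EXT3_MAX_RESERVE_BLOCKS)
                            cur_size = EXT3_MAX_RESERVE_BLOCKS;
            }
            return cur_size;
    }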
+/**
+ * try_to_extend_reservation()
+ * @my_rsv: given reservation window
+ * @sb: super block
+ * @size: the delta to extend
+ *
+ * Attempt to expand the reservation window so it is large enough to
+ * hold the required number of free blocks.
+ *
+ * Since ext3_try_to_allocate() will always allocate blocks within
+ * the reservation window range, if the window size is too small,
+ * multiple blocks allocation has to stop at the end of the reservation
+ * window. To make this more efficient, given the total number of
+ * blocks needed and the current size of the window, we try to
+ * expand the reservation window size if necessary on a best-effort
+ * basis before ext3_new_blocks() tries to allocate blocks,
+ */
static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
struct super_block *sb, int size)
{
@@ -1035,7 +1203,17 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
spin_unlock(rsv_lock);
}
-/*
+/**
+ * ext3_try_to_allocate_with_rsv()
+ * @sb: superblock
+ * @handle: handle to this transaction
+ * @group: given allocation block group
+ * @bitmap_bh: bufferhead holds the block bitmap
+ * @grp_goal: given target block within the group
+ * @count: target number of blocks to allocate
+ * @my_rsv: reservation window
+ * @errp: pointer to store the error code
+ *
* This is the main function used to allocate a new block and its reservation
* window.
*
@@ -1051,9 +1229,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
* reservation), and there are lots of free blocks, but they are all
* being reserved.
*
- * We use a sorted double linked list for the per-filesystem reservation list.
- * The insert, remove and find a free space(non-reserved) operations for the
- * sorted double linked list should be fast.
+ * We use a red-black tree for the per-filesystem reservation list.
*
*/
static ext3_grpblk_t
@@ -1063,7 +1239,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
struct ext3_reserve_window_node * my_rsv,
unsigned long *count, int *errp)
{
- ext3_fsblk_t group_first_block;
+ ext3_fsblk_t group_first_block, group_last_block;
ext3_grpblk_t ret = 0;
int fatal;
unsigned long num = *count;
@@ -1100,6 +1276,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
* first block is the block number of the first block in this group
*/
group_first_block = ext3_group_first_block_no(sb, group);
+ group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
/*
* Basically we will allocate a new block from inode's reservation
@@ -1118,7 +1295,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
*/
while (1) {
if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
- !goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) {
+ !goal_in_my_reservation(&my_rsv->rsv_window,
+ grp_goal, group, sb)) {
if (my_rsv->rsv_goal_size < *count)
my_rsv->rsv_goal_size = *count;
ret = alloc_new_reservation(my_rsv, grp_goal, sb,
@@ -1126,17 +1304,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
if (ret < 0)
break; /* failed */
- if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb))
+ if (!goal_in_my_reservation(&my_rsv->rsv_window,
+ grp_goal, group, sb))
grp_goal = -1;
- } else if (grp_goal > 0 && (my_rsv->rsv_end-grp_goal+1) < *count)
+ } else if (grp_goal > 0 &&
+ (my_rsv->rsv_end-grp_goal+1) < *count)
try_to_extend_reservation(my_rsv, sb,
*count-my_rsv->rsv_end + grp_goal - 1);
- if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
- || (my_rsv->rsv_end < group_first_block))
+ if ((my_rsv->rsv_start > group_last_block) ||
+ (my_rsv->rsv_end < group_first_block)) {
+ rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
BUG();
- ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, grp_goal,
- &num, &my_rsv->rsv_window);
+ }
+ ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+ grp_goal, &num, &my_rsv->rsv_window);
if (ret >= 0) {
my_rsv->rsv_alloc_hit += num;
*count = num;
@@ -1161,6 +1343,12 @@ out:
return ret;
}
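A worked example of the extension request in the loop above, with made-up numbers: if the goal maps to group-relative block 100, the window currently ends at rsv_end = 105, and *count = 10 blocks are wanted, the window still covers rsv_end - grp_goal + 1 = 6 blocks from the goal, so try_to_extend_reservation() is asked for the missing *count - (rsv_end - grp_goal + 1) = 4 blocks, which is exactly the "*count-my_rsv->rsv_end + grp_goal - 1" expression in the hunk.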
+/**
+ * ext3_has_free_blocks()
+ * @sbi: in-core super block structure.
+ *
+ * Check if filesystem has at least 1 free block available for allocation.
+ */
static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
ext3_fsblk_t free_blocks, root_blocks;
@@ -1175,11 +1363,17 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
return 1;
}
-/*
+/**
+ * ext3_should_retry_alloc()
+ * @sb: super block
+ * @retries: number of attempts that have been made
+ *
* ext3_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
* for the current or commiting transaction to complete, and then
* return TRUE.
+ *
+ * If the total number of retries exceeds three, return FALSE.
*/
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
@@ -1191,13 +1385,19 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
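A typical caller of the helper above follows a start/allocate/stop loop and only retries on ENOSPC while ext3_should_retry_alloc() agrees. A sketch of that shape (do_allocation() is a hypothetical stand-in for the caller's real work):

    int retries = 0, err;
    handle_t *handle;

    retry:
            handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
            if (IS_ERR(handle))
                    return PTR_ERR(handle);
            err = do_allocation(handle);            /* hypothetical helper */
            ext3_journal_stop(handle);
            if (err == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                    goto retry;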
-/*
- * ext3_new_block uses a goal block to assist allocation. If the goal is
- * free, or there is a free block within 32 blocks of the goal, that block
- * is allocated. Otherwise a forward search is made for a free block; within
- * each block group the search first looks for an entire free byte in the block
- * bitmap, and then for any free bit if that fails.
- * This function also updates quota and i_blocks field.
+/**
+ * ext3_new_blocks() -- core block(s) allocation function
+ * @handle: handle to this transaction
+ * @inode: file inode
+ * @goal: given target block(filesystem wide)
+ * @count: target number of blocks to allocate
+ * @errp: error code
+ *
+ * ext3_new_blocks uses a goal block to assist allocation. It tries to
+ * allocate block(s) from the block group that contains the goal block first. If that
+ * fails, it will try to allocate block(s) from other block groups without
+ * any specific goal block.
+ *
*/
ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
ext3_fsblk_t goal, unsigned long *count, int *errp)
@@ -1303,7 +1503,7 @@ retry_alloc:
smp_rmb();
/*
- * Now search the rest of the groups. We assume that
+ * Now search the rest of the groups. We assume that
* i and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
@@ -1428,7 +1628,7 @@ allocated:
spin_lock(sb_bgl_lock(sbi, group_no));
gdp->bg_free_blocks_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
+ cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
spin_unlock(sb_bgl_lock(sbi, group_no));
percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
@@ -1471,6 +1671,12 @@ ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
return ext3_new_blocks(handle, inode, goal, &count, errp);
}
+/**
+ * ext3_count_free_blocks() -- count filesystem free blocks
+ * @sb: superblock
+ *
+ * Adds up the number of free blocks from each block group.
+ */
ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
{
ext3_fsblk_t desc_count;
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index ce4f82b9e52..b9176eed98d 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
unsigned int i;
unsigned long sum = 0;
- if (!map)
+ if (!map)
return (0);
for (i = 0; i < numchars; i++)
sum += nibblemap[map->b_data[i] & 0xf] +
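For context, nibblemap[] (not visible in this hunk) maps each 4-bit value to its count of zero bits, so ext3_count_free() tallies free blocks one byte at a time from both nibbles. A worked example with a made-up byte 0xB4 (binary 10110100): the low nibble 0x4 contributes 3 zero bits and the high nibble 0xB contributes 1, giving 4 free blocks for that byte.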
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index fbb0d4ed07d..429acbb4e06 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext3_filetype_table[filetype]);
}
-
+
int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
@@ -67,7 +67,7 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
unsigned long offset)
{
const char * error_msg = NULL;
- const int rlen = le16_to_cpu(de->rec_len);
+ const int rlen = le16_to_cpu(de->rec_len);
if (rlen < EXT3_DIR_REC_LEN(1))
error_msg = "rec_len is smaller than minimal";
@@ -162,7 +162,7 @@ revalidate:
* to make sure. */
if (filp->f_version != inode->i_version) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
- de = (struct ext3_dir_entry_2 *)
+ de = (struct ext3_dir_entry_2 *)
(bh->b_data + i);
/* It's too expensive to do a full
* dirent test each time round this
@@ -181,7 +181,7 @@ revalidate:
filp->f_version = inode->i_version;
}
- while (!error && filp->f_pos < inode->i_size
+ while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
@@ -229,7 +229,7 @@ out:
/*
* These functions convert from the major/minor hash to an f_pos
* value.
- *
+ *
* Currently we only use major hash numer. This is unfortunate, but
* on 32-bit machines, the same VFS interface is used for lseek and
* llseek, so if we use the 64 bit offset, then the 32-bit versions of
@@ -250,7 +250,7 @@ out:
struct fname {
__u32 hash;
__u32 minor_hash;
- struct rb_node rb_hash;
+ struct rb_node rb_hash;
struct fname *next;
__u32 inode;
__u8 name_len;
@@ -343,10 +343,9 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
/* Create and allocate the fname structure */
len = sizeof(struct fname) + dirent->name_len + 1;
- new_fn = kmalloc(len, GFP_KERNEL);
+ new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
- memset(new_fn, 0, len);
new_fn->hash = hash;
new_fn->minor_hash = minor_hash;
new_fn->inode = le32_to_cpu(dirent->inode);
@@ -410,7 +409,7 @@ static int call_filldir(struct file * filp, void * dirent,
curr_pos = hash2pos(fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
- fname->name_len, curr_pos,
+ fname->name_len, curr_pos,
fname->inode,
get_dtype(sb, fname->file_type));
if (error) {
@@ -465,7 +464,7 @@ static int ext3_dx_readdir(struct file * filp,
/*
* Fill the rbtree if we have no more entries,
* or the inode has changed since we last read in the
- * cached entries.
+ * cached entries.
*/
if ((!info->curr_node) ||
(filp->f_version != inode->i_version)) {
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 1efefb630ea..994efd189f4 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t
force_commit:
err = ext3_force_commit(inode->i_sb);
- if (err)
+ if (err)
return err;
return ret;
}
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 49382a208e0..dd1fd3c0fc0 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -8,14 +8,14 @@
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
- *
+ *
* ext3fs fsync primitive
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
- *
+ *
* Removed unnecessary code duplication for little endian machines
- * and excessive __inline__s.
+ * and excessive __inline__s.
* Andi Kleen, 1997
*
* Major simplications and cleanup - we only need to do the metadata, because
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 5a2d1235ead..deeb27b5ba8 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002 by Theodore Ts'o
*
* This file is released under the GPL v2.
- *
+ *
* This file may be redistributed under the terms of the GNU Public
* License.
*/
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
* Returns the hash of a filename. If len is 0 and name is NULL, then
* this function can be used to test whether or not a hash version is
* supported.
- *
+ *
* The seed is an 4 longword (32 bits) "secret" which can be used to
* uniquify a hash. If the seed is all zero's, then some default seed
* may be used.
- *
+ *
* A particular hash version specifies whether or not the seed is
* represented, and whether or not the returned hash is 32 bits or 64
* bits. 32 bit hashes will return 0 for the minor hash.
@@ -95,7 +95,7 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
__u32 minor_hash = 0;
const char *p;
int i;
- __u32 in[8], buf[4];
+ __u32 in[8], buf[4];
/* Initialize the default seed for the hash checksum functions */
buf[0] = 0x67452301;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 36546ed36a1..e45dbd65173 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -202,7 +202,7 @@ error_return:
static int find_group_dir(struct super_block *sb, struct inode *parent)
{
int ngroups = EXT3_SB(sb)->s_groups_count;
- int freei, avefreei;
+ unsigned int freei, avefreei;
struct ext3_group_desc *desc, *best_desc = NULL;
struct buffer_head *bh;
int group, best_group = -1;
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
continue;
if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
continue;
- if (!best_desc ||
+ if (!best_desc ||
(le16_to_cpu(desc->bg_free_blocks_count) >
le16_to_cpu(best_desc->bg_free_blocks_count))) {
best_group = group;
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
return best_group;
}
-/*
- * Orlov's allocator for directories.
- *
+/*
+ * Orlov's allocator for directories.
+ *
* We always try to spread first-level directories.
*
- * If there are blockgroups with both free inodes and free blocks counts
- * not worse than average we return one with smallest directory count.
- * Otherwise we simply return a random group.
- *
- * For the rest rules look so:
- *
- * It's OK to put directory into a group unless
- * it has too many directories already (max_dirs) or
- * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
- * Parent's group is prefered, if it doesn't satisfy these
- * conditions we search cyclically through the rest. If none
- * of the groups look good we just look for a group with more
- * free inodes than average (starting at parent's group).
- *
- * Debt is incremented each time we allocate a directory and decremented
- * when we allocate an inode, within 0--255.
- */
+ * If there are blockgroups with both free inodes and free blocks counts
+ * not worse than average we return one with smallest directory count.
+ * Otherwise we simply return a random group.
+ *
+ * For the rest rules look so:
+ *
+ * It's OK to put directory into a group unless
+ * it has too many directories already (max_dirs) or
+ * it has too few free inodes left (min_inodes) or
+ * it has too few free blocks left (min_blocks) or
+ * it's already running too large debt (max_debt).
+ * Parent's group is preferred; if it doesn't satisfy these
+ * conditions we search cyclically through the rest. If none
+ * of the groups look good we just look for a group with more
+ * free inodes than average (starting at parent's group).
+ *
+ * Debt is incremented each time we allocate a directory and decremented
+ * when we allocate an inode, within 0--255.
+ */
#define INODE_COST 64
#define BLOCK_COST 256
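The acceptance test that the (re-wrapped) comment above describes is a handful of per-group comparisons. A condensed sketch, following the field names used by find_group_orlov() and leaving out the fallback passes (illustrative, not copied from the patch):

    for (i = 0; i < ngroups; i++) {
            group = (parent_group + i) % ngroups;
            desc = ext3_get_group_desc(sb, group, &bh);
            if (!desc || !desc->bg_free_inodes_count)
                    continue;
            if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
                    continue;                       /* too many directories */
            if (sbi->s_debts[group] >= max_debt)
                    continue;                       /* too much debt */
            if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
                    continue;                       /* too few free inodes */
            if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
                    continue;                       /* too few free blocks */
            goto found;                             /* this group is acceptable */
    }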
@@ -261,10 +261,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
struct ext3_super_block *es = sbi->s_es;
int ngroups = sbi->s_groups_count;
int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
- int freei, avefreei;
+ unsigned int freei, avefreei;
ext3_fsblk_t freeb, avefreeb;
ext3_fsblk_t blocks_per_dir;
- int ndirs;
+ unsigned int ndirs;
int max_debt, max_dirs, min_inodes;
ext3_grpblk_t min_blocks;
int group = -1, i;
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
group = find_group_dir(sb, dir);
else
group = find_group_orlov(sb, dir);
- } else
+ } else
group = find_group_other(sb, dir);
err = -ENOSPC;
@@ -559,7 +559,6 @@ got:
inode->i_ino = ino;
/* This is the optimal IO size (for stat), not the fs block size */
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 84be02e9365..dcf4f1dd108 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -13,11 +13,11 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Goal-directed block allocation by Stephen Tweedie
- * (sct@redhat.com), 1993, 1998
+ * (sct@redhat.com), 1993, 1998
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* 64-bit file support on 64-bit platforms by Jakub Jelinek
- * (jj@sunsite.ms.mff.cuni.cz)
+ * (jj@sunsite.ms.mff.cuni.cz)
*
* Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
*/
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
/*
* The ext3 forget function must perform a revoke if we are freeing data
* which has been journaled. Metadata (eg. indirect blocks) must be
- * revoked in all cases.
+ * revoked in all cases.
*
* "bh" may be NULL: a metadata block may have been freed from memory
* but there may still be a record of it in the journal, and that record
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
* Work out how many blocks we need to proceed with the next chunk of a
* truncate transaction.
*/
-static unsigned long blocks_for_truncate(struct inode *inode)
+static unsigned long blocks_for_truncate(struct inode *inode)
{
unsigned long needed;
@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)
/* But we need to bound the transaction so we don't overflow the
* journal. */
- if (needed > EXT3_MAX_TRANS_DATA)
+ if (needed > EXT3_MAX_TRANS_DATA)
needed = EXT3_MAX_TRANS_DATA;
return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
-/*
+/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a conventient checkpoint to make
* sure we don't overflow the journal.
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
* start_transaction gets us a new handle for a truncate transaction,
* and extend_transaction tries to extend the existing one a bit. If
* extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
+ * transaction in the top-level truncate loop. --sct
*/
-static handle_t *start_transaction(struct inode *inode)
+static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode)
ext3_orphan_del(handle, inode);
EXT3_I(inode)->i_dtime = get_seconds();
- /*
+ /*
* One subtle ordering requirement: if anything has gone wrong
* (transaction abort, IO errors, whatever), then we can still
* do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty
- * fails.
+ * fails.
*/
if (ext3_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
@@ -398,7 +398,7 @@ no_block:
* + if there is a block to the left of our position - allocate near it.
* + if pointer will live in indirect block - allocate near that block.
* + if pointer will live in inode - allocate in the same
- * cylinder group.
+ * cylinder group.
*
* In the latter case we colour the starting block by the callers PID to
* prevent it from clashing with concurrent allocations for a different inode
@@ -470,7 +470,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
* ext3_blks_to_allocate: Look up the block map and count the number
* of direct blocks need to be allocated for the given branch.
*
- * @branch: chain of indirect blocks
+ * @branch: chain of indirect blocks
* @k: number of blocks need for indirect blocks
* @blks: number of data blocks to be mapped.
* @blocks_to_boundary: the offset in the indirect block
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
jbd_debug(5, "splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, where->bh);
- if (err)
+ if (err)
goto err_out;
} else {
/*
@@ -1098,7 +1098,7 @@ static int walk_page_buffers( handle_t *handle,
for ( bh = head, block_start = 0;
ret == 0 && (bh != head || !block_start);
- block_start = block_end, bh = next)
+ block_start = block_end, bh = next)
{
next = bh->b_this_page;
block_end = block_start + blocksize;
@@ -1137,7 +1137,7 @@ static int walk_page_buffers( handle_t *handle,
* So what we do is to rely on the fact that journal_stop/journal_start
* will _not_ run commit under these circumstances because handle->h_ref
* is elevated. We'll still have enough credits for the tiny quotafile
- * write.
+ * write.
*/
static int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file,
if (inode->i_size > EXT3_I(inode)->i_disksize) {
EXT3_I(inode)->i_disksize = inode->i_size;
ret2 = ext3_mark_inode_dirty(handle, inode);
- if (!ret)
+ if (!ret)
ret = ret2;
}
ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file,
return ret;
}
-/*
+/*
* bmap() is special. It gets used by applications such as lilo and by
* the swapper to find the on-disk block of a specific piece of data.
*
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file,
* filesystem and enables swap, then they may get a nasty shock when the
* data getting swapped to that swapfile suddenly gets overwritten by
* the original zero's written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache.
+ * awaiting writeback in the kernel's buffer cache.
*
* So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache.
+ * take extra steps to flush any blocks which might be in the cache.
*/
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
int err;
if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
- /*
+ /*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
* only if we run lilo or swapon on a freshly made file
- * do we expect this to happen.
+ * do we expect this to happen.
*
* (bmap requires CAP_SYS_RAWIO so this does not
* represent an unprivileged user DOS attack --- we'd be
* in trouble if mortal users could trigger this path at
- * will.)
+ * will.)
*
* NB. EXT3_STATE_JDATA is not set on files other than
* regular files. If somebody wants to bmap a directory
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page,
*/
/*
- * And attach them to the current transaction. But only if
+ * And attach them to the current transaction. But only if
* block_write_full_page() succeeded. Otherwise they are unmapped,
* and generally junk.
*/
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
}
}
- ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext3_get_block, NULL);
@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
__le32 *first, __le32 *last)
{
ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
- unsigned long count = 0; /* Number of blocks in the run */
+ unsigned long count = 0; /* Number of blocks in the run */
__le32 *block_to_free_p = NULL; /* Pointer into inode/ind
corresponding to
block_to_free */
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
} else if (nr == block_to_free + count) {
count++;
} else {
- ext3_clear_blocks(handle, inode, this_bh,
+ ext3_clear_blocks(handle, inode, this_bh,
block_to_free,
count, block_to_free_p, p);
block_to_free = nr;
@@ -2115,7 +2115,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
*/
if (!bh) {
ext3_error(inode->i_sb, "ext3_free_branches",
- "Read failure, inode=%ld, block="E3FSBLK,
+ "Read failure, inode=%lu, block="E3FSBLK,
inode->i_ino, nr);
continue;
}
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
*p = 0;
BUFFER_TRACE(parent_bh,
"call ext3_journal_dirty_metadata");
- ext3_journal_dirty_metadata(handle,
+ ext3_journal_dirty_metadata(handle,
parent_bh);
}
}
@@ -2632,9 +2632,6 @@ void ext3_read_inode(struct inode * inode)
* recovery code: that's fine, we're about to complete
* the process of deleting those. */
}
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
- * (for stat), not the fs block
- * size */
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
@@ -2704,7 +2701,7 @@ void ext3_read_inode(struct inode * inode)
if (raw_inode->i_block[0])
init_special_inode(inode, inode->i_mode,
old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
- else
+ else
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
}
@@ -2724,8 +2721,8 @@ bad_inode:
*
* The caller must have write access to iloc->bh.
*/
-static int ext3_do_update_inode(handle_t *handle,
- struct inode *inode,
+static int ext3_do_update_inode(handle_t *handle,
+ struct inode *inode,
struct ext3_iloc *iloc)
{
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2897,7 @@ int ext3_write_inode(struct inode *inode, int wait)
* commit will leave the blocks being flushed in an unused state on
* disk. (On recovery, the inode will get truncated and the blocks will
* be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)
+ * leave these blocks visible to the user.)
*
* Called with inode->sem down.
*/
@@ -3043,13 +3040,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
return err;
}
-/*
+/*
* On success, We end up with an outstanding reference count against
- * iloc->bh. This _must_ be cleaned up later.
+ * iloc->bh. This _must_ be cleaned up later.
*/
int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
+ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext3_iloc *iloc)
{
int err = 0;
@@ -3139,7 +3136,7 @@ out:
}
#if 0
-/*
+/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
* ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3154,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
BUFFER_TRACE(iloc.bh, "get_write_access");
err = journal_get_write_access(handle, iloc.bh);
if (!err)
- err = ext3_journal_dirty_metadata(handle,
+ err = ext3_journal_dirty_metadata(handle,
iloc.bh);
brelse(iloc.bh);
}
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 2aa7101b27c..85d132c37ee 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -15,13 +15,13 @@
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* Directory entry file type support and forward compatibility hooks
- * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
+ * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
* Hash Tree Directory indexing (c)
- * Daniel Phillips, 2001
+ * Daniel Phillips, 2001
* Hash Tree Directory indexing porting
- * Christopher Li, 2002
+ * Christopher Li, 2002
* Hash Tree Directory indexing cleanup
- * Theodore Ts'o, 2002
+ * Theodore Ts'o, 2002
*/
#include <linux/fs.h>
@@ -76,7 +76,7 @@ static struct buffer_head *ext3_append(handle_t *handle,
#ifdef DX_DEBUG
#define dxtrace(command) command
#else
-#define dxtrace(command)
+#define dxtrace(command)
#endif
struct fake_dirent
@@ -169,7 +169,7 @@ static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
- struct dx_frame *frames,
+ struct dx_frame *frames,
__u32 *start_hash);
static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
struct ext3_dir_entry_2 **res_dir, int *err);
@@ -250,7 +250,7 @@ static void dx_show_index (char * label, struct dx_entry *entries)
}
struct stats
-{
+{
unsigned names;
unsigned space;
unsigned bcount;
@@ -278,7 +278,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_ent
((char *) de - base));
}
space += EXT3_DIR_REC_LEN(de->name_len);
- names++;
+ names++;
}
de = (struct ext3_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
}
@@ -464,7 +464,7 @@ static void dx_release (struct dx_frame *frames)
*/
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
- struct dx_frame *frames,
+ struct dx_frame *frames,
__u32 *start_hash)
{
struct dx_frame *p;
@@ -632,7 +632,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
}
count += ret;
hashval = ~0;
- ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
+ ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
frame, frames, &hashval);
*next_hash = hashval;
if (ret < 0) {
@@ -649,7 +649,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
break;
}
dx_release(frames);
- dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
+ dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
count, *next_hash));
return count;
errout:
@@ -1050,7 +1050,7 @@ struct dentry *ext3_get_parent(struct dentry *child)
parent = ERR_PTR(-ENOMEM);
}
return parent;
-}
+}
#define S_SHIFT 12
static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -1198,7 +1198,7 @@ errout:
* add_dirent_to_buf will attempt search the directory block for
* space. It will return -ENOSPC if no space is available, and -EIO
* and -EEXIST if directory entry already exists.
- *
+ *
* NOTE! bh is NOT released in the case where ENOSPC is returned. In
* all other cases bh is released.
*/
@@ -1572,7 +1572,7 @@ cleanup:
* ext3_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
-static int ext3_delete_entry (handle_t *handle,
+static int ext3_delete_entry (handle_t *handle,
struct inode * dir,
struct ext3_dir_entry_2 * de_del,
struct buffer_head * bh)
@@ -1643,12 +1643,12 @@ static int ext3_add_nondir(handle_t *handle,
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
- * with d_instantiate().
+ * with d_instantiate().
*/
static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
struct nameidata *nd)
{
- handle_t *handle;
+ handle_t *handle;
struct inode * inode;
int err, retries = 0;
@@ -1688,7 +1688,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry,
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -1813,10 +1813,10 @@ static int empty_dir (struct inode * inode)
de1 = (struct ext3_dir_entry_2 *)
((char *) de + le16_to_cpu(de->rec_len));
if (le32_to_cpu(de->inode) != inode->i_ino ||
- !le32_to_cpu(de1->inode) ||
+ !le32_to_cpu(de1->inode) ||
strcmp (".", de->name) ||
strcmp ("..", de1->name)) {
- ext3_warning (inode->i_sb, "empty_dir",
+ ext3_warning (inode->i_sb, "empty_dir",
"bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino);
brelse (bh);
@@ -1883,7 +1883,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
* being truncated, or files being unlinked. */
/* @@@ FIXME: Observation from aviro:
- * I think I can trigger J_ASSERT in ext3_orphan_add(). We block
+ * I think I can trigger J_ASSERT in ext3_orphan_add(). We block
* here (on lock_super()), so race with ext3_link() which might bump
* ->i_nlink. For, say it, character device. Not a regular file,
* not a directory, not a symlink and ->i_nlink > 0.
@@ -1919,8 +1919,8 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
if (!err)
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
- jbd_debug(4, "superblock will point to %ld\n", inode->i_ino);
- jbd_debug(4, "orphan inode %ld will point to %d\n",
+ jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
+ jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
unlock_super(sb);
@@ -2129,7 +2129,7 @@ static int ext3_symlink (struct inode * dir,
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
- EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
+ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2227,7 +2227,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
DQUOT_INIT(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
- EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
+ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -2393,4 +2393,4 @@ struct inode_operations ext3_special_inode_operations = {
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
-};
+};
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 5e1337fd878..b73cba12f79 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -336,7 +336,7 @@ static int verify_reserved_gdb(struct super_block *sb,
unsigned five = 5;
unsigned seven = 7;
unsigned grp;
- __u32 *p = (__u32 *)primary->b_data;
+ __le32 *p = (__le32 *)primary->b_data;
int gdbackups = 0;
while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
@@ -380,7 +380,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
struct buffer_head *dind;
int gdbackups;
struct ext3_iloc iloc;
- __u32 *data;
+ __le32 *data;
int err;
if (test_opt(sb, DEBUG))
@@ -417,7 +417,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
goto exit_bh;
}
- data = (__u32 *)dind->b_data;
+ data = (__le32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
ext3_warning(sb, __FUNCTION__,
"new group %u GDT block "E3FSBLK" not reserved",
@@ -439,8 +439,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
goto exit_dindj;
- n_group_desc = (struct buffer_head **)kmalloc((gdb_num + 1) *
- sizeof(struct buffer_head *), GFP_KERNEL);
+ n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (!n_group_desc) {
err = -ENOMEM;
ext3_warning (sb, __FUNCTION__,
@@ -519,7 +519,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
struct buffer_head *dind;
struct ext3_iloc iloc;
ext3_fsblk_t blk;
- __u32 *data, *end;
+ __le32 *data, *end;
int gdbackups = 0;
int res, i;
int err;
@@ -536,8 +536,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
}
blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
- data = (__u32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
- end = (__u32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
+ data = (__le32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
+ end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
/* Get each reserved primary GDT block and verify it holds backups */
for (res = 0; res < reserved_gdb; res++, blk++) {
@@ -545,7 +545,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
ext3_warning(sb, __FUNCTION__,
"reserved block "E3FSBLK
" not at offset %ld",
- blk, (long)(data - (__u32 *)dind->b_data));
+ blk,
+ (long)(data - (__le32 *)dind->b_data));
err = -EINVAL;
goto exit_bh;
}
@@ -560,7 +561,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
goto exit_bh;
}
if (++data >= end)
- data = (__u32 *)dind->b_data;
+ data = (__le32 *)dind->b_data;
}
for (i = 0; i < reserved_gdb; i++) {
@@ -584,7 +585,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
for (i = 0; i < reserved_gdb; i++) {
int err2;
- data = (__u32 *)primary[i]->b_data;
+ data = (__le32 *)primary[i]->b_data;
/* printk("reserving backup %lu[%u] = %lu\n",
primary[i]->b_blocknr, gdbackups,
blk + primary[i]->b_blocknr); */
@@ -689,7 +690,7 @@ exit_err:
"can't update backup for group %d (err %d), "
"forcing fsck on next reboot", group, err);
sbi->s_mount_state &= ~EXT3_VALID_FS;
- sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS);
+ sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
mark_buffer_dirty(sbi->s_sbh);
}
}
@@ -730,6 +731,18 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
return -EPERM;
}
+ if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
+ le32_to_cpu(es->s_blocks_count)) {
+ ext3_warning(sb, __FUNCTION__, "blocks_count overflow\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
+ le32_to_cpu(es->s_inodes_count)) {
+ ext3_warning(sb, __FUNCTION__, "inodes_count overflow\n");
+ return -EINVAL;
+ }
+
if (reserved_gdb || gdb_off == 0) {
if (!EXT3_HAS_COMPAT_FEATURE(sb,
EXT3_FEATURE_COMPAT_RESIZE_INODE)){
@@ -958,6 +971,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
add = EXT3_BLOCKS_PER_GROUP(sb) - last;
+ if (o_blocks_count + add < o_blocks_count) {
+ ext3_warning(sb, __FUNCTION__, "blocks_count overflow");
+ return -EINVAL;
+ }
+
if (o_blocks_count + add > n_blocks_count)
add = n_blocks_count - o_blocks_count;
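Both new checks in resize.c use the standard unsigned-wraparound test: for unsigned a and b, "a + b < a" holds exactly when the sum overflowed. A standalone illustration with made-up 32-bit block counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t blocks = 0xFFFFF000u, add = 0x2000u;   /* made-up counts */

            /* the 32-bit sum wraps to 0x1000, which is smaller than blocks */
            if ((uint32_t)(blocks + add) < blocks)
                    printf("overflow: resize would be rejected with -EINVAL\n");
            return 0;
    }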
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3559086eee5..8bfd56ef18c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -45,7 +45,7 @@
static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
unsigned long journal_devnum);
static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
- int);
+ unsigned int);
static void ext3_commit_super (struct super_block * sb,
struct ext3_super_block * es,
int sync);
@@ -62,13 +62,13 @@ static void ext3_unlockfs(struct super_block *sb);
static void ext3_write_super (struct super_block * sb);
static void ext3_write_super_lockfs(struct super_block *sb);
-/*
+/*
* Wrappers for journal_start/end.
*
* The only special thing we need to do here is to make sure that all
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
- * appropriate.
+ * appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -90,11 +90,11 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
-/*
+/*
* The only special thing we need to do here is to make sure that all
* journal_stop calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
- * appropriate.
+ * appropriate.
*/
int __ext3_journal_stop(const char *where, handle_t *handle)
{
@@ -159,20 +159,21 @@ static void ext3_handle_error(struct super_block *sb)
if (sb->s_flags & MS_RDONLY)
return;
- if (test_opt (sb, ERRORS_RO)) {
- printk (KERN_CRIT "Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
- } else {
+ if (!test_opt (sb, ERRORS_CONT)) {
journal_t *journal = EXT3_SB(sb)->s_journal;
EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
if (journal)
journal_abort(journal, -EIO);
}
+ if (test_opt (sb, ERRORS_RO)) {
+ printk (KERN_CRIT "Remounting filesystem read-only\n");
+ sb->s_flags |= MS_RDONLY;
+ }
+ ext3_commit_super(sb, es, 1);
if (test_opt(sb, ERRORS_PANIC))
panic("EXT3-fs (device %s): panic forced after error\n",
sb->s_id);
- ext3_commit_super(sb, es, 1);
}
void ext3_error (struct super_block * sb, const char * function,
@@ -369,16 +370,16 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
{
struct list_head *l;
- printk(KERN_ERR "sb orphan head is %d\n",
+ printk(KERN_ERR "sb orphan head is %d\n",
le32_to_cpu(sbi->s_es->s_last_orphan));
printk(KERN_ERR "sb_info orphan list:\n");
list_for_each(l, &sbi->s_orphan) {
struct inode *inode = orphan_list_entry(l);
printk(KERN_ERR " "
- "inode %s:%ld at %p: mode %o, nlink %d, next %d\n",
+ "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
inode->i_sb->s_id, inode->i_ino, inode,
- inode->i_mode, inode->i_nlink,
+ inode->i_mode, inode->i_nlink,
NEXT_ORPHAN(inode));
}
}
@@ -475,7 +476,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
inode_init_once(&ei->vfs_inode);
}
}
-
+
static int init_inodecache(void)
{
ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
@@ -490,8 +491,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(ext3_inode_cachep))
- printk(KERN_INFO "ext3_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(ext3_inode_cachep);
}
static void ext3_clear_inode(struct inode *inode)
@@ -733,8 +733,8 @@ static match_table_t tokens = {
static ext3_fsblk_t get_sb_block(void **data)
{
- ext3_fsblk_t sb_block;
- char *options = (char *) *data;
+ ext3_fsblk_t sb_block;
+ char *options = (char *) *data;
if (!options || strncmp(options, "sb=", 3) != 0)
return 1; /* Default location */
@@ -753,7 +753,7 @@ static ext3_fsblk_t get_sb_block(void **data)
}
static int parse_options (char *options, struct super_block *sb,
- unsigned long *inum, unsigned long *journal_devnum,
+ unsigned int *inum, unsigned long *journal_devnum,
ext3_fsblk_t *n_blocks_count, int is_remount)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
@@ -1174,7 +1174,8 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
static int ext3_check_descriptors (struct super_block * sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
- ext3_fsblk_t block = le32_to_cpu(sbi->s_es->s_first_data_block);
+ ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+ ext3_fsblk_t last_block;
struct ext3_group_desc * gdp = NULL;
int desc_block = 0;
int i;
@@ -1183,12 +1184,17 @@ static int ext3_check_descriptors (struct super_block * sb)
for (i = 0; i < sbi->s_groups_count; i++)
{
+ if (i == sbi->s_groups_count - 1)
+ last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
+ else
+ last_block = first_block +
+ (EXT3_BLOCKS_PER_GROUP(sb) - 1);
+
if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0)
gdp = (struct ext3_group_desc *)
sbi->s_group_desc[desc_block++]->b_data;
- if (le32_to_cpu(gdp->bg_block_bitmap) < block ||
- le32_to_cpu(gdp->bg_block_bitmap) >=
- block + EXT3_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
+ le32_to_cpu(gdp->bg_block_bitmap) > last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Block bitmap for group %d"
@@ -1197,9 +1203,8 @@ static int ext3_check_descriptors (struct super_block * sb)
le32_to_cpu(gdp->bg_block_bitmap));
return 0;
}
- if (le32_to_cpu(gdp->bg_inode_bitmap) < block ||
- le32_to_cpu(gdp->bg_inode_bitmap) >=
- block + EXT3_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
+ le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Inode bitmap for group %d"
@@ -1208,9 +1213,9 @@ static int ext3_check_descriptors (struct super_block * sb)
le32_to_cpu(gdp->bg_inode_bitmap));
return 0;
}
- if (le32_to_cpu(gdp->bg_inode_table) < block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >=
- block + EXT3_BLOCKS_PER_GROUP(sb))
+ if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ last_block)
{
ext3_error (sb, "ext3_check_descriptors",
"Inode table for group %d"
@@ -1219,7 +1224,7 @@ static int ext3_check_descriptors (struct super_block * sb)
le32_to_cpu(gdp->bg_inode_table));
return 0;
}
- block += EXT3_BLOCKS_PER_GROUP(sb);
+ first_block += EXT3_BLOCKS_PER_GROUP(sb);
gdp++;
}
@@ -1301,17 +1306,17 @@ static void ext3_orphan_cleanup (struct super_block * sb,
DQUOT_INIT(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
- "%s: truncating inode %ld to %Ld bytes\n",
+ "%s: truncating inode %lu to %Ld bytes\n",
__FUNCTION__, inode->i_ino, inode->i_size);
- jbd_debug(2, "truncating inode %ld to %Ld bytes\n",
+ jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
inode->i_ino, inode->i_size);
ext3_truncate(inode);
nr_truncates++;
} else {
printk(KERN_DEBUG
- "%s: deleting unreferenced inode %ld\n",
+ "%s: deleting unreferenced inode %lu\n",
__FUNCTION__, inode->i_ino);
- jbd_debug(2, "deleting unreferenced inode %ld\n",
+ jbd_debug(2, "deleting unreferenced inode %lu\n",
inode->i_ino);
nr_orphans++;
}
@@ -1390,7 +1395,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_fsblk_t sb_block = get_sb_block(&data);
ext3_fsblk_t logic_sb_block;
unsigned long offset = 0;
- unsigned long journal_inum = 0;
+ unsigned int journal_inum = 0;
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
@@ -1401,11 +1406,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
int needs_recovery;
__le32 features;
- sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(*sbi));
sbi->s_mount_opt = 0;
sbi->s_resuid = EXT3_DEF_RESUID;
sbi->s_resgid = EXT3_DEF_RESGID;
@@ -1483,7 +1487,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
(EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
- printk(KERN_WARNING
+ printk(KERN_WARNING
"EXT3-fs warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended\n");
/*
@@ -1509,7 +1513,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (blocksize < EXT3_MIN_BLOCK_SIZE ||
blocksize > EXT3_MAX_BLOCK_SIZE) {
- printk(KERN_ERR
+ printk(KERN_ERR
"EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
blocksize, sb->s_id);
goto failed_mount;
@@ -1533,14 +1537,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if (!bh) {
- printk(KERN_ERR
+ printk(KERN_ERR
"EXT3-fs: Can't read superblock on 2nd try.\n");
goto failed_mount;
}
es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
- printk (KERN_ERR
+ printk (KERN_ERR
"EXT3-fs: Magic mismatch, very weird !\n");
goto failed_mount;
}
@@ -1622,10 +1626,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext3;
- sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) +
- EXT3_BLOCKS_PER_GROUP(sb) - 1) /
- EXT3_BLOCKS_PER_GROUP(sb);
+ sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+ le32_to_cpu(es->s_first_data_block) - 1)
+ / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
EXT3_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
@@ -1820,7 +1823,7 @@ out_fail:
/*
* Setup any per-fs journal parameters now. We'll do this both on
* initial mount, once the journal has been initialised but before we've
- * done any recovery; and again on any subsequent remount.
+ * done any recovery; and again on any subsequent remount.
*/
static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
{
@@ -1840,7 +1843,8 @@ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
spin_unlock(&journal->j_state_lock);
}
-static journal_t *ext3_get_journal(struct super_block *sb, int journal_inum)
+static journal_t *ext3_get_journal(struct super_block *sb,
+ unsigned int journal_inum)
{
struct inode *journal_inode;
journal_t *journal;
@@ -1975,7 +1979,7 @@ static int ext3_load_journal(struct super_block *sb,
unsigned long journal_devnum)
{
journal_t *journal;
- int journal_inum = le32_to_cpu(es->s_journal_inum);
+ unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
dev_t journal_dev;
int err = 0;
int really_read_only;
@@ -2061,7 +2065,7 @@ static int ext3_load_journal(struct super_block *sb,
static int ext3_create_journal(struct super_block * sb,
struct ext3_super_block * es,
- int journal_inum)
+ unsigned int journal_inum)
{
journal_t *journal;
@@ -2074,7 +2078,7 @@ static int ext3_create_journal(struct super_block * sb,
if (!(journal = ext3_get_journal(sb, journal_inum)))
return -EINVAL;
- printk(KERN_INFO "EXT3-fs: creating new journal on inode %d\n",
+ printk(KERN_INFO "EXT3-fs: creating new journal on inode %u\n",
journal_inum);
if (journal_create(journal)) {
@@ -2342,10 +2346,8 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
*/
ext3_clear_journal_err(sb, es);
sbi->s_mount_state = le16_to_cpu(es->s_state);
- if ((ret = ext3_group_extend(sb, es, n_blocks_count))) {
- err = ret;
+ if ((err = ext3_group_extend(sb, es, n_blocks_count)))
goto restore_opts;
- }
if (!ext3_setup_super (sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
}
@@ -2734,7 +2736,7 @@ static int __init init_ext3_fs(void)
out:
destroy_inodecache();
out1:
- exit_ext3_xattr();
+ exit_ext3_xattr();
return err;
}
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index a44a0562203..f86f2482f01 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -75,7 +75,7 @@
#ifdef EXT3_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
- printk(KERN_DEBUG "inode %s:%ld: ", \
+ printk(KERN_DEBUG "inode %s:%lu: ", \
inode->i_sb->s_id, inode->i_ino); \
printk(f); \
printk("\n"); \
@@ -233,7 +233,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext3_xattr_check_block(bh)) {
bad_block: ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: bad block "E3FSBLK, inode->i_ino,
+ "inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
@@ -375,7 +375,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext3_xattr_check_block(bh)) {
ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: bad block "E3FSBLK, inode->i_ino,
+ "inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
@@ -647,7 +647,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext3_xattr_check_block(bs->bh)) {
ext3_error(sb, __FUNCTION__,
- "inode %ld: bad block "E3FSBLK, inode->i_ino,
+ "inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
@@ -848,7 +848,7 @@ cleanup_dquot:
bad_block:
ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: bad block "E3FSBLK, inode->i_ino,
+ "inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
@@ -1077,14 +1077,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
if (!bh) {
ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: block "E3FSBLK" read error", inode->i_ino,
+ "inode %lu: block "E3FSBLK" read error", inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
}
if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) {
ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: bad block "E3FSBLK, inode->i_ino,
+ "inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
}
@@ -1211,7 +1211,7 @@ again:
bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) {
ext3_error(inode->i_sb, __FUNCTION__,
- "inode %ld: block %lu read error",
+ "inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
EXT3_XATTR_REFCOUNT_MAX) {
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 97b967b84fc..82cc4f59e3b 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -58,8 +58,7 @@ int __init fat_cache_init(void)
void fat_cache_destroy(void)
{
- if (kmem_cache_destroy(fat_cache_cachep))
- printk(KERN_INFO "fat_cache: not all structures were freed\n");
+ kmem_cache_destroy(fat_cache_cachep);
}
static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 31b7174176b..ab96ae82375 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -50,14 +50,14 @@ static int fat_add_cluster(struct inode *inode)
return err;
}
-static int __fat_get_blocks(struct inode *inode, sector_t iblock,
- unsigned long *max_blocks,
- struct buffer_head *bh_result, int create)
+static inline int __fat_get_block(struct inode *inode, sector_t iblock,
+ unsigned long *max_blocks,
+ struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- sector_t phys;
unsigned long mapped_blocks;
+ sector_t phys;
int err, offset;
err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
@@ -73,7 +73,7 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)",
- MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
+ MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
return -EIO;
}
@@ -93,34 +93,29 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock,
err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
if (err)
return err;
+
BUG_ON(!phys);
BUG_ON(*max_blocks != mapped_blocks);
set_buffer_new(bh_result);
map_bh(bh_result, sb, phys);
+
return 0;
}
-static int fat_get_blocks(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int fat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
- int err;
unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int err;
- err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
+ err = __fat_get_block(inode, iblock, &max_blocks, bh_result, create);
if (err)
return err;
bh_result->b_size = max_blocks << sb->s_blocksize_bits;
return 0;
}
-static int fat_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- unsigned long max_blocks = 1;
- return __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
-}
-
static int fat_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, fat_get_block, wbc);
@@ -188,7 +183,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
* condition of fat_get_block() and ->truncate().
*/
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
- offset, nr_segs, fat_get_blocks, NULL);
+ offset, nr_segs, fat_get_block, NULL);
}
static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
@@ -375,8 +370,6 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_flags |= S_IMMUTABLE;
}
MSDOS_I(inode)->i_attrs = de->attr & ATTR_UNUSED;
- /* this is as close to the truth as we can get ... */
- inode->i_blksize = sbi->cluster_size;
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
inode->i_mtime.tv_sec =
@@ -528,8 +521,7 @@ static int __init fat_init_inodecache(void)
static void __exit fat_destroy_inodecache(void)
{
- if (kmem_cache_destroy(fat_inode_cachep))
- printk(KERN_INFO "fat_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(fat_inode_cachep);
}
static int fat_remount(struct super_block *sb, int *flags, char *data)
@@ -1137,7 +1129,6 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->i_start = 0;
inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry);
}
- inode->i_blksize = sbi->cluster_size;
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
MSDOS_I(inode)->i_logstart = 0;
@@ -1168,11 +1159,10 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
long error;
char buf[50];
- sbi = kmalloc(sizeof(struct msdos_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct msdos_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(struct msdos_sb_info));
sb->s_flags |= MS_NODIRATIME;
sb->s_magic = MSDOS_SUPER_MAGIC;
diff --git a/fs/file.c b/fs/file.c
index b3c6b82e6a9..8d3bfca7714 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -281,10 +281,8 @@ static struct fdtable *alloc_fdtable(int nr)
out2:
nfds = fdt->max_fdset;
out:
- if (new_openset)
- free_fdset(new_openset, nfds);
- if (new_execset)
- free_fdset(new_execset, nfds);
+ free_fdset(new_openset, nfds);
+ free_fdset(new_execset, nfds);
kfree(fdt);
return NULL;
}
diff --git a/fs/file_table.c b/fs/file_table.c
index 0131ba06e1e..bc35a40417d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -169,7 +169,7 @@ void fastcall __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
- if (unlikely(inode->i_cdev != NULL))
+ if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
if (file->f_mode & FMODE_WRITE)
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index d35979a5874..c8a92652612 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -252,7 +252,7 @@ enum {
* Get filesystem private data from VFS inode.
*/
#define VXFS_INO(ip) \
- ((struct vxfs_inode_info *)(ip)->u.generic_ip)
+ ((struct vxfs_inode_info *)(ip)->i_private)
/*
* Get filesystem private data from VFS superblock.
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ca6a3971477..4786d51ad3b 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -239,11 +239,10 @@ vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip)
ip->i_ctime.tv_nsec = 0;
ip->i_mtime.tv_nsec = 0;
- ip->i_blksize = PAGE_SIZE;
ip->i_blocks = vip->vii_blocks;
ip->i_generation = vip->vii_gen;
- ip->u.generic_ip = (void *)vip;
+ ip->i_private = vip;
}
@@ -338,5 +337,5 @@ vxfs_read_inode(struct inode *ip)
void
vxfs_clear_inode(struct inode *ip)
{
- kmem_cache_free(vxfs_inode_cachep, ip->u.generic_ip);
+ kmem_cache_free(vxfs_inode_cachep, ip->i_private);
}
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 46fe60b2da2..79ec1f23d4d 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -23,7 +23,7 @@ static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
{
struct fuse_conn *fc;
mutex_lock(&fuse_mutex);
- fc = file->f_dentry->d_inode->u.generic_ip;
+ fc = file->f_dentry->d_inode->i_private;
if (fc)
fc = fuse_conn_get(fc);
mutex_unlock(&fuse_mutex);
@@ -98,7 +98,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
inode->i_op = iop;
inode->i_fop = fop;
inode->i_nlink = nlink;
- inode->u.generic_ip = fc;
+ inode->i_private = fc;
d_add(dentry, inode);
return dentry;
}
@@ -150,7 +150,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
struct dentry *dentry = fc->ctl_dentry[i];
- dentry->d_inode->u.generic_ip = NULL;
+ dentry->d_inode->i_private = NULL;
d_drop(dentry);
dput(dentry);
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 7d25092262a..cb7cadb0b79 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -118,7 +118,6 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
inode->i_uid = attr->uid;
inode->i_gid = attr->gid;
i_size_write(inode, attr->size);
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = attr->blocks;
inode->i_atime.tv_sec = attr->atime;
inode->i_atime.tv_nsec = attr->atimensec;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 13231dd5ce6..0d200068d0a 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -249,10 +249,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
sb = tree->inode->i_sb;
size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
sizeof(struct page *);
- node = kmalloc(size, GFP_KERNEL);
+ node = kzalloc(size, GFP_KERNEL);
if (!node)
return NULL;
- memset(node, 0, size);
node->tree = tree;
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 40035799431..5fd0ed71f92 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -21,10 +21,9 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
struct page *page;
unsigned int size;
- tree = kmalloc(sizeof(*tree), GFP_KERNEL);
+ tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
return NULL;
- memset(tree, 0, sizeof(*tree));
init_MUTEX(&tree->tree_lock);
spin_lock_init(&tree->hash_lock);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 315cf44a90b..d05641c35fc 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -154,7 +154,6 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
inode->i_gid = current->fsgid;
inode->i_nlink = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- inode->i_blksize = HFS_SB(sb)->alloc_blksz;
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
@@ -284,7 +283,6 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_uid = hsb->s_uid;
inode->i_gid = hsb->s_gid;
inode->i_nlink = 1;
- inode->i_blksize = HFS_SB(inode->i_sb)->alloc_blksz;
if (idata->key)
HFS_I(inode)->cat_key = *idata->key;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 34937ee83ab..d43b4fcc8ad 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -356,11 +356,10 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root_inode;
int res;
- sbi = kmalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(struct hfs_sb_info));
INIT_HLIST_HEAD(&sbi->rsrc_inodes);
res = -EINVAL;
@@ -455,8 +454,7 @@ static int __init init_hfs_fs(void)
static void __exit exit_hfs_fs(void)
{
unregister_filesystem(&hfs_fs_type);
- if (kmem_cache_destroy(hfs_inode_cachep))
- printk(KERN_ERR "hfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(hfs_inode_cachep);
}
module_init(init_hfs_fs)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 77bf434da67..29da6574ba7 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -409,10 +409,9 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
sb = tree->inode->i_sb;
size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
sizeof(struct page *);
- node = kmalloc(size, GFP_KERNEL);
+ node = kzalloc(size, GFP_KERNEL);
if (!node)
return NULL;
- memset(node, 0, size);
node->tree = tree;
node->this = cnid;
set_bit(HFS_BNODE_NEW, &node->flags);
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index cfc852fdd1b..a9b9e872e29 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -24,10 +24,9 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
struct page *page;
unsigned int size;
- tree = kmalloc(sizeof(*tree), GFP_KERNEL);
+ tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
return NULL;
- memset(tree, 0, sizeof(*tree));
init_MUTEX(&tree->tree_lock);
spin_lock_init(&tree->hash_lock);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 924ecdef809..0eb1a609266 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -304,7 +304,6 @@ struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
inode->i_gid = current->fsgid;
inode->i_nlink = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- inode->i_blksize = HFSPLUS_SB(sb).alloc_blksz;
INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
init_MUTEX(&HFSPLUS_I(inode).extents_lock);
atomic_set(&HFSPLUS_I(inode).opencnt, 0);
@@ -407,7 +406,6 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
HFSPLUS_I(inode).dev = 0;
- inode->i_blksize = HFSPLUS_SB(inode->i_sb).alloc_blksz;
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d279d5924f2..194eede52fa 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -493,8 +493,7 @@ static int __init init_hfsplus_fs(void)
static void __exit exit_hfsplus_fs(void)
{
unregister_filesystem(&hfsplus_fs_type);
- if (kmem_cache_destroy(hfsplus_inode_cachep))
- printk(KERN_ERR "hfsplus_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(hfsplus_inode_cachep);
}
module_init(init_hfsplus_fs)
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index b82e3d9c879..322e876c35e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -156,7 +156,6 @@ static int read_name(struct inode *ino, char *name)
ino->i_mode = i_mode;
ino->i_nlink = i_nlink;
ino->i_size = i_size;
- ino->i_blksize = i_blksize;
ino->i_blocks = i_blocks;
return(0);
}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 2807aa833e6..b52b7381d10 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -76,7 +76,7 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
return NULL;
}
- qbh->data = data = (char *)kmalloc(2048, GFP_NOFS);
+ qbh->data = data = kmalloc(2048, GFP_NOFS);
if (!data) {
printk("HPFS: hpfs_map_4sectors: out of memory\n");
goto bail;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 56f2c338c4d..bcf6ee36e06 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -17,7 +17,6 @@ void hpfs_init_inode(struct inode *i)
i->i_gid = hpfs_sb(sb)->sb_gid;
i->i_mode = hpfs_sb(sb)->sb_mode;
hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
- i->i_blksize = 512;
i->i_size = -1;
i->i_blocks = -1;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 8fe51c34378..450b5e0b478 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -203,8 +203,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(hpfs_inode_cachep))
- printk(KERN_INFO "hpfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(hpfs_inode_cachep);
}
/*
@@ -462,11 +461,10 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
int o;
- sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
- memset(sbi, 0, sizeof(*sbi));
sbi->sb_bmp_dir = NULL;
sbi->sb_cp_table = NULL;
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 3a9bdf58166..dcb6d2e988b 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -152,7 +152,6 @@ static void hppfs_read_inode(struct inode *ino)
ino->i_mode = proc_ino->i_mode;
ino->i_nlink = proc_ino->i_nlink;
ino->i_size = proc_ino->i_size;
- ino->i_blksize = proc_ino->i_blksize;
ino->i_blocks = proc_ino->i_blocks;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c3920c96dad..e025a31b4c6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -357,7 +357,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode->i_mode = mode;
inode->i_uid = uid;
inode->i_gid = gid;
- inode->i_blksize = HPAGE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
diff --git a/fs/inode.c b/fs/inode.c
index 0bf9f0444a9..f5c04dd9ae8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -163,7 +163,7 @@ static struct inode *alloc_inode(struct super_block *sb)
bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
mapping->backing_dev_info = bdi;
}
- memset(&inode->u, 0, sizeof(inode->u));
+ inode->i_private = 0;
inode->i_mapping = mapping;
}
return inode;
@@ -254,9 +254,9 @@ void clear_inode(struct inode *inode)
DQUOT_DROP(inode);
if (inode->i_sb && inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
- if (inode->i_bdev)
+ if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
- if (inode->i_cdev)
+ if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
inode->i_state = I_CLEAR;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 14391361c88..4527692f432 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -96,9 +96,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(isofs_inode_cachep))
- printk(KERN_INFO "iso_inode_cache: not all structures were "
- "freed\n");
+ kmem_cache_destroy(isofs_inode_cachep);
}
static int isofs_remount(struct super_block *sb, int *flags, char *data)
@@ -557,11 +555,10 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
struct iso9660_options opt;
struct isofs_sb_info * sbi;
- sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
- memset(sbi, 0, sizeof(*sbi));
if (!parse_options((char *)data, &opt))
goto out_freesbi;
@@ -1238,7 +1235,7 @@ static void isofs_read_inode(struct inode *inode)
}
inode->i_uid = sbi->s_uid;
inode->i_gid = sbi->s_gid;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
ei->i_format_parm[0] = 0;
ei->i_format_parm[1] = 0;
@@ -1294,7 +1291,6 @@ static void isofs_read_inode(struct inode *inode)
isonum_711 (de->ext_attr_length));
/* Set the number of blocks for stat() - should be done before RR */
- inode->i_blksize = PAGE_CACHE_SIZE; /* For stat() only */
inode->i_blocks = (inode->i_size + 511) >> 9;
/*
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 47678a26c13..0208cc7ac5d 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -1,6 +1,6 @@
/*
* linux/fs/checkpoint.c
- *
+ *
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
* Copyright 1999 Red Hat Software --- All Rights Reserved
@@ -9,8 +9,8 @@
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
- * Checkpoint routines for the generic filesystem journaling code.
- * Part of the ext2fs journaling system.
+ * Checkpoint routines for the generic filesystem journaling code.
+ * Part of the ext2fs journaling system.
*
* Checkpointing is the process of ensuring that a section of the log is
* committed fully to disk, so that that portion of the log can be
@@ -145,6 +145,7 @@ void __log_wait_for_space(journal_t *journal)
* jbd_unlock_bh_state().
*/
static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
+ __releases(journal->j_list_lock)
{
get_bh(bh);
spin_unlock(&journal->j_list_lock);
@@ -225,7 +226,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
* Try to flush one buffer from the checkpoint list to disk.
*
* Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.
+ * scan of the checkpoint list.
*
* Called with j_list_lock held and drops it if 1 is returned
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -269,7 +270,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
* possibly block, while still holding the journal lock.
* We cannot afford to let the transaction logic start
* messing around with this buffer before we write it to
- * disk, as that would break recoverability.
+ * disk, as that would break recoverability.
*/
BUFFER_TRACE(bh, "queue");
get_bh(bh);
@@ -292,7 +293,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
* Perform an actual checkpoint. We take the first transaction on the
* list of transactions to be checkpointed and send all its buffers
* to disk. We submit larger chunks of data at once.
- *
+ *
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
@@ -303,10 +304,10 @@ int log_do_checkpoint(journal_t *journal)
jbd_debug(1, "Start checkpoint\n");
- /*
+ /*
* First thing: if there are any transactions in the log which
* don't need checkpointing, just eliminate them from the
- * journal straight away.
+ * journal straight away.
*/
result = cleanup_journal_tail(journal);
jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
@@ -384,9 +385,9 @@ out:
* we have already got rid of any since the last update of the log tail
* in the journal superblock. If so, we can instantly roll the
* superblock forward to remove those transactions from the log.
- *
+ *
* Return <0 on error, 0 on success, 1 if there was nothing to clean up.
- *
+ *
* Called with the journal lock held.
*
* This is the only part of the journaling code which really needs to be
@@ -403,8 +404,8 @@ int cleanup_journal_tail(journal_t *journal)
unsigned long blocknr, freed;
/* OK, work out the oldest transaction remaining in the log, and
- * the log block it starts at.
- *
+ * the log block it starts at.
+ *
* If the log is now empty, we need to work out which is the
* next transaction ID we will write, and where it will
* start. */
@@ -479,7 +480,7 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
if (!jh)
return 0;
- last_jh = jh->b_cpprev;
+ last_jh = jh->b_cpprev;
do {
jh = next_jh;
next_jh = jh->b_cpnext;
@@ -557,7 +558,7 @@ out:
return ret;
}
-/*
+/*
* journal_remove_checkpoint: called after a buffer has been committed
* to disk (either by being write-back flushed to disk, or being
* committed to the log).
@@ -635,7 +636,7 @@ out:
* Called with the journal locked.
* Called with j_list_lock held.
*/
-void __journal_insert_checkpoint(struct journal_head *jh,
+void __journal_insert_checkpoint(struct journal_head *jh,
transaction_t *transaction)
{
JBUFFER_TRACE(jh, "entry");
@@ -657,7 +658,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
/*
* We've finished with this transaction structure: adios...
- *
+ *
* The transaction must have no links except for the checkpoint by this
* point.
*
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 42da6078431..32a8caf0c41 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -160,6 +160,117 @@ static int journal_write_commit_record(journal_t *journal,
return (ret == -EIO);
}
+static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
+{
+ int i;
+
+ for (i = 0; i < bufs; i++) {
+ wbuf[i]->b_end_io = end_buffer_write_sync;
+ /* We use-up our safety reference in submit_bh() */
+ submit_bh(WRITE, wbuf[i]);
+ }
+}
+
+/*
+ * Submit all the data buffers to disk
+ */
+static void journal_submit_data_buffers(journal_t *journal,
+ transaction_t *commit_transaction)
+{
+ struct journal_head *jh;
+ struct buffer_head *bh;
+ int locked;
+ int bufs = 0;
+ struct buffer_head **wbuf = journal->j_wbuf;
+
+ /*
+ * Whenever we unlock the journal and sleep, things can get added
+ * onto ->t_sync_datalist, so we have to keep looping back to
+ * write_out_data until we *know* that the list is empty.
+ *
+ * Cleanup any flushed data buffers from the data list. Even in
+ * abort mode, we want to flush this out as soon as possible.
+ */
+write_out_data:
+ cond_resched();
+ spin_lock(&journal->j_list_lock);
+
+ while (commit_transaction->t_sync_datalist) {
+ jh = commit_transaction->t_sync_datalist;
+ bh = jh2bh(jh);
+ locked = 0;
+
+ /* Get reference just to make sure buffer does not disappear
+ * when we are forced to drop various locks */
+ get_bh(bh);
+ /* If the buffer is dirty, we need to submit IO and hence
+ * we need the buffer lock. We try to lock the buffer without
+ * blocking. If we fail, we need to drop j_list_lock and do
+ * blocking lock_buffer().
+ */
+ if (buffer_dirty(bh)) {
+ if (test_set_buffer_locked(bh)) {
+ BUFFER_TRACE(bh, "needs blocking lock");
+ spin_unlock(&journal->j_list_lock);
+ /* Write out all data to prevent deadlocks */
+ journal_do_submit_data(wbuf, bufs);
+ bufs = 0;
+ lock_buffer(bh);
+ spin_lock(&journal->j_list_lock);
+ }
+ locked = 1;
+ }
+ /* We have to get bh_state lock. Again out of order, sigh. */
+ if (!inverted_lock(journal, bh)) {
+ jbd_lock_bh_state(bh);
+ spin_lock(&journal->j_list_lock);
+ }
+ /* Someone already cleaned up the buffer? */
+ if (!buffer_jbd(bh)
+ || jh->b_transaction != commit_transaction
+ || jh->b_jlist != BJ_SyncData) {
+ jbd_unlock_bh_state(bh);
+ if (locked)
+ unlock_buffer(bh);
+ BUFFER_TRACE(bh, "already cleaned up");
+ put_bh(bh);
+ continue;
+ }
+ if (locked && test_clear_buffer_dirty(bh)) {
+ BUFFER_TRACE(bh, "needs writeout, adding to array");
+ wbuf[bufs++] = bh;
+ __journal_file_buffer(jh, commit_transaction,
+ BJ_Locked);
+ jbd_unlock_bh_state(bh);
+ if (bufs == journal->j_wbufsize) {
+ spin_unlock(&journal->j_list_lock);
+ journal_do_submit_data(wbuf, bufs);
+ bufs = 0;
+ goto write_out_data;
+ }
+ }
+ else {
+ BUFFER_TRACE(bh, "writeout complete: unfile");
+ __journal_unfile_buffer(jh);
+ jbd_unlock_bh_state(bh);
+ if (locked)
+ unlock_buffer(bh);
+ journal_remove_journal_head(bh);
+ /* Once for our safety reference, once for
+ * journal_remove_journal_head() */
+ put_bh(bh);
+ put_bh(bh);
+ }
+
+ if (lock_need_resched(&journal->j_list_lock)) {
+ spin_unlock(&journal->j_list_lock);
+ goto write_out_data;
+ }
+ }
+ spin_unlock(&journal->j_list_lock);
+ journal_do_submit_data(wbuf, bufs);
+}
+
/*
* journal_commit_transaction
*
@@ -313,80 +424,13 @@ void journal_commit_transaction(journal_t *journal)
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
-
err = 0;
- /*
- * Whenever we unlock the journal and sleep, things can get added
- * onto ->t_sync_datalist, so we have to keep looping back to
- * write_out_data until we *know* that the list is empty.
- */
- bufs = 0;
- /*
- * Cleanup any flushed data buffers from the data list. Even in
- * abort mode, we want to flush this out as soon as possible.
- */
-write_out_data:
- cond_resched();
- spin_lock(&journal->j_list_lock);
-
- while (commit_transaction->t_sync_datalist) {
- struct buffer_head *bh;
-
- jh = commit_transaction->t_sync_datalist;
- commit_transaction->t_sync_datalist = jh->b_tnext;
- bh = jh2bh(jh);
- if (buffer_locked(bh)) {
- BUFFER_TRACE(bh, "locked");
- if (!inverted_lock(journal, bh))
- goto write_out_data;
- __journal_temp_unlink_buffer(jh);
- __journal_file_buffer(jh, commit_transaction,
- BJ_Locked);
- jbd_unlock_bh_state(bh);
- if (lock_need_resched(&journal->j_list_lock)) {
- spin_unlock(&journal->j_list_lock);
- goto write_out_data;
- }
- } else {
- if (buffer_dirty(bh)) {
- BUFFER_TRACE(bh, "start journal writeout");
- get_bh(bh);
- wbuf[bufs++] = bh;
- if (bufs == journal->j_wbufsize) {
- jbd_debug(2, "submit %d writes\n",
- bufs);
- spin_unlock(&journal->j_list_lock);
- ll_rw_block(SWRITE, bufs, wbuf);
- journal_brelse_array(wbuf, bufs);
- bufs = 0;
- goto write_out_data;
- }
- } else {
- BUFFER_TRACE(bh, "writeout complete: unfile");
- if (!inverted_lock(journal, bh))
- goto write_out_data;
- __journal_unfile_buffer(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
- put_bh(bh);
- if (lock_need_resched(&journal->j_list_lock)) {
- spin_unlock(&journal->j_list_lock);
- goto write_out_data;
- }
- }
- }
- }
-
- if (bufs) {
- spin_unlock(&journal->j_list_lock);
- ll_rw_block(SWRITE, bufs, wbuf);
- journal_brelse_array(wbuf, bufs);
- spin_lock(&journal->j_list_lock);
- }
+ journal_submit_data_buffers(journal, commit_transaction);
/*
* Wait for all previously submitted IO to complete.
*/
+ spin_lock(&journal->j_list_lock);
while (commit_transaction->t_locked_list) {
struct buffer_head *bh;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f66724ce443..2fc66c3e668 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -181,7 +181,7 @@ loop:
transaction->t_expires))
should_sleep = 0;
if (journal->j_flags & JFS_UNMOUNT)
- should_sleep = 0;
+ should_sleep = 0;
if (should_sleep) {
spin_unlock(&journal->j_state_lock);
schedule();
@@ -271,7 +271,7 @@ static void journal_kill_thread(journal_t *journal)
int journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
struct journal_head **jh_out,
- int blocknr)
+ unsigned long blocknr)
{
int need_copy_out = 0;
int done_copy_out = 0;
@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
* this is a no-op. If needed, we can use j_blk_offset - everything is
* ready.
*/
-int journal_bmap(journal_t *journal, unsigned long blocknr,
+int journal_bmap(journal_t *journal, unsigned long blocknr,
unsigned long *retp)
{
int err = 0;
@@ -696,13 +696,13 @@ fail:
* @bdev: Block device on which to create the journal
 * @fs_dev: Device which holds the journalled filesystem for this journal.
* @start: Block nr Start of journal.
- * @len: Lenght of the journal in blocks.
+ * @len: Length of the journal in blocks.
* @blocksize: blocksize of journalling device
* @returns: a newly created journal_t *
- *
+ *
* journal_init_dev creates a journal which maps a fixed contiguous
* range of blocks on an arbitrary block device.
- *
+ *
*/
journal_t * journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
@@ -739,11 +739,11 @@ journal_t * journal_init_dev(struct block_device *bdev,
return journal;
}
-
-/**
+
+/**
 * journal_t * journal_init_inode () - creates a journal which maps to an inode.
* @inode: An inode to create the journal in
- *
+ *
* journal_init_inode creates a journal which maps an on-disk inode as
* the journal. The inode must exist already, must support bmap() and
* must have all data blocks preallocated.
@@ -763,7 +763,7 @@ journal_t * journal_init_inode (struct inode *inode)
journal->j_inode = inode;
jbd_debug(1,
"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
- journal, inode->i_sb->s_id, inode->i_ino,
+ journal, inode->i_sb->s_id, inode->i_ino,
(long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
@@ -798,10 +798,10 @@ journal_t * journal_init_inode (struct inode *inode)
return journal;
}
-/*
+/*
* If the journal init or create aborts, we need to mark the journal
* superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock.
+ * back a bogus superblock.
*/
static void journal_fail_superblock (journal_t *journal)
{
@@ -820,7 +820,7 @@ static void journal_fail_superblock (journal_t *journal)
static int journal_reset(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;
- unsigned int first, last;
+ unsigned long first, last;
first = be32_to_cpu(sb->s_first);
last = be32_to_cpu(sb->s_maxlen);
@@ -844,13 +844,13 @@ static int journal_reset(journal_t *journal)
return 0;
}
-/**
+/**
* int journal_create() - Initialise the new journal file
* @journal: Journal to create. This structure must have been initialised
- *
+ *
* Given a journal_t structure which tells us which disk blocks we can
* use, create a new journal superblock and initialise all of the
- * journal fields from scratch.
+ * journal fields from scratch.
**/
int journal_create(journal_t *journal)
{
@@ -915,7 +915,7 @@ int journal_create(journal_t *journal)
return journal_reset(journal);
}
-/**
+/**
* void journal_update_superblock() - Update journal sb on disk.
* @journal: The journal to update.
* @wait: Set to '0' if you don't want to wait for IO completion.
@@ -939,7 +939,7 @@ void journal_update_superblock(journal_t *journal, int wait)
journal->j_transaction_sequence) {
jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
"(start %ld, seq %d, errno %d)\n",
- journal->j_tail, journal->j_tail_sequence,
+ journal->j_tail, journal->j_tail_sequence,
journal->j_errno);
goto out;
}
@@ -1062,7 +1062,7 @@ static int load_superblock(journal_t *journal)
/**
* int journal_load() - Read journal from disk.
* @journal: Journal to act on.
- *
+ *
* Given a journal_t structure which tells us which disk blocks contain
* a journal, read the journal from disk to initialise the in-memory
* structures.
@@ -1094,7 +1094,7 @@ int journal_load(journal_t *journal)
/*
* Create a slab for this blocksize
*/
- err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
+ err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
if (err)
return err;
@@ -1172,9 +1172,9 @@ void journal_destroy(journal_t *journal)
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features
- *
+ *
* Check whether the journal uses all of a given set of
- * features. Return true (non-zero) if it does.
+ * features. Return true (non-zero) if it does.
**/
int journal_check_used_features (journal_t *journal, unsigned long compat,
@@ -1203,7 +1203,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features
- *
+ *
* Check whether the journaling code supports the use of
* all of a given set of features on this journal. Return true
* (non-zero) if it can. */
@@ -1241,7 +1241,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
* @incompat: bitmask of incompatible features
*
* Mark a given journal feature as present on the
- * superblock. Returns true if the requested features could be set.
+ * superblock. Returns true if the requested features could be set.
*
*/
@@ -1327,7 +1327,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
/**
* int journal_flush () - Flush journal
* @journal: Journal to act on.
- *
+ *
* Flush all data for a given journal to disk and empty the journal.
* Filesystems can use this when remounting readonly to ensure that
* recovery does not need to happen on remount.
@@ -1394,7 +1394,7 @@ int journal_flush(journal_t *journal)
* int journal_wipe() - Wipe journal contents
* @journal: Journal to act on.
* @write: flag (see below)
- *
+ *
* Wipe out all of the contents of a journal, safely. This will produce
* a warning if the journal contains any valid recovery information.
* Must be called between journal_init_*() and journal_load().
@@ -1449,7 +1449,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
/*
* Journal abort has very specific semantics, which we describe
- * for journal abort.
+ * for journal abort.
*
 * Two internal functions, which provide abort to the jbd layer
* itself are here.
@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* Perform a complete, immediate shutdown of the ENTIRE
* journal (not of a single transaction). This operation cannot be
* undone without closing and reopening the journal.
- *
+ *
* The journal_abort function is intended to support higher level error
* recovery mechanisms such as the ext2/ext3 remount-readonly error
* mode.
@@ -1538,7 +1538,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* supply an errno; a null errno implies that absolutely no further
* writes are done to the journal (unless there are any already in
* progress).
- *
+ *
*/
void journal_abort(journal_t *journal, int errno)
@@ -1546,7 +1546,7 @@ void journal_abort(journal_t *journal, int errno)
__journal_abort_soft(journal, errno);
}
-/**
+/**
* int journal_errno () - returns the journal's error state.
* @journal: journal to examine.
*
@@ -1570,7 +1570,7 @@ int journal_errno(journal_t *journal)
return err;
}
-/**
+/**
* int journal_clear_err () - clears the journal's error state
* @journal: journal to act on.
*
@@ -1590,7 +1590,7 @@ int journal_clear_err(journal_t *journal)
return err;
}
-/**
+/**
* void journal_ack_err() - Ack journal err.
* @journal: journal to act on.
*
@@ -1612,7 +1612,7 @@ int journal_blocks_per_page(struct inode *inode)
/*
* Simple support for retrying memory allocations. Introduced to help to
- * debug different VM deadlock avoidance strategies.
+ * debug different VM deadlock avoidance strategies.
*/
void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
{
@@ -2047,13 +2047,7 @@ static int __init journal_init(void)
{
int ret;
-/* Static check for data structure consistency. There's no code
- * invoked --- we'll just get a linker failure if things aren't right.
- */
- extern void journal_bad_superblock_size(void);
- if (sizeof(struct journal_superblock_s) != 1024)
- journal_bad_superblock_size();
-
+ BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
ret = journal_init_caches();
if (ret != 0)
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index de5bafb4e85..445eed6ce5d 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -1,6 +1,6 @@
/*
* linux/fs/recovery.c
- *
+ *
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
* Copyright 1999-2000 Red Hat Software --- All Rights Reserved
@@ -10,7 +10,7 @@
* option, any later version, incorporated herein by reference.
*
* Journal recovery routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
+ * part of the ext2fs journaling system.
*/
#ifndef __KERNEL__
@@ -25,9 +25,9 @@
/*
* Maintain information about the progress of the recovery job, so that
- * the different passes can carry information between them.
+ * the different passes can carry information between them.
*/
-struct recovery_info
+struct recovery_info
{
tid_t start_transaction;
tid_t end_transaction;
@@ -116,7 +116,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
err = 0;
failed:
- if (nbufs)
+ if (nbufs)
journal_brelse_array(bufs, nbufs);
return err;
}
@@ -128,7 +128,7 @@ failed:
* Read a block from the journal
*/
-static int jread(struct buffer_head **bhp, journal_t *journal,
+static int jread(struct buffer_head **bhp, journal_t *journal,
unsigned int offset)
{
int err;
@@ -212,14 +212,14 @@ do { \
/**
 * journal_recover - recovers an on-disk journal
* @journal: the journal to recover
- *
+ *
* The primary function for recovering the log contents when mounting a
- * journaled device.
+ * journaled device.
*
* Recovery is done in three passes. In the first pass, we look for the
* end of the log. In the second, we assemble the list of revoke
* blocks. In the third and final pass, we replay any un-revoked blocks
- * in the log.
+ * in the log.
*/
int journal_recover(journal_t *journal)
{
@@ -231,10 +231,10 @@ int journal_recover(journal_t *journal)
memset(&info, 0, sizeof(info));
sb = journal->j_superblock;
- /*
+ /*
* The journal superblock's s_start field (the current log head)
* is always zero if, and only if, the journal was cleanly
- * unmounted.
+ * unmounted.
*/
if (!sb->s_start) {
@@ -253,7 +253,7 @@ int journal_recover(journal_t *journal)
jbd_debug(0, "JBD: recovery, exit status %d, "
"recovered transactions %u to %u\n",
err, info.start_transaction, info.end_transaction);
- jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
+ jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
/* Restart the log at the next transaction ID, thus invalidating
@@ -268,15 +268,15 @@ int journal_recover(journal_t *journal)
/**
 * journal_skip_recovery - Start journal and wipe existing records
* @journal: journal to startup
- *
+ *
* Locate any valid recovery information from the journal and set up the
* journal structures in memory to ignore it (presumably because the
- * caller has evidence that it is out of date).
+ * caller has evidence that it is out of date).
 * This function doesn't appear to be exported.
*
* We perform one pass over the journal to allow us to tell the user how
* much recovery information is being erased, and to let us initialise
- * the journal transaction sequence numbers to the next unused ID.
+ * the journal transaction sequence numbers to the next unused ID.
*/
int journal_skip_recovery(journal_t *journal)
{
@@ -297,7 +297,7 @@ int journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD_DEBUG
int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
#endif
- jbd_debug(0,
+ jbd_debug(0,
"JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
journal->j_transaction_sequence = ++info.end_transaction;
@@ -314,7 +314,7 @@ static int do_one_pass(journal_t *journal,
unsigned long next_log_block;
int err, success = 0;
journal_superblock_t * sb;
- journal_header_t * tmp;
+ journal_header_t * tmp;
struct buffer_head * bh;
unsigned int sequence;
int blocktype;
@@ -324,10 +324,10 @@ static int do_one_pass(journal_t *journal,
MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
/ sizeof(journal_block_tag_t));
- /*
+ /*
* First thing is to establish what we expect to find in the log
* (in terms of transaction IDs), and where (in terms of log
- * block offsets): query the superblock.
+ * block offsets): query the superblock.
*/
sb = journal->j_superblock;
@@ -344,7 +344,7 @@ static int do_one_pass(journal_t *journal,
* Now we walk through the log, transaction by transaction,
* making sure that each transaction has a commit block in the
* expected place. Each complete transaction gets replayed back
- * into the main filesystem.
+ * into the main filesystem.
*/
while (1) {
@@ -379,8 +379,8 @@ static int do_one_pass(journal_t *journal,
next_log_block++;
wrap(journal, next_log_block);
- /* What kind of buffer is it?
- *
+ /* What kind of buffer is it?
+ *
* If it is a descriptor block, check that it has the
* expected sequence number. Otherwise, we're all done
* here. */
@@ -394,7 +394,7 @@ static int do_one_pass(journal_t *journal,
blocktype = be32_to_cpu(tmp->h_blocktype);
sequence = be32_to_cpu(tmp->h_sequence);
- jbd_debug(3, "Found magic %d, sequence %d\n",
+ jbd_debug(3, "Found magic %d, sequence %d\n",
blocktype, sequence);
if (sequence != next_commit_ID) {
@@ -438,7 +438,7 @@ static int do_one_pass(journal_t *journal,
/* Recover what we can, but
* report failure at the end. */
success = err;
- printk (KERN_ERR
+ printk (KERN_ERR
"JBD: IO error %d recovering "
"block %ld in log\n",
err, io_block);
@@ -452,7 +452,7 @@ static int do_one_pass(journal_t *journal,
* revoked, then we're all done
* here. */
if (journal_test_revoke
- (journal, blocknr,
+ (journal, blocknr,
next_commit_ID)) {
brelse(obh);
++info->nr_revoke_hits;
@@ -465,7 +465,7 @@ static int do_one_pass(journal_t *journal,
blocknr,
journal->j_blocksize);
if (nbh == NULL) {
- printk(KERN_ERR
+ printk(KERN_ERR
"JBD: Out of memory "
"during recovery.\n");
err = -ENOMEM;
@@ -537,7 +537,7 @@ static int do_one_pass(journal_t *journal,
}
done:
- /*
+ /*
* We broke out of the log scan loop: either we came to the
* known end of the log or we found an unexpected block in the
* log. If the latter happened, then we know that the "current"
@@ -567,7 +567,7 @@ static int do_one_pass(journal_t *journal,
/* Scan a revoke record, marking all blocks mentioned as revoked. */
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
+static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
tid_t sequence, struct recovery_info *info)
{
journal_revoke_header_t *header;
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index a5614418346..c532429d8d9 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -1,6 +1,6 @@
/*
* linux/fs/revoke.c
- *
+ *
* Written by Stephen C. Tweedie <sct@redhat.com>, 2000
*
* Copyright 2000 Red Hat corp --- All Rights Reserved
@@ -15,10 +15,10 @@
* Revoke is the mechanism used to prevent old log records for deleted
* metadata from being replayed on top of newer data using the same
* blocks. The revoke mechanism is used in two separate places:
- *
+ *
* + Commit: during commit we write the entire list of the current
* transaction's revoked blocks to the journal
- *
+ *
* + Recovery: during recovery we record the transaction ID of all
* revoked blocks. If there are multiple revoke records in the log
* for a single block, only the last one counts, and if there is a log
@@ -29,7 +29,7 @@
* single transaction:
*
* Block is revoked and then journaled:
- * The desired end result is the journaling of the new block, so we
+ * The desired end result is the journaling of the new block, so we
* cancel the revoke before the transaction commits.
*
* Block is journaled and then revoked:
@@ -41,7 +41,7 @@
* transaction must have happened after the block was journaled and so
* the revoke must take precedence.
*
- * Block is revoked and then written as data:
+ * Block is revoked and then written as data:
* The data write is allowed to succeed, but the revoke is _not_
* cancelled. We still need to prevent old log records from
* overwriting the new data. We don't even need to clear the revoke
@@ -54,7 +54,7 @@
* buffer has not been revoked, and cancel_revoke
* need do nothing.
* RevokeValid set, Revoked set:
- * buffer has been revoked.
+ * buffer has been revoked.
*/
#ifndef __KERNEL__
@@ -77,7 +77,7 @@ static kmem_cache_t *revoke_table_cache;
journal replay, this involves recording the transaction ID of the
last transaction to revoke this block. */
-struct jbd_revoke_record_s
+struct jbd_revoke_record_s
{
struct list_head hash;
tid_t sequence; /* Used for recovery only */
@@ -90,8 +90,8 @@ struct jbd_revoke_table_s
{
/* It is conceivable that we might want a larger hash table
* for recovery. Must be a power of two. */
- int hash_size;
- int hash_shift;
+ int hash_size;
+ int hash_shift;
struct list_head *hash_table;
};
@@ -301,22 +301,22 @@ void journal_destroy_revoke(journal_t *journal)
#ifdef __KERNEL__
-/*
+/*
* journal_revoke: revoke a given buffer_head from the journal. This
* prevents the block from being replayed during recovery if we take a
* crash after this current transaction commits. Any subsequent
* metadata writes of the buffer in this transaction cancel the
- * revoke.
+ * revoke.
*
* Note that this call may block --- it is up to the caller to make
* sure that there are no further calls to journal_write_metadata
* before the revoke is complete. In ext3, this implies calling the
* revoke before clearing the block bitmap when we are deleting
- * metadata.
+ * metadata.
*
* Revoke performs a journal_forget on any buffer_head passed in as a
* parameter, but does _not_ forget the buffer_head if the bh was only
- * found implicitly.
+ * found implicitly.
*
* bh_in may not be a journalled buffer - it may have come off
* the hash tables without an attached journal_head.
@@ -325,7 +325,7 @@ void journal_destroy_revoke(journal_t *journal)
* by one.
*/
-int journal_revoke(handle_t *handle, unsigned long blocknr,
+int journal_revoke(handle_t *handle, unsigned long blocknr,
struct buffer_head *bh_in)
{
struct buffer_head *bh = NULL;
@@ -487,7 +487,7 @@ void journal_switch_revoke_table(journal_t *journal)
else
journal->j_revoke = journal->j_revoke_table[0];
- for (i = 0; i < journal->j_revoke->hash_size; i++)
+ for (i = 0; i < journal->j_revoke->hash_size; i++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}
@@ -498,7 +498,7 @@ void journal_switch_revoke_table(journal_t *journal)
* Called with the journal lock held.
*/
-void journal_write_revoke_records(journal_t *journal,
+void journal_write_revoke_records(journal_t *journal,
transaction_t *transaction)
{
struct journal_head *descriptor;
@@ -507,7 +507,7 @@ void journal_write_revoke_records(journal_t *journal,
struct list_head *hash_list;
int i, offset, count;
- descriptor = NULL;
+ descriptor = NULL;
offset = 0;
count = 0;
@@ -519,10 +519,10 @@ void journal_write_revoke_records(journal_t *journal,
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
- record = (struct jbd_revoke_record_s *)
+ record = (struct jbd_revoke_record_s *)
hash_list->next;
write_one_revoke_record(journal, transaction,
- &descriptor, &offset,
+ &descriptor, &offset,
record);
count++;
list_del(&record->hash);
@@ -534,14 +534,14 @@ void journal_write_revoke_records(journal_t *journal,
jbd_debug(1, "Wrote %d revoke records\n", count);
}
-/*
+/*
* Write out one revoke record. We need to create a new descriptor
- * block if the old one is full or if we have not already created one.
+ * block if the old one is full or if we have not already created one.
*/
-static void write_one_revoke_record(journal_t *journal,
+static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction,
- struct journal_head **descriptorp,
+ struct journal_head **descriptorp,
int *offsetp,
struct jbd_revoke_record_s *record)
{
@@ -584,21 +584,21 @@ static void write_one_revoke_record(journal_t *journal,
*descriptorp = descriptor;
}
- * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
+ * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
cpu_to_be32(record->blocknr);
offset += 4;
*offsetp = offset;
}
-/*
+/*
* Flush a revoke descriptor out to the journal. If we are aborting,
* this is a noop; otherwise we are generating a buffer which needs to
* be waited for during commit, so it has to go onto the appropriate
* journal buffer list.
*/
-static void flush_descriptor(journal_t *journal,
- struct journal_head *descriptor,
+static void flush_descriptor(journal_t *journal,
+ struct journal_head *descriptor,
int offset)
{
journal_revoke_header_t *header;
@@ -618,7 +618,7 @@ static void flush_descriptor(journal_t *journal,
}
#endif
-/*
+/*
* Revoke support for recovery.
*
* Recovery needs to be able to:
@@ -629,7 +629,7 @@ static void flush_descriptor(journal_t *journal,
* check whether a given block in a given transaction should be replayed
* (ie. has not been revoked by a revoke record in that or a subsequent
* transaction)
- *
+ *
* empty the revoke table after recovery.
*/
@@ -637,11 +637,11 @@ static void flush_descriptor(journal_t *journal,
* First, setting revoke records. We create a new revoke record for
* every block ever revoked in the log as we scan it for recovery, and
* we update the existing records if we find multiple revokes for a
- * single block.
+ * single block.
*/
-int journal_set_revoke(journal_t *journal,
- unsigned long blocknr,
+int journal_set_revoke(journal_t *journal,
+ unsigned long blocknr,
tid_t sequence)
{
struct jbd_revoke_record_s *record;
@@ -653,18 +653,18 @@ int journal_set_revoke(journal_t *journal,
if (tid_gt(sequence, record->sequence))
record->sequence = sequence;
return 0;
- }
+ }
return insert_revoke_hash(journal, blocknr, sequence);
}
-/*
+/*
* Test revoke records. For a given block referenced in the log, has
* that block been revoked? A revoke record with a given transaction
* sequence number revokes all blocks in that transaction and earlier
* ones, but later transactions still need replayed.
*/
-int journal_test_revoke(journal_t *journal,
+int journal_test_revoke(journal_t *journal,
unsigned long blocknr,
tid_t sequence)
{
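
The long comment above journal_revoke() spells out the calling convention for filesystems: revoke the block before releasing it in the allocation bitmap, so a crash cannot replay a stale logged copy over whatever later reuses that block. A minimal sketch of that ordering, assuming a handle obtained from journal_start() and a hypothetical clear_block_in_bitmap() helper in place of the real ext3 bitmap code:

    #include <linux/jbd.h>

    static int forget_metadata_block(handle_t *handle, unsigned long blocknr,
                                     struct buffer_head *bh)
    {
            int err;

            /* Revoke first; per the comment above, journal_revoke() also
             * performs a journal_forget() on bh. */
            err = journal_revoke(handle, blocknr, bh);
            if (err)
                    return err;

            /* Only now is it safe to free the block for reuse. */
            clear_block_in_bitmap(handle, blocknr); /* hypothetical helper */
            return 0;
    }
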
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index f5169a96260..e1b3c8af4d1 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1,6 +1,6 @@
/*
* linux/fs/transaction.c
- *
+ *
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
* Copyright 1998 Red Hat corp --- All Rights Reserved
@@ -10,7 +10,7 @@
* option, any later version, incorporated herein by reference.
*
* Generic filesystem transaction handling code; part of the ext2fs
- * journaling system.
+ * journaling system.
*
* This file manages transactions (compound commits managed by the
* journaling code) and handles (individual atomic operations by the
@@ -74,7 +74,7 @@ get_transaction(journal_t *journal, transaction_t *transaction)
* start_this_handle: Given a handle, deal with any locking or stalling
* needed to make sure that there is enough journal space for the handle
* to begin. Attach the handle to a transaction and set up the
- * transaction's buffer credits.
+ * transaction's buffer credits.
*/
static int start_this_handle(journal_t *journal, handle_t *handle)
@@ -117,7 +117,7 @@ repeat_locked:
if (is_journal_aborted(journal) ||
(journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
spin_unlock(&journal->j_state_lock);
- ret = -EROFS;
+ ret = -EROFS;
goto out;
}
@@ -182,7 +182,7 @@ repeat_locked:
goto repeat;
}
- /*
+ /*
* The commit code assumes that it can get enough log space
* without forcing a checkpoint. This is *critical* for
* correctness: a checkpoint of a buffer which is also
@@ -191,7 +191,7 @@ repeat_locked:
*
* We must therefore ensure the necessary space in the journal
* *before* starting to dirty potentially checkpointed buffers
- * in the new transaction.
+ * in the new transaction.
*
* The worst part is, any transaction currently committing can
* reduce the free space arbitrarily. Be careful to account for
@@ -246,13 +246,13 @@ static handle_t *new_handle(int nblocks)
}
/**
- * handle_t *journal_start() - Obtain a new handle.
+ * handle_t *journal_start() - Obtain a new handle.
* @journal: Journal to start transaction on.
* @nblocks: number of block buffer we might modify
*
* We make sure that the transaction can guarantee at least nblocks of
* modified buffers in the log. We block until the log can guarantee
- * that much space.
+ * that much space.
*
* This function is visible to journal users (like ext3fs), so is not
* called with the journal already locked.
@@ -292,11 +292,11 @@ handle_t *journal_start(journal_t *journal, int nblocks)
* int journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
- *
+ *
* Some transactions, such as large extends and truncates, can be done
* atomically all at once or in several stages. The operation requests
* a credit for a number of buffer modications in advance, but can
- * extend its credit if it needs more.
+ * extend its credit if it needs more.
*
* journal_extend tries to give the running handle more buffer credits.
* It does not guarantee that allocation - this is a best-effort only.
@@ -363,7 +363,7 @@ out:
* int journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
- *
+ *
* Restart a handle for a multi-transaction filesystem
* operation.
*
@@ -462,7 +462,7 @@ void journal_lock_updates(journal_t *journal)
/**
* void journal_unlock_updates (journal_t* journal) - release barrier
* @journal: Journal to release the barrier on.
- *
+ *
* Release a transaction barrier obtained with journal_lock_updates().
*
* Should be called without the journal lock held.
@@ -547,8 +547,8 @@ repeat:
jbd_lock_bh_state(bh);
/* We now hold the buffer lock so it is safe to query the buffer
- * state. Is the buffer dirty?
- *
+ * state. Is the buffer dirty?
+ *
* If so, there are two possibilities. The buffer may be
* non-journaled, and undergoing a quite legitimate writeback.
* Otherwise, it is journaled, and we don't expect dirty buffers
@@ -566,7 +566,7 @@ repeat:
*/
if (jh->b_transaction) {
J_ASSERT_JH(jh,
- jh->b_transaction == transaction ||
+ jh->b_transaction == transaction ||
jh->b_transaction ==
journal->j_committing_transaction);
if (jh->b_next_transaction)
@@ -580,7 +580,7 @@ repeat:
*/
JBUFFER_TRACE(jh, "Unexpected dirty buffer");
jbd_unexpected_dirty_buffer(jh);
- }
+ }
unlock_buffer(bh);
@@ -653,7 +653,7 @@ repeat:
* buffer had better remain locked during the kmalloc,
* but that should be true --- we hold the journal lock
* still and the buffer is already on the BUF_JOURNAL
- * list so won't be flushed.
+ * list so won't be flushed.
*
* Subtle point, though: if this is a get_undo_access,
* then we will be relying on the frozen_data to contain
@@ -765,8 +765,8 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
* manually rather than reading off disk), then we need to keep the
* buffer_head locked until it has been completely filled with new
* data. In this case, we should be able to make the assertion that
- * the bh is not already part of an existing transaction.
- *
+ * the bh is not already part of an existing transaction.
+ *
* The buffer should already be locked by the caller by this point.
* There is no lock ranking violation: it was a newly created,
* unlocked buffer beforehand. */
@@ -778,7 +778,7 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
*
* Call this if you create a new bh.
*/
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -847,13 +847,13 @@ out:
* do not reuse freed space until the deallocation has been committed,
* since if we overwrote that space we would make the delete
* un-rewindable in case of a crash.
- *
+ *
* To deal with that, journal_get_undo_access requests write access to a
* buffer for parts of non-rewindable operations such as delete
* operations on the bitmaps. The journaling code must keep a copy of
* the buffer's contents prior to the undo_access call until such time
* as we know that the buffer has definitely been committed to disk.
- *
+ *
* We never need to know which transaction the committed data is part
* of, buffers touched here are guaranteed to be dirtied later and so
* will be committed to a new transaction in due course, at which point
@@ -911,13 +911,13 @@ out:
return err;
}
-/**
+/**
* int journal_dirty_data() - mark a buffer as containing dirty data which
* needs to be flushed before we can commit the
- * current transaction.
+ * current transaction.
* @handle: transaction
* @bh: bufferhead to mark
- *
+ *
* The buffer is placed on the transaction's data list and is marked as
* belonging to the transaction.
*
@@ -946,15 +946,15 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
/*
* What if the buffer is already part of a running transaction?
- *
+ *
* There are two cases:
* 1) It is part of the current running transaction. Refile it,
* just in case we have allocated it as metadata, deallocated
- * it, then reallocated it as data.
+ * it, then reallocated it as data.
* 2) It is part of the previous, still-committing transaction.
* If all we want to do is to guarantee that the buffer will be
* written to disk before this new transaction commits, then
- * being sure that the *previous* transaction has this same
+ * being sure that the *previous* transaction has this same
* property is sufficient for us! Just leave it on its old
* transaction.
*
@@ -1076,18 +1076,18 @@ no_journal:
return 0;
}
-/**
+/**
* int journal_dirty_metadata() - mark a buffer as containing dirty metadata
* @handle: transaction to add buffer to.
- * @bh: buffer to mark
- *
+ * @bh: buffer to mark
+ *
* mark dirty metadata which needs to be journaled as part of the current
* transaction.
*
* The buffer is placed on the transaction's metadata list and is marked
- * as belonging to the transaction.
+ * as belonging to the transaction.
*
- * Returns error number or 0 on success.
+ * Returns error number or 0 on success.
*
* Special care needs to be taken if the buffer already belongs to the
* current committing transaction (in which case we should have frozen
@@ -1135,11 +1135,11 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
set_buffer_jbddirty(bh);
- /*
+ /*
* Metadata already on the current transaction list doesn't
* need to be filed. Metadata on another transaction's list must
* be committing, and will be refiled once the commit completes:
- * leave it alone for now.
+ * leave it alone for now.
*/
if (jh->b_transaction != transaction) {
JBUFFER_TRACE(jh, "already on other transaction");
@@ -1165,7 +1165,7 @@ out:
return 0;
}
-/*
+/*
* journal_release_buffer: undo a get_write_access without any buffer
* updates, if the update decided in the end that it didn't need access.
*
@@ -1176,20 +1176,20 @@ journal_release_buffer(handle_t *handle, struct buffer_head *bh)
BUFFER_TRACE(bh, "entry");
}
-/**
+/**
* void journal_forget() - bforget() for potentially-journaled buffers.
* @handle: transaction handle
* @bh: bh to 'forget'
*
* We can only do the bforget if there are no commits pending against the
* buffer. If the buffer is dirty in the current running transaction we
- * can safely unlink it.
+ * can safely unlink it.
*
* bh may not be a journalled buffer at all - it may be a non-JBD
* buffer which came off the hashtable. Check for this.
*
* Decrements bh->b_count by one.
- *
+ *
* Allow this call even if the handle has aborted --- it may be part of
* the caller's cleanup after an abort.
*/
@@ -1237,7 +1237,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
drop_reserve = 1;
- /*
+ /*
* We are no longer going to journal this buffer.
* However, the commit of this transaction is still
* important to the buffer: the delete that we are now
@@ -1246,7 +1246,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
*
* So, if we have a checkpoint on the buffer, we should
* now refile the buffer on our BJ_Forget list so that
- * we know to remove the checkpoint after we commit.
+ * we know to remove the checkpoint after we commit.
*/
if (jh->b_cp_transaction) {
@@ -1264,7 +1264,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
}
}
} else if (jh->b_transaction) {
- J_ASSERT_JH(jh, (jh->b_transaction ==
+ J_ASSERT_JH(jh, (jh->b_transaction ==
journal->j_committing_transaction));
/* However, if the buffer is still owned by a prior
* (committing) transaction, we can't drop it yet... */
@@ -1294,7 +1294,7 @@ drop:
/**
* int journal_stop() - complete a transaction
* @handle: tranaction to complete.
- *
+ *
* All done for a particular handle.
*
* There is not much action needed here. We just return any remaining
@@ -1303,7 +1303,7 @@ drop:
* filesystem is marked for synchronous update.
*
* journal_stop itself will not usually return an error, but it may
- * do so in unusual circumstances. In particular, expect it to
+ * do so in unusual circumstances. In particular, expect it to
* return -EIO if a journal_abort has been executed since the
* transaction began.
*/
@@ -1373,7 +1373,7 @@ int journal_stop(handle_t *handle)
if (handle->h_sync ||
transaction->t_outstanding_credits >
journal->j_max_transaction_buffers ||
- time_after_eq(jiffies, transaction->t_expires)) {
+ time_after_eq(jiffies, transaction->t_expires)) {
/* Do this even for aborted journals: an abort still
* completes the commit thread, it just doesn't write
* anything to disk. */
@@ -1388,7 +1388,7 @@ int journal_stop(handle_t *handle)
/*
* Special case: JFS_SYNC synchronous updates require us
- * to wait for the commit to complete.
+ * to wait for the commit to complete.
*/
if (handle->h_sync && !(current->flags & PF_MEMALLOC))
err = log_wait_commit(journal, tid);
@@ -1439,7 +1439,7 @@ int journal_force_commit(journal_t *journal)
* jbd_lock_bh_state(jh2bh(jh)) is held.
*/
-static inline void
+static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
if (!*list) {
@@ -1454,7 +1454,7 @@ __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
}
}
-/*
+/*
* Remove a buffer from a transaction list, given the transaction's list
* head pointer.
*
@@ -1475,7 +1475,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
jh->b_tnext->b_tprev = jh->b_tprev;
}
-/*
+/*
* Remove a buffer from the appropriate transaction list.
*
* Note that this function can *change* the value of
@@ -1595,17 +1595,17 @@ out:
}
-/**
+/**
* int journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
* @page: to try and free
* @unused_gfp_mask: unused
*
- *
+ *
* For all the buffers on this page,
* if they are fully written out ordered data, move them onto BUF_CLEAN
* so try_to_free_buffers() can reap them.
- *
+ *
* This function returns non-zero if we wish try_to_free_buffers()
* to be called. We do this if the page is releasable by try_to_free_buffers().
* We also do it if the page has locked or dirty buffers and the caller wants
@@ -1629,7 +1629,7 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*/
-int journal_try_to_free_buffers(journal_t *journal,
+int journal_try_to_free_buffers(journal_t *journal,
struct page *page, gfp_t unused_gfp_mask)
{
struct buffer_head *head;
@@ -1697,7 +1697,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
}
/*
- * journal_invalidatepage
+ * journal_invalidatepage
*
* This code is tricky. It has a number of cases to deal with.
*
@@ -1705,15 +1705,15 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
*
* i_size must be updated on disk before we start calling invalidatepage on the
* data.
- *
+ *
* This is done in ext3 by defining an ext3_setattr method which
* updates i_size before truncate gets going. By maintaining this
* invariant, we can be sure that it is safe to throw away any buffers
* attached to the current transaction: once the transaction commits,
* we know that the data will not be needed.
- *
+ *
* Note however that we can *not* throw away data belonging to the
- * previous, committing transaction!
+ * previous, committing transaction!
*
* Any disk blocks which *are* part of the previous, committing
* transaction (and which therefore cannot be discarded immediately) are
@@ -1732,7 +1732,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
* don't make guarantees about the order in which data hits disk --- in
* particular we don't guarantee that new dirty data is flushed before
* transaction commit --- so it is always safe just to discard data
- * immediately in that mode. --sct
+ * immediately in that mode. --sct
*/
/*
@@ -1876,9 +1876,9 @@ zap_buffer_unlocked:
return may_free;
}
-/**
+/**
* void journal_invalidatepage()
- * @journal: journal to use for flush...
+ * @journal: journal to use for flush...
* @page: page to flush
* @offset: length of page to invalidate.
*
@@ -1886,7 +1886,7 @@ zap_buffer_unlocked:
*
*/
void journal_invalidatepage(journal_t *journal,
- struct page *page,
+ struct page *page,
unsigned long offset)
{
struct buffer_head *head, *bh, *next;
@@ -1908,7 +1908,7 @@ void journal_invalidatepage(journal_t *journal,
next = bh->b_this_page;
if (offset <= curr_off) {
- /* This block is wholly outside the truncation point */
+ /* This block is wholly outside the truncation point */
lock_buffer(bh);
may_free &= journal_unmap_buffer(journal, bh);
unlock_buffer(bh);
@@ -1924,8 +1924,8 @@ void journal_invalidatepage(journal_t *journal,
}
}
-/*
- * File a buffer on the given transaction list.
+/*
+ * File a buffer on the given transaction list.
*/
void __journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
@@ -1948,7 +1948,7 @@ void __journal_file_buffer(struct journal_head *jh,
* with __jbd_unexpected_dirty_buffer()'s handling of dirty
* state. */
- if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
+ if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
jlist == BJ_Shadow || jlist == BJ_Forget) {
if (test_clear_buffer_dirty(bh) ||
test_clear_buffer_jbddirty(bh))
@@ -2008,7 +2008,7 @@ void journal_file_buffer(struct journal_head *jh,
jbd_unlock_bh_state(jh2bh(jh));
}
-/*
+/*
* Remove a buffer from its current buffer list in preparation for
* dropping it from its current transaction entirely. If the buffer has
* already started to be used by a subsequent transaction, refile the
@@ -2060,7 +2060,7 @@ void __journal_refile_buffer(struct journal_head *jh)
* to the caller to remove the journal_head if necessary. For the
* unlocked journal_refile_buffer call, the caller isn't going to be
* doing anything else to the buffer so we need to do the cleanup
- * ourselves to avoid a jh leak.
+ * ourselves to avoid a jh leak.
*
* *** The journal_head may be freed by this call! ***
*/
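
Taken together, the kerneldoc comments touched in this file describe the normal life cycle of a handle: reserve credits with journal_start(), declare each buffer with journal_get_write_access(), modify it, file it with journal_dirty_metadata(), and finish with journal_stop(). A minimal sketch of that sequence for a single buffer, assuming the caller already holds a pinned buffer_head; the credit count of 1 is illustrative:

    #include <linux/err.h>
    #include <linux/jbd.h>

    static int update_one_block(journal_t *journal, struct buffer_head *bh)
    {
            handle_t *handle;
            int err;

            handle = journal_start(journal, 1);     /* may block for log space */
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            err = journal_get_write_access(handle, bh);
            if (err)
                    goto out;

            /* ... modify bh->b_data under the handle ... */

            err = journal_dirty_metadata(handle, bh);
    out:
            if (!err)
                    err = journal_stop(handle);     /* may return -EIO after an abort */
            else
                    journal_stop(handle);
            return err;
    }
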
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 93068697a9b..f5cf9c93e24 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -364,12 +364,11 @@ jffs_new_inode(const struct inode * dir, struct jffs_raw_inode *raw_inode,
inode->i_ctime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = (inode->i_size + 511) >> 9;
f = jffs_find_file(c, raw_inode->ino);
- inode->u.generic_ip = (void *)f;
+ inode->i_private = (void *)f;
insert_inode_hash(inode);
return inode;
@@ -442,7 +441,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
});
result = -ENOTDIR;
- if (!(old_dir_f = (struct jffs_file *)old_dir->u.generic_ip)) {
+ if (!(old_dir_f = old_dir->i_private)) {
D(printk("jffs_rename(): Old dir invalid.\n"));
goto jffs_rename_end;
}
@@ -456,7 +455,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Find the new directory. */
result = -ENOTDIR;
- if (!(new_dir_f = (struct jffs_file *)new_dir->u.generic_ip)) {
+ if (!(new_dir_f = new_dir->i_private)) {
D(printk("jffs_rename(): New dir invalid.\n"));
goto jffs_rename_end;
}
@@ -593,7 +592,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
else {
ddino = ((struct jffs_file *)
- inode->u.generic_ip)->pino;
+ inode->i_private)->pino;
}
D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
@@ -604,7 +603,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
filp->f_pos++;
}
- f = ((struct jffs_file *)inode->u.generic_ip)->children;
+ f = ((struct jffs_file *)inode->i_private)->children;
j = 2;
while(f && (f->deleted || j++ < filp->f_pos )) {
@@ -652,7 +651,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
lock_kernel();
D3({
- char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+ char *s = kmalloc(len + 1, GFP_KERNEL);
memcpy(s, name, len);
s[len] = '\0';
printk("jffs_lookup(): dir: 0x%p, name: \"%s\"\n", dir, s);
@@ -668,7 +667,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
}
r = -EACCES;
- if (!(d = (struct jffs_file *)dir->u.generic_ip)) {
+ if (!(d = (struct jffs_file *)dir->i_private)) {
D(printk("jffs_lookup(): No such inode! (%lu)\n",
dir->i_ino));
goto jffs_lookup_end;
@@ -739,7 +738,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
unsigned long read_len;
int result;
struct inode *inode = (struct inode*)page->mapping->host;
- struct jffs_file *f = (struct jffs_file *)inode->u.generic_ip;
+ struct jffs_file *f = (struct jffs_file *)inode->i_private;
struct jffs_control *c = (struct jffs_control *)inode->i_sb->s_fs_info;
int r;
loff_t offset;
@@ -828,7 +827,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
});
lock_kernel();
- dir_f = (struct jffs_file *)dir->u.generic_ip;
+ dir_f = dir->i_private;
ASSERT(if (!dir_f) {
printk(KERN_ERR "jffs_mkdir(): No reference to a "
@@ -972,7 +971,7 @@ jffs_remove(struct inode *dir, struct dentry *dentry, int type)
kfree(_name);
});
- dir_f = (struct jffs_file *) dir->u.generic_ip;
+ dir_f = dir->i_private;
c = dir_f->c;
result = -ENOENT;
@@ -1082,7 +1081,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
if (!old_valid_dev(rdev))
return -EINVAL;
lock_kernel();
- dir_f = (struct jffs_file *)dir->u.generic_ip;
+ dir_f = dir->i_private;
c = dir_f->c;
D3(printk (KERN_NOTICE "mknod(): down biglock\n"));
@@ -1173,8 +1172,8 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
lock_kernel();
D1({
int len = dentry->d_name.len;
- char *_name = (char *)kmalloc(len + 1, GFP_KERNEL);
- char *_symname = (char *)kmalloc(symname_len + 1, GFP_KERNEL);
+ char *_name = kmalloc(len + 1, GFP_KERNEL);
+ char *_symname = kmalloc(symname_len + 1, GFP_KERNEL);
memcpy(_name, dentry->d_name.name, len);
_name[len] = '\0';
memcpy(_symname, symname, symname_len);
@@ -1186,7 +1185,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
kfree(_symname);
});
- dir_f = (struct jffs_file *)dir->u.generic_ip;
+ dir_f = dir->i_private;
ASSERT(if (!dir_f) {
printk(KERN_ERR "jffs_symlink(): No reference to a "
"jffs_file struct in inode.\n");
@@ -1282,14 +1281,14 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
lock_kernel();
D1({
int len = dentry->d_name.len;
- char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
+ char *s = kmalloc(len + 1, GFP_KERNEL);
memcpy(s, dentry->d_name.name, len);
s[len] = '\0';
printk("jffs_create(): dir: 0x%p, name: \"%s\"\n", dir, s);
kfree(s);
});
- dir_f = (struct jffs_file *)dir->u.generic_ip;
+ dir_f = dir->i_private;
ASSERT(if (!dir_f) {
printk(KERN_ERR "jffs_create(): No reference to a "
"jffs_file struct in inode.\n");
@@ -1403,9 +1402,9 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
goto out_isem;
}
- if (!(f = (struct jffs_file *)inode->u.generic_ip)) {
- D(printk("jffs_file_write(): inode->u.generic_ip = 0x%p\n",
- inode->u.generic_ip));
+ if (!(f = inode->i_private)) {
+ D(printk("jffs_file_write(): inode->i_private = 0x%p\n",
+ inode->i_private));
goto out_isem;
}
@@ -1693,7 +1692,7 @@ jffs_read_inode(struct inode *inode)
mutex_unlock(&c->fmc->biglock);
return;
}
- inode->u.generic_ip = (void *)f;
+ inode->i_private = f;
inode->i_mode = f->mode;
inode->i_nlink = f->nlink;
inode->i_uid = f->uid;
@@ -1706,7 +1705,6 @@ jffs_read_inode(struct inode *inode)
inode->i_mtime.tv_nsec =
inode->i_ctime.tv_nsec = 0;
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = (inode->i_size + 511) >> 9;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &jffs_file_inode_operations;
@@ -1748,7 +1746,7 @@ jffs_delete_inode(struct inode *inode)
lock_kernel();
inode->i_size = 0;
inode->i_blocks = 0;
- inode->u.generic_ip = NULL;
+ inode->i_private = NULL;
clear_inode(inode);
if (inode->i_nlink == 0) {
c = (struct jffs_control *) inode->i_sb->s_fs_info;
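
The substitution running through this file swaps the old per-inode union member u.generic_ip for the dedicated i_private pointer; both hold an opaque pointer owned by the filesystem, here the struct jffs_file set up in jffs_new_inode() and jffs_read_inode(). A minimal sketch of the resulting accessor pattern; the helper names are illustrative, not part of the patch:

    static inline void jffs_attach_file(struct inode *inode, struct jffs_file *f)
    {
            inode->i_private = f;
    }

    static inline struct jffs_file *jffs_file_of(struct inode *inode)
    {
            return inode->i_private;        /* no cast needed from void * */
    }
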
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 9000f1effed..4a543e11497 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -488,13 +488,11 @@ jffs_create_file(struct jffs_control *c,
{
struct jffs_file *f;
- if (!(f = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
- GFP_KERNEL))) {
+ if (!(f = kzalloc(sizeof(*f), GFP_KERNEL))) {
D(printk("jffs_create_file(): Failed!\n"));
return NULL;
}
no_jffs_file++;
- memset(f, 0, sizeof(struct jffs_file));
f->ino = raw_inode->ino;
f->pino = raw_inode->pino;
f->nlink = raw_inode->nlink;
@@ -516,7 +514,7 @@ jffs_create_control(struct super_block *sb)
D2(printk("jffs_create_control()\n"));
- if (!(c = (struct jffs_control *)kmalloc(s, GFP_KERNEL))) {
+ if (!(c = kmalloc(s, GFP_KERNEL))) {
goto fail_control;
}
DJM(no_jffs_control++);
@@ -524,7 +522,7 @@ jffs_create_control(struct super_block *sb)
c->gc_task = NULL;
c->hash_len = JFFS_HASH_SIZE;
s = sizeof(struct list_head) * c->hash_len;
- if (!(c->hash = (struct list_head *)kmalloc(s, GFP_KERNEL))) {
+ if (!(c->hash = kmalloc(s, GFP_KERNEL))) {
goto fail_hash;
}
DJM(no_hash++);
@@ -593,8 +591,7 @@ jffs_add_virtual_root(struct jffs_control *c)
D2(printk("jffs_add_virtual_root(): "
"Creating a virtual root directory.\n"));
- if (!(root = (struct jffs_file *)kmalloc(sizeof(struct jffs_file),
- GFP_KERNEL))) {
+ if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
return -ENOMEM;
}
no_jffs_file++;
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 7d8ca1aeace..29b68d939bd 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -94,8 +94,7 @@ jffs_build_begin(struct jffs_control *c, int unit)
struct mtd_info *mtd;
D3(printk("jffs_build_begin()\n"));
- fmc = (struct jffs_fmcontrol *)kmalloc(sizeof(struct jffs_fmcontrol),
- GFP_KERNEL);
+ fmc = kmalloc(sizeof(*fmc), GFP_KERNEL);
if (!fmc) {
D(printk("jffs_build_begin(): Allocation of "
"struct jffs_fmcontrol failed!\n"));
@@ -486,8 +485,7 @@ jffs_add_node(struct jffs_node *node)
D3(printk("jffs_add_node(): ino = %u\n", node->ino));
- ref = (struct jffs_node_ref *)kmalloc(sizeof(struct jffs_node_ref),
- GFP_KERNEL);
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return -ENOMEM;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 4780f82825d..72d9909d95f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -263,7 +263,6 @@ void jffs2_read_inode (struct inode *inode)
inode->i_nlink = f->inocache->nlink;
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = (inode->i_size + 511) >> 9;
switch (inode->i_mode & S_IFMT) {
@@ -449,7 +448,6 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = 0;
inode->i_size = 0;
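
Both hunks in this file only delete assignments to inode->i_blksize, the same deletion that recurs in most of the filesystems below; the field is evidently being retired tree-wide, with the block-size hint derivable from i_blkbits where it is still needed. A minimal sketch of that derivation for a stat fill path (illustrative, not code from this patch):

    #include <linux/fs.h>
    #include <linux/stat.h>

    static void fill_blksize(struct inode *inode, struct kstat *stat)
    {
            /* 1 << i_blkbits is the value the dropped field used to cache */
            stat->blksize = 1 << inode->i_blkbits;
    }
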
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 68e3953419b..6de374513c0 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -119,10 +119,9 @@ static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
struct jffs2_sb_info *c;
int ret;
- c = kmalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
- memset(c, 0, sizeof(*c));
c->mtd = mtd;
sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
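
This hunk shows the allocation cleanup that recurs throughout the patch: a kmalloc() followed by a memset() of the whole object collapses into a single kzalloc(), which returns already-zeroed memory. A minimal before/after sketch of the idiom (generic, not tied to jffs2_sb_info):

    #include <linux/slab.h>
    #include <linux/string.h>

    static void *zalloc_old_style(size_t n)
    {
            void *p = kmalloc(n, GFP_KERNEL);

            if (p)
                    memset(p, 0, n);        /* explicit clear */
            return p;
    }

    static void *zalloc_new_style(size_t n)
    {
            return kzalloc(n, GFP_KERNEL);  /* allocate and clear in one call */
    }
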
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 4d52593a5fc..4c74f0944f7 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -468,7 +468,7 @@ int extRecord(struct inode *ip, xad_t * xp)
int extFill(struct inode *ip, xad_t * xp)
{
int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
- s64 blkno = offsetXAD(xp) >> ip->i_blksize;
+ s64 blkno = offsetXAD(xp) >> ip->i_blkbits;
// assert(ISSPARSE(ip));
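
The extFill() change above is more than whitespace: converting a byte offset to a block number means shifting right by log2 of the block size, which is exactly what i_blkbits holds, whereas the old expression shifted by i_blksize, the block size itself (a shift count that large is undefined for any integer type). A short worked example, assuming a 4096-byte block size:

    /* with 4096-byte blocks, ip->i_blkbits == 12 */
    s64 byte_offset = 12288;                /* 3 * 4096 */
    s64 blkno = byte_offset >> 12;          /* == 3, as intended */
    /* the old code computed byte_offset >> 4096, shifting by the block
     * size rather than by its base-2 logarithm */
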
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ccbe60aff83..369d7f39c04 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -3115,7 +3115,6 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
- ip->i_blksize = ip->i_sb->s_blocksize;
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
ip->i_generation = le32_to_cpu(dip->di_gen);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 495df402916..bffaca9ae3a 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -115,7 +115,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
}
jfs_inode->mode2 |= mode;
- inode->i_blksize = sb->s_blocksize;
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
jfs_inode->otime = inode->i_ctime.tv_sec;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index e1e0a6e6ebd..f5afc129d6b 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -257,7 +257,7 @@ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
int rc = 0;
int xflag;
s64 xaddr;
- sector_t file_blocks = (inode->i_size + inode->i_blksize - 1) >>
+ sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_blkbits;
if (lblock >= file_blocks)
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index efbb586bed4..3856efc399c 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -282,7 +282,7 @@ int txInit(void)
TxLockVHWM = (nTxLock * 8) / 10;
size = sizeof(struct tblock) * nTxBlock;
- TxBlock = (struct tblock *) vmalloc(size);
+ TxBlock = vmalloc(size);
if (TxBlock == NULL)
return -ENOMEM;
@@ -307,7 +307,7 @@ int txInit(void)
* tlock id = 0 is reserved.
*/
size = sizeof(struct tlock) * nTxLock;
- TxLock = (struct tlock *) vmalloc(size);
+ TxLock = vmalloc(size);
if (TxLock == NULL) {
vfree(TxBlock);
return -ENOMEM;
diff --git a/fs/libfs.c b/fs/libfs.c
index ac02ea602c3..8db5afb7b0a 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -383,7 +383,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files
return -ENOMEM;
inode->i_mode = S_IFDIR | 0755;
inode->i_uid = inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_op = &simple_dir_inode_operations;
@@ -405,7 +404,6 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files
goto out;
inode->i_mode = S_IFREG | files->mode;
inode->i_uid = inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_fop = files->ops;
@@ -547,7 +545,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
attr->get = get;
attr->set = set;
- attr->data = inode->u.generic_ip;
+ attr->data = inode->i_private;
attr->fmt = fmt;
mutex_init(&attr->mutex);
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 52774feab93..f95cc3f3c42 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -160,7 +160,7 @@ static void nlmclnt_prepare_reclaim(struct nlm_host *host)
*/
list_splice_init(&host->h_granted, &host->h_reclaim);
- dprintk("NLM: reclaiming locks for host %s", host->h_name);
+ dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
}
static void nlmclnt_finish_reclaim(struct nlm_host *host)
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 50dbb67ae0c..271e2165fff 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -100,7 +100,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
res = __nlm_find_lockowner(host, owner);
if (res == NULL) {
spin_unlock(&host->h_lock);
- new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
spin_lock(&host->h_lock);
res = __nlm_find_lockowner(host, owner);
if (res == NULL && new != NULL) {
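
The cast dropped here is another pattern repeated across this patch: kmalloc() returns void *, which converts implicitly to any object pointer type in C, so casting its result only adds noise (and can hide a missing prototype). A minimal sketch of the preferred form:

    struct nlm_lockowner *new;

    new = kmalloc(sizeof(*new), GFP_KERNEL);        /* no cast required */
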
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 703fb038c81..a0d0b58ce7a 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -99,9 +99,9 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
/* Ooops, no host found, create it */
dprintk("lockd: creating host entry\n");
- if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
goto nohost;
- memset(host, 0, sizeof(*host));
addr = sin->sin_addr.s_addr;
sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 01b4db9e546..a92dd98f840 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -100,11 +100,10 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
nlm_debug_print_fh("creating file for", f);
nfserr = nlm_lck_denied_nolocks;
- file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
if (!file)
goto out_unlock;
- memset(file, 0, sizeof(*file));
memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
file->f_hash = hash;
init_MUTEX(&file->f_sema);
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 4a6abc49418..df6b1075b54 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -254,7 +254,7 @@ struct inode * minix_new_inode(const struct inode * dir, int * error)
inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
inode->i_ino = j;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
insert_inode_hash(inode);
mark_inode_dirty(inode);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 330ff9fc7cf..c11a4b9fb86 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -90,8 +90,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(minix_inode_cachep))
- printk(KERN_INFO "minix_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(minix_inode_cachep);
}
static struct super_operations minix_sops = {
@@ -145,11 +144,10 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
struct inode *root_inode;
struct minix_sb_info *sbi;
- sbi = kmalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
- memset(sbi, 0, sizeof(struct minix_sb_info));
/* N.B. These should be compile-time tests.
Unfortunately that is impossible. */
@@ -207,10 +205,9 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
goto out_illegal_sb;
i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
- map = kmalloc(i, GFP_KERNEL);
+ map = kzalloc(i, GFP_KERNEL);
if (!map)
goto out_no_map;
- memset(map, 0, i);
sbi->s_imap = &map[0];
sbi->s_zmap = &map[sbi->s_imap_blocks];
@@ -399,7 +396,7 @@ static void V1_minix_read_inode(struct inode * inode)
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
for (i = 0; i < 9; i++)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
@@ -432,7 +429,7 @@ static void V2_minix_read_inode(struct inode * inode)
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
for (i = 0; i < 10; i++)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
diff --git a/fs/namei.c b/fs/namei.c
index 6b591c01b09..808e4ea2bb9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -372,6 +372,30 @@ void release_open_intent(struct nameidata *nd)
fput(nd->intent.open.file);
}
+static inline struct dentry *
+do_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ int status = dentry->d_op->d_revalidate(dentry, nd);
+ if (unlikely(status <= 0)) {
+ /*
+ * The dentry failed validation.
+ * If d_revalidate returned 0 attempt to invalidate
+ * the dentry otherwise d_revalidate is asking us
+ * to return a fail status.
+ */
+ if (!status) {
+ if (!d_invalidate(dentry)) {
+ dput(dentry);
+ dentry = NULL;
+ }
+ } else {
+ dput(dentry);
+ dentry = ERR_PTR(status);
+ }
+ }
+ return dentry;
+}
+
/*
* Internal lookup() using the new generic dcache.
* SMP-safe
@@ -386,12 +410,9 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
if (!dentry)
dentry = d_lookup(parent, name);
- if (dentry && dentry->d_op && dentry->d_op->d_revalidate) {
- if (!dentry->d_op->d_revalidate(dentry, nd) && !d_invalidate(dentry)) {
- dput(dentry);
- dentry = NULL;
- }
- }
+ if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
+ dentry = do_revalidate(dentry, nd);
+
return dentry;
}
@@ -484,10 +505,9 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
*/
mutex_unlock(&dir->i_mutex);
if (result->d_op && result->d_op->d_revalidate) {
- if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
- dput(result);
+ result = do_revalidate(result, nd);
+ if (!result)
result = ERR_PTR(-ENOENT);
- }
}
return result;
}
@@ -767,12 +787,12 @@ need_lookup:
goto done;
need_revalidate:
- if (dentry->d_op->d_revalidate(dentry, nd))
- goto done;
- if (d_invalidate(dentry))
- goto done;
- dput(dentry);
- goto need_lookup;
+ dentry = do_revalidate(dentry, nd);
+ if (!dentry)
+ goto need_lookup;
+ if (IS_ERR(dentry))
+ goto fail;
+ goto done;
fail:
return PTR_ERR(dentry);
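
do_revalidate(), added above, folds three previously open-coded revalidation sequences into one helper with a three-way result: the original dentry if it is still valid, NULL if it was invalidated and dropped (the caller should repeat the lookup), or an ERR_PTR() when ->d_revalidate() reported a hard error. A caller-side sketch of that contract, mirroring the do_lookup() hunk:

            dentry = do_revalidate(dentry, nd);
            if (!dentry)
                    goto need_lookup;       /* invalidated: look it up again */
            if (IS_ERR(dentry))
                    goto fail;              /* caller returns PTR_ERR(dentry) */
            goto done;                      /* cached dentry is still good */
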
diff --git a/fs/namespace.c b/fs/namespace.c
index fa7ed6a9fc2..36d18085813 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -17,6 +17,7 @@
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
@@ -28,15 +29,6 @@
extern int __init init_rootfs(void);
-#ifdef CONFIG_SYSFS
-extern int __init sysfs_init(void);
-#else
-static inline int sysfs_init(void)
-{
- return 0;
-}
-#endif
-
/* spinlock for vfsmount related operations, inplace of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 1ddf77b0b82..42e3bef270c 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -81,8 +81,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(ncp_inode_cachep))
- printk(KERN_INFO "ncp_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(ncp_inode_cachep);
}
static int ncp_remount(struct super_block *sb, int *flags, char* data)
@@ -224,7 +223,6 @@ static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
inode->i_nlink = 1;
inode->i_uid = server->m.uid;
inode->i_gid = server->m.gid;
- inode->i_blksize = NCP_BLOCK_SIZE;
ncp_update_dates(inode, &nwinfo->i);
ncp_update_inode(inode, nwinfo);
@@ -411,11 +409,10 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
#endif
struct ncp_entry_info finfo;
- server = kmalloc(sizeof(struct ncp_server), GFP_KERNEL);
+ server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
if (!server)
return -ENOMEM;
sb->s_fs_info = server;
- memset(server, 0, sizeof(struct ncp_server));
error = -EFAULT;
if (raw_data == NULL)
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index ca92c240663..e3d26c1bd10 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -48,7 +48,7 @@ static int ncp_symlink_readpage(struct file *file, struct page *page)
char *buf = kmap(page);
error = -ENOMEM;
- rawlink=(char *)kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+ rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
if (!rawlink)
goto fail;
@@ -126,7 +126,7 @@ int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
/* EPERM is returned by VFS if symlink procedure does not exist */
return -EPERM;
- rawlink=(char *)kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
+ rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
if (!rawlink)
return -ENOMEM;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 57133678db1..841c99a9b11 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -20,11 +20,6 @@
#include "delegation.h"
#include "internal.h"
-static struct nfs_delegation *nfs_alloc_delegation(void)
-{
- return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
-}
-
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
if (delegation->cred)
@@ -124,7 +119,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
__nfs_revalidate_inode(NFS_SERVER(inode), inode);
- delegation = nfs_alloc_delegation();
+ delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
if (delegation == NULL)
return -ENOMEM;
memcpy(delegation->stateid.data, res->delegation.data,
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 76ca1cbc38f..377839bed17 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -855,6 +855,5 @@ int __init nfs_init_directcache(void)
*/
void nfs_destroy_directcache(void)
{
- if (kmem_cache_destroy(nfs_direct_cachep))
- printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
+ kmem_cache_destroy(nfs_direct_cachep);
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8c143d182c..bc9376ca86c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -277,10 +277,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
- inode->i_blksize = inode->i_sb->s_blocksize;
} else {
inode->i_blocks = fattr->du.nfs2.blocks;
- inode->i_blksize = fattr->du.nfs2.blocksize;
}
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = jiffies;
@@ -443,7 +441,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, str
{
struct nfs_open_context *ctx;
- ctx = (struct nfs_open_context *)kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx != NULL) {
atomic_set(&ctx->count, 1);
ctx->dentry = dget(dentry);
@@ -969,10 +967,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
- inode->i_blksize = inode->i_sb->s_blocksize;
} else {
inode->i_blocks = fattr->du.nfs2.blocks;
- inode->i_blksize = fattr->du.nfs2.blocksize;
}
if ((fattr->valid & NFS_ATTR_FATTR_V4) != 0 &&
@@ -1134,8 +1130,7 @@ static int __init nfs_init_inodecache(void)
static void nfs_destroy_inodecache(void)
{
- if (kmem_cache_destroy(nfs_inode_cachep))
- printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(nfs_inode_cachep);
}
/*
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 77b00684894..60408646176 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,6 +26,11 @@ LIST_HEAD(nfs_automount_list);
static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
+static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
+ const struct dentry *dentry,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr);
+
/*
* nfs_path - reconstruct the path given an arbitrary dentry
* @base - arbitrary string to prepend to the path
@@ -209,9 +214,10 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @fattr - attributes for new root inode
*
*/
-struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
- const struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr)
+static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
+ const struct dentry *dentry,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr)
{
struct nfs_clone_mount mountdata = {
.sb = mnt_parent->mnt_sb,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index f8688eaa000..3b234d4601e 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -449,7 +449,7 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr
struct nfs_fattr res;
} *ptr;
- ptr = (struct unlinkxdr *)kmalloc(sizeof(*ptr), GFP_KERNEL);
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ptr->arg.fh = NFS_FH(dir->d_inode);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 36e902a88ca..829af323f28 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -392,7 +392,6 @@ int __init nfs_init_nfspagecache(void)
void nfs_destroy_nfspagecache(void)
{
- if (kmem_cache_destroy(nfs_page_cachep))
- printk(KERN_INFO "nfs_page: not all structures were freed\n");
+ kmem_cache_destroy(nfs_page_cachep);
}
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 630e50647bb..4529cc4f3f8 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -352,7 +352,7 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *
{
struct nfs_diropargs *arg;
- arg = (struct nfs_diropargs *)kmalloc(sizeof(*arg), GFP_KERNEL);
+ arg = kmalloc(sizeof(*arg), GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg->fh = NFS_FH(dir->d_inode);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 69f1549da2b..c2e49c397a2 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -737,6 +737,5 @@ int __init nfs_init_readpagecache(void)
void nfs_destroy_readpagecache(void)
{
mempool_destroy(nfs_rdata_mempool);
- if (kmem_cache_destroy(nfs_rdata_cachep))
- printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
+ kmem_cache_destroy(nfs_rdata_cachep);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c12effb46fe..b674462793d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1565,7 +1565,6 @@ void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
mempool_destroy(nfs_wdata_mempool);
- if (kmem_cache_destroy(nfs_wdata_cachep))
- printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
+ kmem_cache_destroy(nfs_wdata_cachep);
}
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index bea6b947811..b1902ebaab4 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -573,10 +573,9 @@ idmap_lookup(struct svc_rqst *rqstp,
struct idmap_defer_req *mdr;
int ret;
- mdr = kmalloc(sizeof(*mdr), GFP_KERNEL);
+ mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
if (!mdr)
return -ENOMEM;
- memset(mdr, 0, sizeof(*mdr));
atomic_set(&mdr->count, 1);
init_waitqueue_head(&mdr->waitq);
mdr->req.defer = idmap_defer;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9daa0b9feb8..ebcf226a9e4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -339,8 +339,7 @@ alloc_client(struct xdr_netobj name)
{
struct nfs4_client *clp;
- if ((clp = kmalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) {
- memset(clp, 0, sizeof(*clp));
+ if ((clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) {
if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) {
memcpy(clp->cl_name.data, name.data, name.len);
clp->cl_name.len = name.len;
@@ -1006,13 +1005,10 @@ alloc_init_file(struct inode *ino)
static void
nfsd4_free_slab(kmem_cache_t **slab)
{
- int status;
-
if (*slab == NULL)
return;
- status = kmem_cache_destroy(*slab);
+ kmem_cache_destroy(*slab);
*slab = NULL;
- WARN_ON(status);
}
static void
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index d1e2c6f9f05..85c36b8ca45 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1149,8 +1149,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
* Allocate a buffer to store the current name being processed
* converted to format determined by current NLS.
*/
- name = (u8*)kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1,
- GFP_NOFS);
+ name = kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1, GFP_NOFS);
if (unlikely(!name)) {
err = -ENOMEM;
goto err_out;
@@ -1191,7 +1190,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
* map the mft record without deadlocking.
*/
rc = le32_to_cpu(ctx->attr->data.resident.value_length);
- ir = (INDEX_ROOT*)kmalloc(rc, GFP_NOFS);
+ ir = kmalloc(rc, GFP_NOFS);
if (unlikely(!ir)) {
err = -ENOMEM;
goto err_out;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d313f356e66..933dbd89c2a 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -137,7 +137,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
BUG_ON(!na->name);
i = na->name_len * sizeof(ntfschar);
- ni->name = (ntfschar*)kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
+ ni->name = kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
@@ -556,8 +556,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
/* Setup the generic vfs inode parts now. */
- /* This is the optimal IO size (for stat), not the fs block size. */
- vi->i_blksize = PAGE_CACHE_SIZE;
/*
* This is for checking whether an inode has changed w.r.t. a file so
* that the file can be updated if necessary (compare with f_version).
@@ -1234,7 +1232,6 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_blksize = base_vi->i_blksize;
vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
@@ -1504,7 +1501,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_blksize = base_vi->i_blksize;
vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 2438c00ec0c..584260fd684 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -331,7 +331,7 @@ map_err_out:
ntfs_inode **tmp;
int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);
- tmp = (ntfs_inode **)kmalloc(new_size, GFP_NOFS);
+ tmp = kmalloc(new_size, GFP_NOFS);
if (unlikely(!tmp)) {
ntfs_error(base_ni->vol->sb, "Failed to allocate "
"internal buffer.");
@@ -2638,11 +2638,6 @@ mft_rec_already_initialized:
}
vi->i_ino = bit;
/*
- * This is the optimal IO size (for stat), not the fs block
- * size.
- */
- vi->i_blksize = PAGE_CACHE_SIZE;
- /*
* This is for checking whether an inode has changed w.r.t. a
* file so that the file can be updated if necessary (compare
* with f_version).
@@ -2893,7 +2888,7 @@ rollback:
if (!(base_ni->nr_extents & 3)) {
int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
- extent_nis = (ntfs_inode**)kmalloc(new_size, GFP_NOFS);
+ extent_nis = kmalloc(new_size, GFP_NOFS);
if (unlikely(!extent_nis)) {
ntfs_error(vol->sb, "Failed to allocate internal "
"buffer during rollback.%s", es);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 74e0ee8fce7..6b2712f10dd 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -3248,32 +3248,14 @@ ictx_err_out:
static void __exit exit_ntfs_fs(void)
{
- int err = 0;
-
ntfs_debug("Unregistering NTFS driver.");
unregister_filesystem(&ntfs_fs_type);
-
- if (kmem_cache_destroy(ntfs_big_inode_cache) && (err = 1))
- printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
- ntfs_big_inode_cache_name);
- if (kmem_cache_destroy(ntfs_inode_cache) && (err = 1))
- printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
- ntfs_inode_cache_name);
- if (kmem_cache_destroy(ntfs_name_cache) && (err = 1))
- printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
- ntfs_name_cache_name);
- if (kmem_cache_destroy(ntfs_attr_ctx_cache) && (err = 1))
- printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
- ntfs_attr_ctx_cache_name);
- if (kmem_cache_destroy(ntfs_index_ctx_cache) && (err = 1))
- printk(KERN_CRIT "NTFS: Failed to destory %s.\n",
- ntfs_index_ctx_cache_name);
- if (err)
- printk(KERN_CRIT "NTFS: This causes memory to leak! There is "
- "probably a BUG in the driver! Please report "
- "you saw this message to "
- "linux-ntfs-dev@lists.sourceforge.net\n");
+ kmem_cache_destroy(ntfs_big_inode_cache);
+ kmem_cache_destroy(ntfs_inode_cache);
+ kmem_cache_destroy(ntfs_name_cache);
+ kmem_cache_destroy(ntfs_attr_ctx_cache);
+ kmem_cache_destroy(ntfs_index_ctx_cache);
/* Unregister the ntfs sysctls. */
ntfs_sysctl(0);
}
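
The exit_ntfs_fs() rewrite is the largest instance of a cleanup repeated in several filesystems here: the return value of kmem_cache_destroy() is no longer checked, presumably because the call is now treated as void and any complaint about objects left in the cache comes from the slab code rather than from each caller. A minimal sketch of the resulting module-exit idiom; the identifiers are hypothetical, not from this patch:

    static void __exit exit_example_fs(void)
    {
            unregister_filesystem(&example_fs_type);        /* hypothetical */
            kmem_cache_destroy(example_inode_cachep);       /* hypothetical */
    }
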
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index b123c0fa6bf..a1b572196fe 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -350,7 +350,7 @@ int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
}
if (!ns) {
ns_len = ins_len * NLS_MAX_CHARSET_SIZE;
- ns = (unsigned char*)kmalloc(ns_len + 1, GFP_NOFS);
+ ns = kmalloc(ns_len + 1, GFP_NOFS);
if (!ns)
goto mem_err_out;
}
@@ -365,7 +365,7 @@ retry: wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o,
else if (wc == -ENAMETOOLONG && ns != *outs) {
unsigned char *tc;
/* Grow in multiples of 64 bytes. */
- tc = (unsigned char*)kmalloc((ns_len + 64) &
+ tc = kmalloc((ns_len + 64) &
~63, GFP_NOFS);
if (tc) {
memcpy(tc, ns, ns_len);
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 033ad170123..0368c640218 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -335,7 +335,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -362,7 +361,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -629,9 +627,7 @@ static void __exit exit_dlmfs_fs(void)
flush_workqueue(user_dlm_worker);
destroy_workqueue(user_dlm_worker);
- if (kmem_cache_destroy(dlmfs_inode_cache))
- printk(KERN_INFO "dlmfs_inode_cache: not all structures "
- "were freed\n");
+ kmem_cache_destroy(dlmfs_inode_cache);
}
MODULE_AUTHOR("Oracle");
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index de887063dcf..8801e41afe8 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2052,7 +2052,7 @@ static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
mlog_errno(ret);
goto out;
}
- osb = (struct ocfs2_super *) inode->u.generic_ip;
+ osb = inode->i_private;
ocfs2_get_dlm_debug(osb->osb_dlm_debug);
priv->p_dlm_debug = osb->osb_dlm_debug;
INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
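The dlmglue.c hunk is part of the tree-wide move from the old inode->u.generic_ip union member to the dedicated inode->i_private pointer. Since i_private is a plain void *, readers need no cast; a hedged sketch of both sides (only the reader appears in this hunk, the setter here is illustrative):

	/* producer: stash per-inode private data at create/open time */
	inode->i_private = osb;

	/* consumer: read it back; void * converts implicitly, no cast needed */
	osb = inode->i_private;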
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 69d3db56916..16e8e74dc96 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -269,7 +269,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_mode = le16_to_cpu(fe->i_mode);
inode->i_uid = le32_to_cpu(fe->i_uid);
inode->i_gid = le32_to_cpu(fe->i_gid);
- inode->i_blksize = (u32)osb->s_clustersize;
/* Fast symlinks will have i_size but no allocated clusters. */
if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
@@ -1258,8 +1257,6 @@ leave:
void ocfs2_refresh_inode(struct inode *inode,
struct ocfs2_dinode *fe)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -1270,7 +1267,6 @@ void ocfs2_refresh_inode(struct inode *inode,
inode->i_uid = le32_to_cpu(fe->i_uid);
inode->i_gid = le32_to_cpu(fe->i_gid);
inode->i_mode = le16_to_cpu(fe->i_mode);
- inode->i_blksize = (u32) osb->s_clustersize;
if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
else
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 63730282ad8..1bea610078b 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -238,10 +238,9 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
- pte = kmalloc(count, GFP_KERNEL);
+ pte = kzalloc(count, GFP_KERNEL);
if (!pte)
return NULL;
- memset(pte, 0, count);
if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
(u8 *) pte,
@@ -269,10 +268,9 @@ alloc_read_gpt_header(struct block_device *bdev, u64 lba)
if (!bdev)
return NULL;
- gpt = kmalloc(sizeof (gpt_header), GFP_KERNEL);
+ gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
if (!gpt)
return NULL;
- memset(gpt, 0, sizeof (gpt_header));
if (read_lba(bdev, lba, (u8 *) gpt,
sizeof (gpt_header)) < sizeof (gpt_header)) {
@@ -526,9 +524,8 @@ find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes)
lastlba = last_lba(bdev);
if (!force_gpt) {
/* This will be added to the EFI Spec. per Intel after v1.02. */
- legacymbr = kmalloc(sizeof (*legacymbr), GFP_KERNEL);
+ legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
if (legacymbr) {
- memset(legacymbr, 0, sizeof (*legacymbr));
read_lba(bdev, 0, (u8 *) legacymbr,
sizeof (*legacymbr));
good_pmbr = is_pmbr_valid(legacymbr, lastlba);
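The three efi.c hunks are instances of the kmalloc()+memset() to kzalloc() conversion that recurs throughout this merge (kcore.c, qnx4, sysv, udf and ufs below). The general shape, with placeholder names:

	/* before: allocate, then clear by hand */
	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;
	memset(p, 0, size);

	/* after: kzalloc() hands back already-zeroed memory */
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;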
diff --git a/fs/pipe.c b/fs/pipe.c
index 20352573e02..f3b6f71e9d0 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -879,7 +879,6 @@ static struct inode * get_pipe_inode(void)
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_blksize = PAGE_SIZE;
return inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 146a434ba94..987c773dbb2 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -28,6 +28,7 @@ do { \
(vmi)->largest_chunk = 0; \
} while(0)
+extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
#endif
extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6a984f64edd..3ceff385727 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -279,12 +279,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
tsz = elf_buflen - *fpos;
if (buflen < tsz)
tsz = buflen;
- elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
+ elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
if (!elf_buf) {
read_unlock(&kclist_lock);
return -ENOMEM;
}
- memset(elf_buf, 0, elf_buflen);
elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
read_unlock(&kclist_lock);
if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
@@ -330,10 +329,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
unsigned long curstart = start;
unsigned long cursize = tsz;
- elf_buf = kmalloc(tsz, GFP_KERNEL);
+ elf_buf = kzalloc(tsz, GFP_KERNEL);
if (!elf_buf)
return -ENOMEM;
- memset(elf_buf, 0, tsz);
read_lock(&vmlist_lock);
for (m=vmlist; m && cursize; m=m->next) {
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index cff10ab1af6..d7dbdf9e0f4 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,19 +33,15 @@
#include "internal.h"
/*
- * display a list of all the VMAs the kernel knows about
- * - nommu kernals have a single flat list
+ * display a single VMA to a sequenced file
*/
-static int nommu_vma_list_show(struct seq_file *m, void *v)
+int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
- struct vm_area_struct *vma;
unsigned long ino = 0;
struct file *file;
dev_t dev = 0;
int flags, len;
- vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
-
flags = vma->vm_flags;
file = vma->vm_file;
@@ -78,6 +74,18 @@ static int nommu_vma_list_show(struct seq_file *m, void *v)
return 0;
}
+/*
+ * display a list of all the VMAs the kernel knows about
+ * - nommu kernels have a single flat list
+ */
+static int nommu_vma_list_show(struct seq_file *m, void *v)
+{
+ struct vm_area_struct *vma;
+
+ vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
+ return nommu_vma_show(m, vma);
+}
+
static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
{
struct rb_node *_rb;
struct rb_node *_rb;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 94215622544..5bbd6089605 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -157,10 +157,12 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"SwapCached: %8lu kB\n"
"Active: %8lu kB\n"
"Inactive: %8lu kB\n"
+#ifdef CONFIG_HIGHMEM
"HighTotal: %8lu kB\n"
"HighFree: %8lu kB\n"
"LowTotal: %8lu kB\n"
"LowFree: %8lu kB\n"
+#endif
"SwapTotal: %8lu kB\n"
"SwapFree: %8lu kB\n"
"Dirty: %8lu kB\n"
@@ -168,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"AnonPages: %8lu kB\n"
"Mapped: %8lu kB\n"
"Slab: %8lu kB\n"
+ "SReclaimable: %8lu kB\n"
+ "SUnreclaim: %8lu kB\n"
"PageTables: %8lu kB\n"
"NFS_Unstable: %8lu kB\n"
"Bounce: %8lu kB\n"
@@ -183,17 +187,22 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
K(total_swapcache_pages),
K(active),
K(inactive),
+#ifdef CONFIG_HIGHMEM
K(i.totalhigh),
K(i.freehigh),
K(i.totalram-i.totalhigh),
K(i.freeram-i.freehigh),
+#endif
K(i.totalswap),
K(i.freeswap),
K(global_page_state(NR_FILE_DIRTY)),
K(global_page_state(NR_WRITEBACK)),
K(global_page_state(NR_ANON_PAGES)),
K(global_page_state(NR_FILE_MAPPED)),
- K(global_page_state(NR_SLAB)),
+ K(global_page_state(NR_SLAB_RECLAIMABLE) +
+ global_page_state(NR_SLAB_UNRECLAIMABLE)),
+ K(global_page_state(NR_SLAB_RECLAIMABLE)),
+ K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
K(global_page_state(NR_PAGETABLE)),
K(global_page_state(NR_UNSTABLE_NFS)),
K(global_page_state(NR_BOUNCE)),
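The meminfo hunk hides the HighTotal/HighFree/LowTotal/LowFree lines on non-highmem builds and splits the old Slab figure into reclaimable and unreclaimable parts. When pieces of a printf-style call are compiled conditionally like this, the format-string fragments and their matching arguments must sit under identical preprocessor guards or the varargs go out of step. A reduced, hedged fragment (K() as used in proc_misc.c, the rest hypothetical):

	len = sprintf(page,
		      "MemTotal:  %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		      "HighTotal: %8lu kB\n"	/* emitted only on highmem builds */
#endif
		      "MemFree:   %8lu kB\n",
		      K(i.totalram),
#ifdef CONFIG_HIGHMEM
		      K(i.totalhigh),		/* guarded identically to its format line */
#endif
		      K(i.freeram));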
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0a163a4f776..6b769afac55 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,11 +122,6 @@ struct mem_size_stats
unsigned long private_dirty;
};
-__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
-{
- return NULL;
-}
-
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
struct proc_maps_private *priv = m->private;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 4616ed50ffc..091aa8e48e0 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -138,25 +138,63 @@ out:
}
/*
- * Albert D. Cahalan suggested to fake entries for the traditional
- * sections here. This might be worth investigating.
+ * display mapping lines for a particular process's /proc/pid/maps
*/
-static int show_map(struct seq_file *m, void *v)
+static int show_map(struct seq_file *m, void *_vml)
{
- return 0;
+ struct vm_list_struct *vml = _vml;
+ return nommu_vma_show(m, vml->vma);
}
+
static void *m_start(struct seq_file *m, loff_t *pos)
{
+ struct proc_maps_private *priv = m->private;
+ struct vm_list_struct *vml;
+ struct mm_struct *mm;
+ loff_t n = *pos;
+
+ /* pin the task and mm whilst we play with them */
+ priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+ if (!priv->task)
+ return NULL;
+
+ mm = get_task_mm(priv->task);
+ if (!mm) {
+ put_task_struct(priv->task);
+ priv->task = NULL;
+ return NULL;
+ }
+
+ down_read(&mm->mmap_sem);
+
+ /* start from the Nth VMA */
+ for (vml = mm->context.vmlist; vml; vml = vml->next)
+ if (n-- == 0)
+ return vml;
return NULL;
}
-static void m_stop(struct seq_file *m, void *v)
+
+static void m_stop(struct seq_file *m, void *_vml)
{
+ struct proc_maps_private *priv = m->private;
+
+ if (priv->task) {
+ struct mm_struct *mm = priv->task->mm;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ put_task_struct(priv->task);
+ }
}
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+
+static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
- return NULL;
+ struct vm_list_struct *vml = _vml;
+
+ (*pos)++;
+ return vml ? vml->next : NULL;
}
-static struct seq_operations proc_pid_maps_op = {
+
+static struct seq_operations proc_pid_maps_ops = {
.start = m_start,
.next = m_next,
.stop = m_stop,
@@ -165,11 +203,19 @@ static struct seq_operations proc_pid_maps_op = {
static int maps_open(struct inode *inode, struct file *file)
{
- int ret;
- ret = seq_open(file, &proc_pid_maps_op);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = NULL;
+ struct proc_maps_private *priv;
+ int ret = -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv) {
+ priv->pid = proc_pid(inode);
+ ret = seq_open(file, &proc_pid_maps_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = priv;
+ } else {
+ kfree(priv);
+ }
}
return ret;
}
@@ -178,6 +224,6 @@ struct file_operations proc_maps_operations = {
.open = maps_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_private,
};
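The task_nommu.c rewrite replaces the stubbed-out /proc/pid/maps with a real seq_file iterator over the mm's vm_list: m_start() pins the task and mm and walks to the Nth entry, m_next() follows the ->next pointer, m_stop() drops the references, and since maps_open() now kmallocs a private structure, .release becomes seq_release_private. The same start/next/stop/show shape works for any singly linked list; a generic, hedged skeleton (types and list head are placeholders, not from this patch):

	struct ex_node {
		struct ex_node *next;
		/* payload ... */
	};

	static struct ex_node *ex_list;		/* head of the list being dumped */

	static void *ex_start(struct seq_file *m, loff_t *pos)
	{
		struct ex_node *n = ex_list;
		loff_t off = *pos;

		while (n && off--)
			n = n->next;
		return n;			/* NULL terminates the sequence */
	}

	static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
	{
		struct ex_node *n = v;

		(*pos)++;
		return n ? n->next : NULL;
	}

	static void ex_stop(struct seq_file *m, void *v)
	{
		/* release whatever ex_start() pinned (locks, refcounts) */
	}

	static int ex_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "node %p\n", v);
		return 0;
	}

	static struct seq_operations ex_seq_ops = {
		.start	= ex_start,
		.next	= ex_next,
		.stop	= ex_stop,
		.show	= ex_show,
	};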
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 5a903491e69..5a41db2a218 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -358,11 +358,10 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
const char *errmsg;
struct qnx4_sb_info *qs;
- qs = kmalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
+ qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
if (!qs)
return -ENOMEM;
s->s_fs_info = qs;
- memset(qs, 0, sizeof(struct qnx4_sb_info));
sb_set_blocksize(s, QNX4_BLOCK_SIZE);
@@ -497,7 +496,6 @@ static void qnx4_read_inode(struct inode *inode)
inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
inode->i_ctime.tv_nsec = 0;
inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
- inode->i_blksize = QNX4_DIR_ENTRY_SIZE;
memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
if (S_ISREG(inode->i_mode)) {
@@ -557,9 +555,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(qnx4_inode_cachep))
- printk(KERN_INFO
- "qnx4_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(qnx4_inode_cachep);
}
static int qnx4_get_sb(struct file_system_type *fs_type,
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index b9677335cc8..bc0e5166242 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -58,7 +58,6 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->a_ops = &ramfs_aops;
inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 52f1e213654..8810fda0da4 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -17,8 +17,6 @@
#include <linux/writeback.h>
#include <linux/quotaops.h>
-extern int reiserfs_default_io_size; /* default io size devuned in super.c */
-
static int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
static int reiserfs_prepare_write(struct file *f, struct page *page,
@@ -1122,7 +1120,6 @@ static void init_inode(struct inode *inode, struct path *path)
ih = PATH_PITEM_HEAD(path);
copy_key(INODE_PKEY(inode), &(ih->ih_key));
- inode->i_blksize = reiserfs_default_io_size;
INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
REISERFS_I(inode)->i_flags = 0;
@@ -1877,7 +1874,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
}
// these do not go to on-disk stat data
inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
- inode->i_blksize = reiserfs_default_io_size;
// store in in-core inode the key of stat data and version all
// object items will have (directory items will have old offset
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 5567328f104..b40d4d64d59 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -530,9 +530,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(reiserfs_inode_cachep))
- reiserfs_warning(NULL,
- "reiserfs_inode_cache: not all structures were freed");
+ kmem_cache_destroy(reiserfs_inode_cachep);
}
/* we don't mark inodes dirty, we just log them */
@@ -725,12 +723,6 @@ static const arg_desc_t error_actions[] = {
{NULL, 0, 0},
};
-int reiserfs_default_io_size = 128 * 1024; /* Default recommended I/O size is 128k.
- There might be broken applications that are
- confused by this. Use nolargeio mount option
- to get usual i/o size = PAGE_SIZE.
- */
-
/* proceed only one option from a list *cur - string containing of mount options
opts - array of options which are accepted
opt_arg - if option is found and requires an argument and if it is specifed
@@ -959,19 +951,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
}
if (c == 'w') {
- char *p = NULL;
- int val = simple_strtoul(arg, &p, 0);
-
- if (*p != '\0') {
- reiserfs_warning(s,
- "reiserfs_parse_options: non-numeric value %s for nolargeio option",
- arg);
- return 0;
- }
- if (val)
- reiserfs_default_io_size = PAGE_SIZE;
- else
- reiserfs_default_io_size = 128 * 1024;
+ reiserfs_warning(s, "reiserfs: nolargeio option is no longer supported");
+ return 0;
}
if (c == 'j') {
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 22eed61ebf6..ddcd9e1ef28 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -589,8 +589,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(romfs_inode_cachep))
- printk(KERN_INFO "romfs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(romfs_inode_cachep);
}
static int romfs_remount(struct super_block *sb, int *flags, char *data)
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index a1ed657c3c8..2c122ee83ad 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -89,8 +89,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(smb_inode_cachep))
- printk(KERN_INFO "smb_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(smb_inode_cachep);
}
static int smb_remount(struct super_block *sb, int *flags, char *data)
@@ -167,7 +166,6 @@ smb_get_inode_attr(struct inode *inode, struct smb_fattr *fattr)
fattr->f_mtime = inode->i_mtime;
fattr->f_ctime = inode->i_ctime;
fattr->f_atime = inode->i_atime;
- fattr->f_blksize= inode->i_blksize;
fattr->f_blocks = inode->i_blocks;
fattr->attr = SMB_I(inode)->attr;
@@ -201,7 +199,6 @@ smb_set_inode_attr(struct inode *inode, struct smb_fattr *fattr)
inode->i_uid = fattr->f_uid;
inode->i_gid = fattr->f_gid;
inode->i_ctime = fattr->f_ctime;
- inode->i_blksize= fattr->f_blksize;
inode->i_blocks = fattr->f_blocks;
inode->i_size = fattr->f_size;
inode->i_mtime = fattr->f_mtime;
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index c3495059889..40e174db987 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -1826,7 +1826,6 @@ smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr)
fattr->f_nlink = 1;
fattr->f_uid = server->mnt->uid;
fattr->f_gid = server->mnt->gid;
- fattr->f_blksize = SMB_ST_BLKSIZE;
fattr->f_unix = 0;
}
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c8e96195b96..0fb74697abc 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -49,8 +49,7 @@ int smb_init_request_cache(void)
void smb_destroy_request_cache(void)
{
- if (kmem_cache_destroy(req_cachep))
- printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
+ kmem_cache_destroy(req_cachep);
}
/*
diff --git a/fs/stat.c b/fs/stat.c
index 3a44dcf97da..60a31d5e596 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -14,6 +14,7 @@
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -32,7 +33,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->ctime = inode->i_ctime;
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
- stat->blksize = inode->i_blksize;
+ stat->blksize = (1 << inode->i_blkbits);
}
EXPORT_SYMBOL(generic_fillattr);
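This stat.c hunk is what makes the many i_blksize removals in this merge safe: generic_fillattr() now derives st_blksize from i_blkbits, the log2 of the filesystem block size, instead of a per-inode i_blksize field that most filesystems simply set to PAGE_SIZE as a preferred I/O size hint. For example:

	/* i_blkbits holds log2(block size); a 4096-byte block size gives 12 */
	inode->i_blkbits = 12;
	stat->blksize = 1 << inode->i_blkbits;	/* == 4096 */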
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index c16a93c353c..98022e41cda 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -176,7 +177,6 @@ const struct file_operations bin_fops = {
* sysfs_create_bin_file - create binary file for object.
* @kobj: object.
* @attr: attribute descriptor.
- *
*/
int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
@@ -191,13 +191,16 @@ int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr)
* sysfs_remove_bin_file - remove binary file for object.
* @kobj: object.
* @attr: attribute descriptor.
- *
*/
-int sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
+void sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr)
{
- sysfs_hash_and_remove(kobj->dentry,attr->attr.name);
- return 0;
+ if (sysfs_hash_and_remove(kobj->dentry, attr->attr.name) < 0) {
+ printk(KERN_ERR "%s: "
+ "bad dentry or inode or no such file: \"%s\"\n",
+ __FUNCTION__, attr->attr.name);
+ dump_stack();
+ }
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 61c42430cba..5f3d725d112 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,7 +43,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
memset(sd, 0, sizeof(*sd));
atomic_set(&sd->s_count, 1);
- atomic_set(&sd->s_event, 0);
+ atomic_set(&sd->s_event, 1);
INIT_LIST_HEAD(&sd->s_children);
list_add(&sd->s_sibling, &parent_sd->s_children);
sd->s_element = element;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 9889e54e1f1..e79e38d52c0 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -12,6 +12,7 @@
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
+#include <linux/errno.h>
#include "sysfs.h"
extern struct super_block * sysfs_sb;
@@ -124,7 +125,6 @@ struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent * sd)
{
struct inode * inode = new_inode(sysfs_sb);
if (inode) {
- inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_mapping->a_ops = &sysfs_aops;
inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
@@ -234,17 +234,18 @@ void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
}
}
-void sysfs_hash_and_remove(struct dentry * dir, const char * name)
+int sysfs_hash_and_remove(struct dentry * dir, const char * name)
{
struct sysfs_dirent * sd;
struct sysfs_dirent * parent_sd;
+ int found = 0;
if (!dir)
- return;
+ return -ENOENT;
if (dir->d_inode == NULL)
/* no inode means this hasn't been made visible yet */
- return;
+ return -ENOENT;
parent_sd = dir->d_fsdata;
mutex_lock(&dir->d_inode->i_mutex);
@@ -255,8 +256,11 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
list_del_init(&sd->s_sibling);
sysfs_drop_dentry(sd, dir);
sysfs_put(sd);
+ found = 1;
break;
}
}
mutex_unlock(&dir->d_inode->i_mutex);
+
+ return found ? 0 : -ENOENT;
}
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index d2eac3ceed5..f50e3cc2ded 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -3,6 +3,7 @@
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/namei.h>
@@ -82,10 +83,19 @@ exit1:
*/
int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name)
{
- struct dentry * dentry = kobj->dentry;
+ struct dentry *dentry = NULL;
int error = -EEXIST;
- BUG_ON(!kobj || !kobj->dentry || !name);
+ BUG_ON(!name);
+
+ if (!kobj) {
+ if (sysfs_mount && sysfs_mount->mnt_sb)
+ dentry = sysfs_mount->mnt_sb->s_root;
+ } else
+ dentry = kobj->dentry;
+
+ if (!dentry)
+ return -EFAULT;
mutex_lock(&dentry->d_inode->i_mutex);
if (!sysfs_dirent_exist(dentry->d_fsdata, name))
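With this symlink.c hunk, sysfs_create_link() tolerates a NULL kobj and anchors the link at the sysfs root directory instead of tripping the old BUG_ON. A hedged usage sketch (the target kobject and link name are made up):

	struct kobject *target_kobj;	/* some already-registered kobject */
	int error;

	/* passing NULL as the first argument now means "relative to the sysfs root",
	 * so this creates a top-level /sys/example_link pointing at target_kobj's directory
	 */
	error = sysfs_create_link(NULL, target_kobj, "example_link");
	if (error)
		printk(KERN_WARNING "example: sysfs link failed: %d\n", error);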
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3651ffb5ec0..6f3d6bd5288 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -10,7 +10,7 @@ extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *,
umode_t, int);
extern int sysfs_add_file(struct dentry *, const struct attribute *, int);
-extern void sysfs_hash_and_remove(struct dentry * dir, const char * name);
+extern int sysfs_hash_and_remove(struct dentry * dir, const char * name);
extern struct sysfs_dirent *sysfs_find(struct sysfs_dirent *dir, const char * name);
extern int sysfs_create_subdir(struct kobject *, const char *, struct dentry **);
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 9b585d1081c..115ab0d6f4b 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -170,7 +170,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
inode->i_uid = current->fsuid;
inode->i_ino = fs16_to_cpu(sbi, ino);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
SYSV_I(inode)->i_dir_start_lookup = 0;
insert_inode_hash(inode);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 58b2d22142b..d63c5e48b05 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -201,7 +201,7 @@ static void sysv_read_inode(struct inode *inode)
inode->i_ctime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
- inode->i_blocks = inode->i_blksize = 0;
+ inode->i_blocks = 0;
si = SYSV_I(inode);
for (block = 0; block < 10+1+1+1; block++)
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 876639b9332..350cba5d680 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -369,10 +369,9 @@ static int sysv_fill_super(struct super_block *sb, void *data, int silent)
if (64 != sizeof (struct sysv_inode))
panic("sysv fs: bad inode size");
- sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
- memset(sbi, 0, sizeof(struct sysv_sb_info));
sbi->s_sb = sb;
sbi->s_block_base = 0;
@@ -453,10 +452,9 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
if (64 != sizeof (struct sysv_inode))
panic("sysv fs: bad i-node size");
- sbi = kmalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
- memset(sbi, 0, sizeof(struct sysv_sb_info));
sbi->s_sb = sb;
sbi->s_block_base = 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 33323473e3c..8206983f2eb 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -121,7 +121,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
UDF_I_LOCATION(inode).logicalBlockNum = block;
UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
- inode->i_blksize = PAGE_SIZE;
inode->i_blocks = 0;
UDF_I_LENEATTR(inode) = 0;
UDF_I_LENALLOC(inode) = 0;
@@ -130,14 +129,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
{
UDF_I_EFE(inode) = 1;
UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
- UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
- memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
+ UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
}
else
{
UDF_I_EFE(inode) = 0;
- UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
- memset(UDF_I_DATA(inode), 0x00, inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+ UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 605f5111b6d..b223b32db99 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -916,8 +916,6 @@ __udf_read_inode(struct inode *inode)
* i_nlink = 1
* i_op = NULL;
*/
- inode->i_blksize = PAGE_SIZE;
-
bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
if (!bh)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index fcce1a21a51..5dd356cbbda 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -156,8 +156,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(udf_inode_cachep))
- printk(KERN_INFO "udf_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(udf_inode_cachep);
}
/* Superblock operations */
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 9501dcd3b21..2ad1259c6ec 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -255,7 +255,6 @@ cg_found:
inode->i_gid = current->fsgid;
inode->i_ino = cg * uspi->s_ipg + bit;
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
ufsi->i_flags = UFS_I(dir)->i_flags;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 30c6e8a9446..ee1eaa6f4ec 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -741,7 +741,6 @@ void ufs_read_inode(struct inode * inode)
ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
}
- inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/
inode->i_version++;
ufsi->i_lastfrag =
(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 992ee0b87cc..ec79e3091d1 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -611,11 +611,10 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
UFSD("ENTER\n");
- sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
+ sbi = kzalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
if (!sbi)
goto failed_nomem;
sb->s_fs_info = sbi;
- memset(sbi, 0, sizeof(struct ufs_sb_info));
UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
@@ -1245,8 +1244,7 @@ static int init_inodecache(void)
static void destroy_inodecache(void)
{
- if (kmem_cache_destroy(ufs_inode_cachep))
- printk(KERN_INFO "ufs_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(ufs_inode_cachep);
}
#ifdef CONFIG_QUOTA
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 939bd84bc7e..0e8293c5a32 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -91,8 +91,8 @@ kmem_zone_free(kmem_zone_t *zone, void *ptr)
static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
- if (zone && kmem_cache_destroy(zone))
- BUG();
+ if (zone)
+ kmem_cache_destroy(zone);
}
extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 3d4f6dff211..41cfcba7ce4 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -370,7 +370,7 @@ xfs_file_readdir(
/* Try fairly hard to get memory */
do {
- if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
+ if ((read_buf = kmalloc(rlen, GFP_KERNEL)))
break;
rlen >>= 1;
} while (rlen >= 1024);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index d9180020de6..22e3b714f62 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -553,13 +553,13 @@ xfs_vn_follow_link(
ASSERT(dentry);
ASSERT(nd);
- link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
+ link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
if (!link) {
nd_set_link(nd, ERR_PTR(-ENOMEM));
return NULL;
}
- uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL);
+ uio = kmalloc(sizeof(uio_t), GFP_KERNEL);
if (!uio) {
kfree(link);
nd_set_link(nd, ERR_PTR(-ENOMEM));
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 4754f342a5d..9df9ed37d21 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -171,7 +171,6 @@ xfs_revalidate_inode(
break;
}
- inode->i_blksize = xfs_preferred_iosize(mp);
inode->i_generation = ip->i_d.di_gen;
i_size_write(inode, ip->i_d.di_size);
inode->i_blocks =
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 6628d96b6fd..553fa731ade 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -122,7 +122,6 @@ vn_revalidate_core(
inode->i_blocks = vap->va_nblocks;
inode->i_mtime = vap->va_mtime;
inode->i_ctime = vap->va_ctime;
- inode->i_blksize = vap->va_blocksize;
if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
else