From c6d5f5fa658f2569a7baaff5acda261a1316cee9 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 16 Feb 2011 09:34:26 +0100
Subject: hfsplus: lift the 2TB size limit

Replace the hardcoded 2TB limit with a dynamic limit based on the block
size now that we have fixed the few overflows preventing operation with
large volumes.

Signed-off-by: Christoph Hellwig
---
 fs/hfsplus/super.c   | 9 +++++++++
 fs/hfsplus/wrapper.c | 4 ----
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'fs')

diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 84a47b709f5..acaef57e360 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbi->rsrc_clump_blocks)
 		sbi->rsrc_clump_blocks = 1;
 
+	err = generic_check_addressable(sbi->alloc_blksz_shift,
+					sbi->total_blocks);
+	if (err) {
+		printk(KERN_ERR "hfs: filesystem size too large.\n");
+		goto out_free_vhdr;
+	}
+
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_flags |= MS_RDONLY;
 	}
 
+	err = -EINVAL;
+
 	/* Load metadata objects (B*Trees) */
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4b86468125c..2f933e83f5c 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
 
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
 		goto out;
-	if ((u64)part_start + part_size > 0x100000000ULL) {
-		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		goto out;
-	}
 
 	error = -ENOMEM;
 	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
-- 
cgit v1.2.3
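
Note (not part of the patch): the size limit now comes from generic_check_addressable(),
which checks that the last byte of the volume is still reachable through a sector
number and a page cache index for the given allocation block size. Below is a
minimal, hypothetical userspace sketch of that arithmetic; the 4K page size, the
64-bit sector_t, and the helper name check_addressable() are assumptions for
illustration, not code taken from the kernel.

/*
 * Hypothetical userspace sketch of the addressability check; typedefs,
 * PAGE_SHIFT value and the helper name are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4K pages */
typedef uint64_t sector_t;		/* assume 64-bit sector numbers */
typedef unsigned long pgoff_t;		/* page cache index type */

/*
 * Return 0 when a volume of num_blocks allocation blocks, each
 * (1 << blocksize_bits) bytes, is fully addressable; otherwise a
 * negative errno-style value.
 */
static int check_addressable(unsigned blocksize_bits, uint64_t num_blocks)
{
	uint64_t last_block = num_blocks - 1;
	uint64_t last_page = last_block >> (PAGE_SHIFT - blocksize_bits);

	if (num_blocks == 0)
		return 0;
	if (blocksize_bits < 9 || blocksize_bits > PAGE_SHIFT)
		return -22;	/* EINVAL: block must be 512..PAGE_SIZE bytes */

	/* The last 512-byte sector of the volume must fit in sector_t... */
	if (last_block > ((sector_t)~0ULL) >> (blocksize_bits - 9))
		return -27;	/* EFBIG */
	/* ...and the last page index must fit in pgoff_t. */
	if (last_page > (pgoff_t)~0UL)
		return -27;	/* EFBIG */
	return 0;
}

int main(void)
{
	/* 4K allocation blocks (shift 12), ~1 billion blocks ~= 4TB */
	printf("4TB volume: %d\n", check_addressable(12, 1000000000ULL));
	return 0;
}

With a 32-bit sector_t this kind of check works out to roughly the old 2TB cap
(2^32 sectors of 512 bytes); with a 64-bit sector_t the limit scales with the
allocation block size instead of being hardcoded.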