Diffstat (limited to 'drivers/mtd')
-rw-r--r-- drivers/mtd/Kconfig | 2
-rw-r--r-- drivers/mtd/Makefile | 2
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 12
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 8
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 14
-rw-r--r-- drivers/mtd/chips/fwh_lock.h | 4
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 4
-rw-r--r-- drivers/mtd/devices/lart.c | 6
-rw-r--r-- drivers/mtd/devices/m25p80.c | 53
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 52
-rw-r--r-- drivers/mtd/ftl.c | 100
-rw-r--r-- drivers/mtd/inftlcore.c | 2
-rw-r--r-- drivers/mtd/inftlmount.c | 4
-rw-r--r-- drivers/mtd/lpddr/Kconfig | 22
-rw-r--r-- drivers/mtd/lpddr/Makefile | 6
-rw-r--r-- drivers/mtd/lpddr/lpddr_cmds.c | 796
-rw-r--r-- drivers/mtd/lpddr/qinfo_probe.c | 255
-rw-r--r-- drivers/mtd/maps/Kconfig | 21
-rw-r--r-- drivers/mtd/maps/alchemy-flash.c | 2
-rw-r--r-- drivers/mtd/maps/amd76xrom.c | 4
-rw-r--r-- drivers/mtd/maps/cdb89712.c | 13
-rw-r--r-- drivers/mtd/maps/cfi_flagadm.c | 2
-rw-r--r-- drivers/mtd/maps/ck804xrom.c | 4
-rw-r--r-- drivers/mtd/maps/dbox2-flash.c | 2
-rw-r--r-- drivers/mtd/maps/dc21285.c | 7
-rw-r--r-- drivers/mtd/maps/edb7312.c | 2
-rw-r--r-- drivers/mtd/maps/esb2rom.c | 4
-rw-r--r-- drivers/mtd/maps/fortunet.c | 2
-rw-r--r-- drivers/mtd/maps/h720x-flash.c | 8
-rw-r--r-- drivers/mtd/maps/ichxrom.c | 4
-rw-r--r-- drivers/mtd/maps/impa7.c | 2
-rw-r--r-- drivers/mtd/maps/integrator-flash.c | 2
-rw-r--r-- drivers/mtd/maps/ipaq-flash.c | 2
-rw-r--r-- drivers/mtd/maps/ixp2000.c | 6
-rw-r--r-- drivers/mtd/maps/ixp4xx.c | 4
-rw-r--r-- drivers/mtd/maps/mbx860.c | 2
-rw-r--r-- drivers/mtd/maps/nettel.c | 5
-rw-r--r-- drivers/mtd/maps/octagon-5066.c | 2
-rw-r--r-- drivers/mtd/maps/omap_nor.c | 2
-rw-r--r-- drivers/mtd/maps/physmap.c | 47
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 4
-rw-r--r-- drivers/mtd/maps/pmcmsp-flash.c | 2
-rw-r--r-- drivers/mtd/maps/redwood.c | 2
-rw-r--r-- drivers/mtd/maps/rpxlite.c | 2
-rw-r--r-- drivers/mtd/maps/sbc8240.c | 2
-rw-r--r-- drivers/mtd/maps/scb2_flash.c | 8
-rw-r--r-- drivers/mtd/maps/sharpsl-flash.c | 2
-rw-r--r-- drivers/mtd/maps/tqm8xxl.c | 2
-rw-r--r-- drivers/mtd/maps/uclinux.c | 4
-rw-r--r-- drivers/mtd/maps/vmax301.c | 2
-rw-r--r-- drivers/mtd/maps/wr_sbc82xx_flash.c | 2
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 24
-rw-r--r-- drivers/mtd/mtdchar.c | 16
-rw-r--r-- drivers/mtd/mtdconcat.c | 37
-rw-r--r-- drivers/mtd/mtdcore.c | 16
-rw-r--r-- drivers/mtd/mtdoops.c | 9
-rw-r--r-- drivers/mtd/mtdpart.c | 34
-rw-r--r-- drivers/mtd/nand/Kconfig | 2
-rw-r--r-- drivers/mtd/nand/alauda.c | 6
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 4
-rw-r--r-- drivers/mtd/nand/fsl_upm.c | 2
-rw-r--r-- drivers/mtd/nand/nand_base.c | 25
-rw-r--r-- drivers/mtd/nand/nand_bbt.c | 31
-rw-r--r-- drivers/mtd/nand/nandsim.c | 339
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 2
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 10
-rw-r--r-- drivers/mtd/nand/s3c2410.c | 8
-rw-r--r-- drivers/mtd/nand/sharpsl.c | 247
-rw-r--r-- drivers/mtd/nand/tmio_nand.c | 2
-rw-r--r-- drivers/mtd/nftlcore.c | 2
-rw-r--r-- drivers/mtd/nftlmount.c | 4
-rw-r--r-- drivers/mtd/onenand/generic.c | 2
-rw-r--r-- drivers/mtd/onenand/omap2.c | 12
-rw-r--r-- drivers/mtd/onenand/onenand_base.c | 8
-rw-r--r-- drivers/mtd/rfd_ftl.c | 29
-rw-r--r-- drivers/mtd/ssfdc.c | 7
-rw-r--r-- drivers/mtd/ubi/build.c | 9
-rw-r--r-- drivers/mtd/ubi/cdev.c | 3
-rw-r--r-- drivers/mtd/ubi/debug.h | 10
-rw-r--r-- drivers/mtd/ubi/eba.c | 53
-rw-r--r-- drivers/mtd/ubi/gluebi.c | 17
-rw-r--r-- drivers/mtd/ubi/io.c | 28
-rw-r--r-- drivers/mtd/ubi/scan.c | 2
-rw-r--r-- drivers/mtd/ubi/ubi.h | 45
-rw-r--r-- drivers/mtd/ubi/vmt.c | 4
-rw-r--r-- drivers/mtd/ubi/wl.c | 492
86 files changed, 2209 insertions, 855 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8581ed9eafe..7d04fb9ddca 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -324,6 +324,8 @@ source "drivers/mtd/nand/Kconfig"
source "drivers/mtd/onenand/Kconfig"
+source "drivers/mtd/lpddr/Kconfig"
+
source "drivers/mtd/ubi/Kconfig"
endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 40d304d6191..4521b1ecce4 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -29,6 +29,6 @@ obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o
inftl-objs := inftlcore.o inftlmount.o
-obj-y += chips/ maps/ devices/ nand/ onenand/ tests/
+obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
obj-$(CONFIG_MTD_UBI) += ubi/
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c93a8be5d5f..f5ab6fa1057 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -58,8 +58,8 @@ static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -558,8 +558,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
}
for (i=0; i<mtd->numeraseregions;i++){
- printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
+ printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i,(unsigned long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
}
@@ -2058,7 +2058,7 @@ out: put_chip(map, chip, adr);
return ret;
}
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
@@ -2082,7 +2082,7 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
return ret;
}
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d74ec46aa03..f9c435a4267 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -71,8 +71,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
@@ -1774,12 +1774,12 @@ out_unlock:
return ret;
}
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index d4714dd9f7a..6c740f346f9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -42,8 +42,8 @@ static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);
@@ -221,8 +221,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
}
for (i=0; i<mtd->numeraseregions;i++){
- printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
+ printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i, (unsigned long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
}
@@ -964,7 +964,7 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
adr += regions[i].erasesize;
len -= regions[i].erasesize;
- if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
+ if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
i++;
if (adr >> cfi->chipshift) {
@@ -1135,7 +1135,7 @@ retry:
spin_unlock_bh(chip->mutex);
return 0;
}
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -1284,7 +1284,7 @@ retry:
spin_unlock_bh(chip->mutex);
return 0;
}
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index ab44f2b996f..57e0e4e921f 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -77,7 +77,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
}
-static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
@@ -88,7 +88,7 @@ static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
}
-static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 91fbba76763..8c295f40d2a 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
- close_bdev_excl(dev->blkdev);
+ close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
}
kfree(dev);
@@ -246,7 +246,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
return NULL;
/* Get a handle on the device */
- bdev = open_bdev_excl(devname, O_RDWR, NULL);
+ bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index f4bda4cee49..578de1c67bf 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -619,7 +619,7 @@ static struct mtd_partition lart_partitions[] = {
};
#endif
-int __init lart_flash_init (void)
+static int __init lart_flash_init (void)
{
int result;
memset (&mtd,0,sizeof (mtd));
@@ -690,7 +690,7 @@ int __init lart_flash_init (void)
return (result);
}
-void __exit lart_flash_exit (void)
+static void __exit lart_flash_exit (void)
{
#ifndef HAVE_PARTITIONS
del_mtd_device (&mtd);
@@ -705,5 +705,3 @@ module_exit (lart_flash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
-
-
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6659b2275c0..7c3fc766dcf 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -169,9 +170,9 @@ static int wait_till_ready(struct m25p *flash)
*/
static int erase_chip(struct m25p *flash)
{
- DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
- flash->spi->dev.bus_id, __func__,
- flash->mtd.size / 1024);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
+ dev_name(&flash->spi->dev), __func__,
+ (long long)(flash->mtd.size >> 10));
/* Wait until finished previous write command. */
if (wait_till_ready(flash))
@@ -197,7 +198,7 @@ static int erase_chip(struct m25p *flash)
static int erase_sector(struct m25p *flash, u32 offset)
{
DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
- flash->spi->dev.bus_id, __func__,
+ dev_name(&flash->spi->dev), __func__,
flash->mtd.erasesize / 1024, offset);
/* Wait until finished previous write command. */
@@ -232,18 +233,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct m25p *flash = mtd_to_m25p(mtd);
u32 addr,len;
+ uint32_t rem;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
- flash->spi->dev.bus_id, __func__, "at",
- (u32)instr->addr, instr->len);
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
+ dev_name(&flash->spi->dev), __func__, "at",
+ (long long)instr->addr, (long long)instr->len);
/* sanity checks */
if (instr->addr + instr->len > flash->mtd.size)
return -EINVAL;
- if ((instr->addr % mtd->erasesize) != 0
- || (instr->len % mtd->erasesize) != 0) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
return -EINVAL;
- }
addr = instr->addr;
len = instr->len;
@@ -295,7 +296,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_message m;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- flash->spi->dev.bus_id, __func__, "from",
+ dev_name(&flash->spi->dev), __func__, "from",
(u32)from, len);
/* sanity checks */
@@ -367,7 +368,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_message m;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- flash->spi->dev.bus_id, __func__, "to",
+ dev_name(&flash->spi->dev), __func__, "to",
(u32)to, len);
if (retlen)
@@ -563,7 +564,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
tmp = spi_write_then_read(spi, &code, 1, id, 5);
if (tmp < 0) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- spi->dev.bus_id, tmp);
+ dev_name(&spi->dev), tmp);
return NULL;
}
jedec = id[0];
@@ -617,7 +618,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* unrecognized chip? */
if (i == ARRAY_SIZE(m25p_data)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
- spi->dev.bus_id, data->type);
+ dev_name(&spi->dev), data->type);
info = NULL;
/* recognized; is that chip really what's there? */
@@ -658,7 +659,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
if (data && data->name)
flash->mtd.name = data->name;
else
- flash->mtd.name = spi->dev.bus_id;
+ flash->mtd.name = dev_name(&spi->dev);
flash->mtd.type = MTD_NORFLASH;
flash->mtd.writesize = 1;
@@ -677,24 +678,24 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.erasesize = info->sector_size;
}
- dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name,
- flash->mtd.size / 1024);
+ dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)flash->mtd.size >> 10);
DEBUG(MTD_DEBUG_LEVEL2,
- "mtd .name = %s, .size = 0x%.8x (%uMiB) "
+ "mtd .name = %s, .size = 0x%llx (%lldMiB) "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
flash->mtd.name,
- flash->mtd.size, flash->mtd.size / (1024*1024),
+ (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
if (flash->mtd.numeraseregions)
for (i = 0; i < flash->mtd.numeraseregions; i++)
DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%.8x, "
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
".erasesize = 0x%.8x (%uKiB), "
".numblocks = %d }\n",
- i, flash->mtd.eraseregions[i].offset,
+ i, (long long)flash->mtd.eraseregions[i].offset,
flash->mtd.eraseregions[i].erasesize,
flash->mtd.eraseregions[i].erasesize / 1024,
flash->mtd.eraseregions[i].numblocks);
@@ -722,12 +723,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
if (nr_parts > 0) {
for (i = 0; i < nr_parts; i++) {
DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%.8x, "
- ".size = 0x%.8x (%uKiB) }\n",
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
i, parts[i].name,
- parts[i].offset,
- parts[i].size,
- parts[i].size / 1024);
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
flash->partitioned = 1;
return add_mtd_partitions(&flash->mtd, parts, nr_parts);
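Note: the same pattern recurs in several drivers touched by this patch. Once instr->addr, instr->len and mtd->size are 64-bit, alignment checks can no longer use the C modulo operator on 32-bit targets and go through div_u64_rem() from linux/math64.h instead. A minimal, generic illustration of the check (function and parameter names are illustrative, not taken from any one driver):

#include <linux/math64.h>

/* Return 0 if both offset and length are multiples of erasesize. */
static int check_erase_alignment(uint64_t addr, uint64_t len,
				 uint32_t erasesize)
{
	uint32_t rem;

	div_u64_rem(addr, erasesize, &rem);
	if (rem)
		return -EINVAL;
	div_u64_rem(len, erasesize, &rem);
	if (rem)
		return -EINVAL;
	return 0;
}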
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 6dd9aff8bb2..d44f741ae22 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <linux/math64.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -128,7 +129,7 @@ static int dataflash_waitready(struct spi_device *spi)
status = dataflash_status(spi);
if (status < 0) {
DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
status = 0;
}
@@ -152,15 +153,20 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
uint8_t *command;
+ uint32_t rem;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
- spi->dev.bus_id,
- instr->addr, instr->len);
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+ dev_name(&spi->dev), (long long)instr->addr,
+ (long long)instr->len);
/* Sanity checks */
- if ((instr->addr + instr->len) > mtd->size
- || (instr->len % priv->page_size) != 0
- || (instr->addr % priv->page_size) != 0)
+ if (instr->addr + instr->len > mtd->size)
+ return -EINVAL;
+ div_u64_rem(instr->len, priv->page_size, &rem);
+ if (rem)
+ return -EINVAL;
+ div_u64_rem(instr->addr, priv->page_size, &rem);
+ if (rem)
return -EINVAL;
spi_message_init(&msg);
@@ -178,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Calculate flash page address; use block erase (for speed) if
* we're at a block boundary and need to erase the whole block.
*/
- pageaddr = instr->addr / priv->page_size;
+ pageaddr = div_u64(instr->len, priv->page_size);
do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
pageaddr = pageaddr << priv->page_offset;
@@ -197,7 +203,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
if (status < 0) {
printk(KERN_ERR "%s: erase %x, err %d\n",
- spi->dev.bus_id, pageaddr, status);
+ dev_name(&spi->dev), pageaddr, status);
/* REVISIT: can retry instr->retries times; or
* giveup and instr->fail_addr = instr->addr;
*/
@@ -239,7 +245,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
int status;
DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
- priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len));
+ dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
*retlen = 0;
@@ -288,7 +294,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
status = 0;
} else
DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
- priv->spi->dev.bus_id,
+ dev_name(&priv->spi->dev),
(unsigned)from, (unsigned)(from + len),
status);
return status;
@@ -315,7 +321,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
uint8_t *command;
DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
- spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
+ dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
*retlen = 0;
@@ -374,7 +380,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
- spi->dev.bus_id, addr, status);
+ dev_name(&spi->dev), addr, status);
(void) dataflash_waitready(priv->spi);
}
@@ -396,7 +402,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
spi_transfer_del(x + 1);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
- spi->dev.bus_id, addr, writelen, status);
+ dev_name(&spi->dev), addr, writelen, status);
(void) dataflash_waitready(priv->spi);
@@ -416,14 +422,14 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
- spi->dev.bus_id, addr, status);
+ dev_name(&spi->dev), addr, status);
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
if (status & (1 << 6)) {
printk(KERN_ERR "%s: compare page %u, err %d\n",
- spi->dev.bus_id, pageaddr, status);
+ dev_name(&spi->dev), pageaddr, status);
remaining = 0;
status = -EIO;
break;
@@ -667,8 +673,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
if (revision >= 'c')
otp_tag = otp_setup(device, revision);
- dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n",
- name, DIV_ROUND_UP(device->size, 1024),
+ dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
+ name, (long long)((device->size + 1023) >> 10),
pagesize, otp_tag);
dev_set_drvdata(&spi->dev, priv);
@@ -779,7 +785,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
tmp = spi_write_then_read(spi, &code, 1, id, 3);
if (tmp < 0) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- spi->dev.bus_id, tmp);
+ dev_name(&spi->dev), tmp);
return ERR_PTR(tmp);
}
if (id[0] != 0x1f)
@@ -869,7 +875,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
if (status == 0 || status == 0xff)
status = -ENODEV;
return status;
@@ -905,13 +911,13 @@ static int __devinit dataflash_probe(struct spi_device *spi)
/* obsolete AT45DB1282 not (yet?) supported */
default:
DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
- spi->dev.bus_id, status & 0x3c);
+ dev_name(&spi->dev), status & 0x3c);
status = -ENODEV;
}
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
return status;
}
@@ -921,7 +927,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
- DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id);
+ DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
if (mtd_has_partitions() && flash->partitioned)
status = del_mtd_partitions(&flash->mtd);
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9bf581c4f74..a790c062af1 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -109,25 +109,25 @@ module_param(shuffle_freq, int, 0);
/* Each memory region corresponds to a minor device */
typedef struct partition_t {
struct mtd_blktrans_dev mbd;
- u_int32_t state;
- u_int32_t *VirtualBlockMap;
- u_int32_t *VirtualPageMap;
- u_int32_t FreeTotal;
+ uint32_t state;
+ uint32_t *VirtualBlockMap;
+ uint32_t *VirtualPageMap;
+ uint32_t FreeTotal;
struct eun_info_t {
- u_int32_t Offset;
- u_int32_t EraseCount;
- u_int32_t Free;
- u_int32_t Deleted;
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint32_t Free;
+ uint32_t Deleted;
} *EUNInfo;
struct xfer_info_t {
- u_int32_t Offset;
- u_int32_t EraseCount;
- u_int16_t state;
+ uint32_t Offset;
+ uint32_t EraseCount;
+ uint16_t state;
} *XferInfo;
- u_int16_t bam_index;
- u_int32_t *bam_cache;
- u_int16_t DataUnits;
- u_int32_t BlocksPerUnit;
+ uint16_t bam_index;
+ uint32_t *bam_cache;
+ uint16_t DataUnits;
+ uint32_t BlocksPerUnit;
erase_unit_header_t header;
} partition_t;
@@ -199,8 +199,8 @@ static int scan_header(partition_t *part)
static int build_maps(partition_t *part)
{
erase_unit_header_t header;
- u_int16_t xvalid, xtrans, i;
- u_int blocks, j;
+ uint16_t xvalid, xtrans, i;
+ unsigned blocks, j;
int hdr_ok, ret = -1;
ssize_t retval;
loff_t offset;
@@ -269,14 +269,14 @@ static int build_maps(partition_t *part)
/* Set up virtual page map */
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
- part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
+ part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
if (!part->VirtualBlockMap)
goto out_XferInfo;
- memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t));
+ memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
- part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t),
+ part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
GFP_KERNEL);
if (!part->bam_cache)
goto out_VirtualBlockMap;
@@ -290,7 +290,7 @@ static int build_maps(partition_t *part)
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(u_int32_t), &retval,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
(unsigned char *)part->bam_cache);
if (ret)
@@ -332,7 +332,7 @@ out:
======================================================================*/
static int erase_xfer(partition_t *part,
- u_int16_t xfernum)
+ uint16_t xfernum)
{
int ret;
struct xfer_info_t *xfer;
@@ -408,7 +408,7 @@ static int prepare_xfer(partition_t *part, int i)
erase_unit_header_t header;
struct xfer_info_t *xfer;
int nbam, ret;
- u_int32_t ctl;
+ uint32_t ctl;
ssize_t retlen;
loff_t offset;
@@ -430,15 +430,15 @@ static int prepare_xfer(partition_t *part, int i)
}
/* Write the BAM stub */
- nbam = (part->BlocksPerUnit * sizeof(u_int32_t) +
+ nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
ctl = cpu_to_le32(BLOCK_CONTROL);
- for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {
+ for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
&retlen, (u_char *)&ctl);
if (ret)
@@ -461,18 +461,18 @@ static int prepare_xfer(partition_t *part, int i)
======================================================================*/
-static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
- u_int16_t xferunit)
+static int copy_erase_unit(partition_t *part, uint16_t srcunit,
+ uint16_t xferunit)
{
u_char buf[SECTOR_SIZE];
struct eun_info_t *eun;
struct xfer_info_t *xfer;
- u_int32_t src, dest, free, i;
- u_int16_t unit;
+ uint32_t src, dest, free, i;
+ uint16_t unit;
int ret;
ssize_t retlen;
loff_t offset;
- u_int16_t srcunitswap = cpu_to_le16(srcunit);
+ uint16_t srcunitswap = cpu_to_le16(srcunit);
eun = &part->EUNInfo[srcunit];
xfer = &part->XferInfo[xferunit];
@@ -486,7 +486,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(u_int32_t),
+ part->BlocksPerUnit * sizeof(uint32_t),
&retlen, (u_char *) (part->bam_cache));
/* mark the cache bad, in case we get an error later */
@@ -503,7 +503,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
&retlen, (u_char *) &unit);
if (ret) {
@@ -560,7 +560,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
&retlen, (u_char *)&srcunitswap);
if (ret) {
@@ -605,8 +605,8 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
static int reclaim_block(partition_t *part)
{
- u_int16_t i, eun, xfer;
- u_int32_t best;
+ uint16_t i, eun, xfer;
+ uint32_t best;
int queued, ret;
DEBUG(0, "ftl_cs: reclaiming space...\n");
@@ -723,10 +723,10 @@ static void dump_lists(partition_t *part)
}
#endif
-static u_int32_t find_free(partition_t *part)
+static uint32_t find_free(partition_t *part)
{
- u_int16_t stop, eun;
- u_int32_t blk;
+ uint16_t stop, eun;
+ uint32_t blk;
size_t retlen;
int ret;
@@ -749,7 +749,7 @@ static u_int32_t find_free(partition_t *part)
ret = part->mbd.mtd->read(part->mbd.mtd,
part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(u_int32_t),
+ part->BlocksPerUnit * sizeof(uint32_t),
&retlen, (u_char *) (part->bam_cache));
if (ret) {
@@ -786,7 +786,7 @@ static u_int32_t find_free(partition_t *part)
static int ftl_read(partition_t *part, caddr_t buffer,
u_long sector, u_long nblocks)
{
- u_int32_t log_addr, bsize;
+ uint32_t log_addr, bsize;
u_long i;
int ret;
size_t offset, retlen;
@@ -829,14 +829,14 @@ static int ftl_read(partition_t *part, caddr_t buffer,
======================================================================*/
-static int set_bam_entry(partition_t *part, u_int32_t log_addr,
- u_int32_t virt_addr)
+static int set_bam_entry(partition_t *part, uint32_t log_addr,
+ uint32_t virt_addr)
{
- u_int32_t bsize, blk, le_virt_addr;
+ uint32_t bsize, blk, le_virt_addr;
#ifdef PSYCHO_DEBUG
- u_int32_t old_addr;
+ uint32_t old_addr;
#endif
- u_int16_t eun;
+ uint16_t eun;
int ret;
size_t retlen, offset;
@@ -845,11 +845,11 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
bsize = 1 << part->header.EraseUnitSize;
eun = log_addr / bsize;
blk = (log_addr % bsize) / SECTOR_SIZE;
- offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
+ offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
&retlen, (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
@@ -886,7 +886,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
&retlen, (u_char *)&le_virt_addr);
if (ret) {
@@ -900,7 +900,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
static int ftl_write(partition_t *part, caddr_t buffer,
u_long sector, u_long nblocks)
{
- u_int32_t bsize, log_addr, virt_addr, old_addr, blk;
+ uint32_t bsize, log_addr, virt_addr, old_addr, blk;
u_long i;
int ret;
size_t retlen, offset;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 50ce13887f6..73f05227dc8 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct INFTLrecord *inftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH)
+ if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 9113628ed1e..f751dd97c54 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -63,7 +63,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
* otherwise.
*/
inftl->EraseSize = inftl->mbd.mtd->erasesize;
- inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
inftl->MediaUnit = BLOCK_NIL;
@@ -187,7 +187,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
mh->BlockMultiplierBits);
inftl->EraseSize = inftl->mbd.mtd->erasesize <<
mh->BlockMultiplierBits;
- inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+ inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
block >>= mh->BlockMultiplierBits;
}
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 00000000000..acd4ea9b227
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,22 @@
+# drivers/mtd/lpddr/Kconfig
+
+menu "LPDDR flash memory drivers"
+ depends on MTD!=n
+
+config MTD_LPDDR
+ tristate "Support for LPDDR flash chips"
+ select MTD_QINFO_PROBE
+ help
+ This option enables support for LPDDR (Low Power Double Data Rate)
+ flash chips, also known as Mobile DDR. It is a standard for DDR
+ memories intended for battery-operated systems.
+
+config MTD_QINFO_PROBE
+ tristate "Detect flash chips by QINFO probe"
+ help
+ Device information for LPDDR chips is offered through the Overlay
+ Window QINFO interface, which permits the same software to be used
+ for entire families of devices. This serves a similar purpose to
+ CFI on legacy flash products.
+endmenu
+
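Since MTD_LPDDR selects MTD_QINFO_PROBE, enabling the new driver in a board configuration needs only the one symbol; the resulting .config fragment (illustrative) would read:

CONFIG_MTD_LPDDR=y
CONFIG_MTD_QINFO_PROBE=y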
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 00000000000..da48e46b581
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,6 @@
+#
+# linux/drivers/mtd/lpddr/Makefile
+#
+
+obj-$(CONFIG_MTD_QINFO_PROBE) += qinfo_probe.o
+obj-$(CONFIG_MTD_LPDDR) += lpddr_cmds.o
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 00000000000..e22ca49583e
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,796 @@
+/*
+ * LPDDR flash memory device operations. This module provides read, write,
+ * erase, lock/unlock support for LPDDR flash memories
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ * Many thanks to Roman Borisov for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ * TODO:
+ * Implement VPP management
+ * Implement XIP support
+ * Implement OTP support
+ */
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf);
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf);
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys);
+static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int get_chip(struct map_info *map, struct flchip *chip, int mode);
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip);
+
+struct mtd_info *lpddr_cmdset(struct map_info *map)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ struct flchip_shared *shared;
+ struct flchip *chip;
+ struct mtd_info *mtd;
+ int numchips;
+ int i, j;
+
+ mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ return NULL;
+ }
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->read = lpddr_read;
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->flags &= ~MTD_BIT_WRITEABLE;
+ mtd->erase = lpddr_erase;
+ mtd->write = lpddr_write_buffers;
+ mtd->writev = lpddr_writev;
+ mtd->read_oob = NULL;
+ mtd->write_oob = NULL;
+ mtd->sync = NULL;
+ mtd->lock = lpddr_lock;
+ mtd->unlock = lpddr_unlock;
+ mtd->suspend = NULL;
+ mtd->resume = NULL;
+ if (map_is_linear(map)) {
+ mtd->point = lpddr_point;
+ mtd->unpoint = lpddr_unpoint;
+ }
+ mtd->block_isbad = NULL;
+ mtd->block_markbad = NULL;
+ mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+ mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
+ mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
+
+ shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
+ GFP_KERNEL);
+ if (!shared) {
+ kfree(lpddr);
+ kfree(mtd);
+ return NULL;
+ }
+
+ chip = &lpddr->chips[0];
+ numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+ for (i = 0; i < numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ spin_lock_init(&shared[i].lock);
+ for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+ *chip = lpddr->chips[i];
+ chip->start += j << lpddr->chipshift;
+ chip->oldstate = chip->state = FL_READY;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ spin_lock_init(&chip->_spinlock);
+ chip->mutex = &chip->_spinlock;
+ chip++;
+ }
+ }
+
+ return mtd;
+}
+EXPORT_SYMBOL(lpddr_cmdset);
+
+static int wait_for_ready(struct map_info *map, struct flchip *chip,
+ unsigned int chip_op_time)
+{
+ unsigned int timeo, reset_timeo, sleep_time;
+ unsigned int dsr;
+ flstate_t chip_state = chip->state;
+ int ret = 0;
+
+ /* set our timeout to 8 times the expected delay */
+ timeo = chip_op_time * 8;
+ if (!timeo)
+ timeo = 500000;
+ reset_timeo = timeo;
+ sleep_time = chip_op_time / 2;
+
+ for (;;) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ if (!timeo) {
+ printk(KERN_ERR "%s: Flash timeout error state %d \n",
+ map->name, chip_state);
+ ret = -ETIME;
+ break;
+ }
+
+ /* OK Still waiting. Drop the lock, wait a while and retry. */
+ spin_unlock(chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+ * can be performed with a sleeping delay instead
+ * of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
+ spin_lock(chip->mutex);
+
+ while (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ }
+ if (chip->erase_suspended || chip->write_suspended) {
+ /* A suspend occurred while sleeping: reset the timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = chip->write_suspended = 0;
+ }
+ }
+ /* check status for errors */
+ if (dsr & DSR_ERR) {
+ /* Clear DSR*/
+ map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
+ printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
+ map->name, dsr);
+ print_drs_error(dsr);
+ ret = -EIO;
+ }
+ chip->state = FL_READY;
+ return ret;
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+{
+ int ret;
+ DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+ if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
+ && chip->state != FL_SYNCING) {
+ /*
+ * OK. There is a possibility of contention on the write/erase
+ * operations which are global to the real chip and not per
+ * partition. So let's fight it over in the partition which
+ * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+ * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ spin_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = spin_trylock(contender->mutex);
+ spin_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ spin_unlock(chip->mutex);
+ ret = chip_ready(map, contender, mode);
+ spin_lock(chip->mutex);
+
+ if (ret == -EAGAIN) {
+ spin_unlock(contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+ spin_unlock(contender->mutex);
+ return ret;
+ }
+ spin_lock(&shared->lock);
+
+ /* We should not own the chip if it is already in the FL_SYNCING
+ * state. Release the contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender);
+ spin_unlock(contender->mutex);
+ goto retry;
+ }
+ spin_unlock(contender->mutex);
+ }
+
+ /* Check if we have suspended erase on this chip.
+ Must sleep in such a case. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ spin_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ goto retry;
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ spin_unlock(&shared->lock);
+ }
+
+ ret = chip_ready(map, chip, mode);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return ret;
+}
+
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* Prevent setting state FL_SYNCING for chip in suspended state. */
+ if (FL_SYNCING == mode && FL_READY != chip->oldstate)
+ goto sleep;
+
+ switch (chip->state) {
+ case FL_READY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (!lpddr->qinfo->SuspEraseSupp ||
+ !(mode == FL_READY || mode == FL_POINT))
+ goto sleep;
+
+ map_write(map, CMD(LPDDR_SUSPEND),
+ map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ ret = wait_for_ready(map, chip, 0);
+ if (ret) {
+ /* Oops, something went wrong. */
+ /* Resume and pretend we weren't here. */
+ map_write(map, CMD(LPDDR_RESUME),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+ chip->state = FL_ERASING;
+ chip->oldstate = FL_READY;
+ printk(KERN_ERR "%s: suspend operation failed."
+ "State may be wrong \n", map->name);
+ return -EIO;
+ }
+ chip->erase_suspended = 1;
+ chip->state = FL_READY;
+ return 0;
+ /* Erase suspend */
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+
+ default:
+sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ return -EAGAIN;
+ }
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip)
+{
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ spin_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back the ownership */
+ struct flchip *loaner = shared->writing;
+ spin_lock(loaner->mutex);
+ spin_unlock(&shared->lock);
+ spin_unlock(chip->mutex);
+ put_chip(map, loaner);
+ spin_lock(chip->mutex);
+ spin_unlock(loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ spin_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ spin_unlock(&shared->lock);
+ }
+
+ switch (chip->oldstate) {
+ case FL_ERASING:
+ chip->state = chip->oldstate;
+ map_write(map, CMD(LPDDR_RESUME),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+ case FL_READY:
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
+ map->name, chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
+int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
+{
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ map_word datum;
+ int ret, wbufsize, word_gap, words;
+ const struct kvec *vec;
+ unsigned long vec_seek;
+ unsigned long prog_buf_ofs;
+
+ wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+ /* Figure out the number of words to write */
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+ if (!word_gap) {
+ words--;
+ } else {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
+ }
+ /* Write data */
+ /* Get the program buffer offset from PFOW register data first*/
+ prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
+ map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && (len < map_bankwidth(map)))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek, word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, prog_buf_ofs);
+ prog_buf_ofs += map_bankwidth(map);
+ word_gap = 0;
+ }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
+ /* GO GO GO */
+ send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
+ chip->state = FL_WRITING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
+ if (ret) {
+ printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
+ map->name, ret, adr);
+ goto out;
+ }
+
+ out: put_chip(map, chip);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_ERASING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+ send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
+ chip->state = FL_ERASING;
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
+ if (ret) {
+ printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
+ map->name, ret, adr);
+ goto out;
+ }
+ out: put_chip(map, chip);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_READY);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ map_copy_from(map, buf, adr, len);
+ *retlen = len;
+
+ put_chip(map, chip);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+ size_t *retlen, void **mtdbuf, resource_size_t *phys)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ unsigned long ofs, last_end = 0;
+ struct flchip *chip = &lpddr->chips[chipnum];
+ int ret = 0;
+
+ if (!map->virt || (adr + len > mtd->size))
+ return -EINVAL;
+
+ /* ofs: offset within the first chip at which the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+
+ *mtdbuf = (void *)map->virt + chip->start + ofs;
+ *retlen = 0;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ /* We cannot point across chips that are virtually disjoint */
+ if (!last_end)
+ last_end = chip->start;
+ else if (chip->start != last_end)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+ /* get the chip */
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_POINT);
+ spin_unlock(chip->mutex);
+ if (ret)
+ break;
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ last_end += 1 << lpddr->chipshift;
+ chipnum++;
+ chip = &lpddr->chips[chipnum];
+ }
+ return 0;
+}
+
+static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ unsigned long ofs;
+
+ /* ofs: offset within the first chip at which the first read should start */
+ ofs = adr - (chipnum << lpddr->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ chip = &lpddr->chips[chipnum];
+ if (chipnum >= lpddr->numchips)
+ break;
+
+ if ((len + ofs - 1) >> lpddr->chipshift)
+ thislen = (1<<lpddr->chipshift) - ofs;
+ else
+ thislen = len;
+
+ spin_lock(chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if (chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else
+ printk(KERN_WARNING "%s: Warning: unpoint called on non"
+ "pointed region\n", map->name);
+
+ put_chip(map, chip);
+ spin_unlock(chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+}
+
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
+
+ return lpddr_writev(mtd, &vec, 1, to, retlen);
+}
+
+
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs, vec_seek, i;
+ int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
+
+ *retlen = 0;
+ if (!len)
+ return 0;
+
+ chipnum = to >> lpddr->chipshift;
+
+ ofs = to;
+ vec_seek = 0;
+
+ do {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &lpddr->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ (*retlen) += size;
+ len -= size;
+
+ /* Be nice and reschedule with the chip in a usable
+ * state for other processes */
+ cond_resched();
+
+ } while (len);
+
+ return 0;
+}
+
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ if (ofs > mtd->size || (len + ofs) > mtd->size)
+ return -EINVAL;
+
+ while (len > 0) {
+ ret = do_erase_oneblock(mtd, ofs);
+ if (ret)
+ return ret;
+ ofs += size;
+ len -= size;
+ }
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+#define DO_XXLOCK_LOCK 1
+#define DO_XXLOCK_UNLOCK 2
+int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+{
+ int ret = 0;
+ struct map_info *map = mtd->priv;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_LOCKING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ if (thunk == DO_XXLOCK_LOCK) {
+ send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_UNLOCK) {
+ send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+ ret = wait_for_ready(map, chip, 1);
+ if (ret) {
+ printk(KERN_ERR "%s: block unlock error status %d \n",
+ map->name, ret);
+ goto out;
+ }
+out: put_chip(map, chip);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
+}
+
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
+}
+
+int word_program(struct map_info *map, loff_t adr, uint32_t curval)
+{
+ int ret;
+ struct lpddr_private *lpddr = map->fldrv_priv;
+ int chipnum = adr >> lpddr->chipshift;
+ struct flchip *chip = &lpddr->chips[chipnum];
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
+
+ ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
+ if (ret) {
+ printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
+ map->name, adr, curval);
+ goto out;
+ }
+
+out: put_chip(map, chip);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
+MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 00000000000..79bf40f48b7
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,255 @@
+/*
+ * Probing flash chips with QINFO records.
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
+struct mtd_info *lpddr_probe(struct map_info *map);
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
+static int lpddr_pfow_present(struct map_info *map,
+ struct lpddr_private *lpddr);
+
+static struct qinfo_query_info qinfo_array[] = {
+ /* General device info */
+ {0, 0, "DevSizeShift", "Device size 2^n bytes"},
+ {0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
+ /* Erase block information */
+ {1, 1, "TotalBlocksNum", "Total number of blocks"},
+ {1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
+ /* Partition information */
+ {2, 1, "HWPartsNum", "Number of hardware partitions"},
+ /* Optional features */
+ {5, 1, "SuspEraseSupp", "Suspend erase supported"},
+ /* Operation typical time */
+ {10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
+ {10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
+ {10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
+ {10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
+};
+
+static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
+{
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
+ int i;
+ int bankwidth = map_bankwidth(map) * 8;
+ int major, minor;
+
+ for (i = 0; i < qinfo_lines; i++) {
+ if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
+ major = qinfo_array[i].major & ((1 << bankwidth) - 1);
+ minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
+ return minor | (major << bankwidth);
+ }
+ }
+ printk(KERN_ERR "%s: unknown qinfo id string \"%s\"\n", map->name, id_str);
+ BUG();
+ return -1;
+}
+
+static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
+{
+ unsigned int dsr, val;
+ int bits_per_chip = map_bankwidth(map) * 8;
+ unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
+ int attempts = 20;
+
+ /* Write a request for the PFOW record */
+ map_write(map, CMD(LPDDR_INFO_QUERY),
+ map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+ map_write(map, CMD(adr >> bits_per_chip),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+
+ while ((attempts--) > 0) {
+ dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+ if (dsr & DSR_READY_STATUS)
+ break;
+ udelay(10);
+ }
+
+ val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
+ return val;
+}
+
+static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
+{
+ map_word pfow_val[4];
+
+ /* Check identification string */
+ pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
+ pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
+ pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
+ pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
+
+ if (!map_word_equal(map, CMD('P'), pfow_val[0]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('F'), pfow_val[1]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('O'), pfow_val[2]))
+ goto out;
+
+ if (!map_word_equal(map, CMD('W'), pfow_val[3]))
+ goto out;
+
+ return 1; /* "PFOW" is found */
+out:
+ printk(KERN_WARNING "%s: PFOW string not found at 0x%lx\n",
+ map->name, map->pfow_base);
+ return 0;
+}
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
+{
+
+ lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ if (!lpddr->qinfo) {
+ printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
+ map->name);
+ return 0;
+ }
+ memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
+
+ /* Get the ManuID */
+ lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
+ /* Get the DeviceID */
+ lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
+ /* read parameters from chip qinfo table */
+ lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
+ lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
+ lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
+ lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
+ lpddr->qinfo->UniformBlockSizeShift =
+ lpddr_info_query(map, "UniformBlockSizeShift");
+ lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
+ lpddr->qinfo->SingleWordProgTime =
+ lpddr_info_query(map, "SingleWordProgTime");
+ lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
+ lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
+ return 1;
+}
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
+{
+ struct lpddr_private lpddr;
+ struct lpddr_private *retlpddr;
+ int numvirtchips;
+
+
+ if ((map->pfow_base + 0x1000) >= map->size) {
+ printk(KERN_NOTICE "%s: probe at base (0x%08lx) past the end of "
+ "the map (0x%08lx)\n", map->name,
+ (unsigned long)map->pfow_base, map->size - 1);
+ return NULL;
+ }
+ memset(&lpddr, 0, sizeof(struct lpddr_private));
+ if (!lpddr_pfow_present(map, &lpddr))
+ return NULL;
+
+ if (!lpddr_chip_setup(map, &lpddr))
+ return NULL;
+
+ /* Ok so we found a chip */
+ lpddr.chipshift = lpddr.qinfo->DevSizeShift;
+ lpddr.numchips = 1;
+
+ numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
+ retlpddr = kmalloc(sizeof(struct lpddr_private) +
+ numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ if (!retlpddr)
+ return NULL;
+
+ memset(retlpddr, 0, sizeof(struct lpddr_private) +
+ numvirtchips * sizeof(struct flchip));
+ memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
+
+ retlpddr->numchips = numvirtchips;
+ retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
+ __ffs(retlpddr->qinfo->HWPartsNum);
+
+ return retlpddr;
+}
+
+struct mtd_info *lpddr_probe(struct map_info *map)
+{
+ struct mtd_info *mtd = NULL;
+ struct lpddr_private *lpddr;
+
+ /* First probe the map to see if we can open PFOW here */
+ lpddr = lpddr_probe_chip(map);
+ if (!lpddr)
+ return NULL;
+
+ map->fldrv_priv = lpddr;
+ mtd = lpddr_cmdset(map);
+ if (mtd) {
+ if (mtd->size > map->size) {
+ printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
+ "to %ldKiB\n", (unsigned long)(mtd->size >> 10),
+ (unsigned long)map->size >> 10);
+ mtd->size = map->size;
+ }
+ return mtd;
+ }
+
+ kfree(lpddr->qinfo);
+ kfree(lpddr);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+
+static struct mtd_chip_driver lpddr_chipdrv = {
+ .probe = lpddr_probe,
+ .name = "qinfo_probe",
+ .module = THIS_MODULE
+};
+
+static int __init lpddr_probe_init(void)
+{
+ register_mtd_chip_driver(&lpddr_chipdrv);
+ return 0;
+}
+
+static void __exit lpddr_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&lpddr_chipdrv);
+}
+
+module_init(lpddr_probe_init);
+module_exit(lpddr_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
+MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
+
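For context, a hedged sketch (not part of the patch) of how a board map driver might hand a flash window to this probe. The window address, size, bank width and PFOW offset below are placeholders; the only fixed piece is the "qinfo_probe" name registered above, and pfow_base is the map_info field this series introduces.

/* Hedged sketch: point qinfo_probe at a board-specific LPDDR window. */
#include <linux/io.h>
#include <linux/mtd/map.h>

static struct map_info example_lpddr_map = {
	.name      = "example-lpddr",
	.phys      = 0x00000000,	/* board specific window start */
	.size      = 0x00800000,	/* board specific window length */
	.bankwidth = 2,
	.pfow_base = 0x00000000,	/* board specific PFOW offset */
};

static struct mtd_info *example_lpddr_probe(void)
{
	example_lpddr_map.virt = ioremap(example_lpddr_map.phys,
					 example_lpddr_map.size);
	if (!example_lpddr_map.virt)
		return NULL;
	simple_map_init(&example_lpddr_map);
	/* the chip driver above registers itself under this probe name */
	return do_map_probe("qinfo_probe", &example_lpddr_map);
}
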
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ea16936216..0225cbbf22d 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,8 +10,8 @@ config MTD_COMPLEX_MAPPINGS
paged mappings of flash chips.
config MTD_PHYSMAP
- tristate "CFI Flash device in physical memory map"
- depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM
+ tristate "Flash device in physical memory map"
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
help
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
@@ -23,9 +23,20 @@ config MTD_PHYSMAP
To compile this driver as a module, choose M here: the
module will be called physmap.
+config MTD_PHYSMAP_COMPAT
+ bool "Physmap compat support"
+ depends on MTD_PHYSMAP
+ default n
+ help
+ Set up a simple mapping via the Kconfig options. Normally the
+ physmap configuration options are done via your board's
+ resource file.
+
+ If unsure, say N here.
+
config MTD_PHYSMAP_START
hex "Physical start address of flash mapping"
- depends on MTD_PHYSMAP
+ depends on MTD_PHYSMAP_COMPAT
default "0x8000000"
help
This is the physical memory location at which the flash chips
@@ -37,7 +48,7 @@ config MTD_PHYSMAP_START
config MTD_PHYSMAP_LEN
hex "Physical length of flash mapping"
- depends on MTD_PHYSMAP
+ depends on MTD_PHYSMAP_COMPAT
default "0"
help
This is the total length of the mapping of the flash chips on
@@ -51,7 +62,7 @@ config MTD_PHYSMAP_LEN
config MTD_PHYSMAP_BANKWIDTH
int "Bank width in octets"
- depends on MTD_PHYSMAP
+ depends on MTD_PHYSMAP_COMPAT
default "2"
help
This is the total width of the data bus of the flash devices
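The new MTD_PHYSMAP_COMPAT help text refers to doing this setup in the board's resource file instead; a hedged sketch (not part of the patch) of that alternative follows. Addresses and width are placeholders, and pfow_base is only relevant for LPDDR windows handled by qinfo_probe.

/* Hedged sketch: board-code registration of a physmap-flash device. */
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>

static struct physmap_flash_data example_flash_data = {
	.width     = 2,		/* bank width in octets */
	.pfow_base = 0,		/* only used for LPDDR/qinfo_probe windows */
};

static struct resource example_flash_resource = {
	.start = 0x80000000,			/* board specific */
	.end   = 0x80000000 + 0x00800000 - 1,	/* board specific */
	.flags = IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name          = "physmap-flash",
	.id            = 0,
	.dev           = { .platform_data = &example_flash_data },
	.num_resources = 1,
	.resource      = &example_flash_resource,
};

/* registered from board init code with platform_device_register() */
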
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 82811bcb043..845ad4f2a54 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -111,7 +111,7 @@ static struct mtd_partition alchemy_partitions[] = {
static struct mtd_info *mymtd;
-int __init alchemy_mtd_init(void)
+static int __init alchemy_mtd_init(void)
{
struct mtd_partition *parts;
int nb_parts = 0;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index d1eec7d3243..237733d094c 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -232,8 +232,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
- " rom(%u) larger than window(%lu). fixing...\n",
- map->mtd->size, map->map.size);
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index e5059aa3c72..8d92d8db9a9 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -14,7 +14,18 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
-
+/* dynamic ioremap() areas */
+#define FLASH_START 0x00000000
+#define FLASH_SIZE 0x800000
+#define FLASH_WIDTH 4
+
+#define SRAM_START 0x60000000
+#define SRAM_SIZE 0xc000
+#define SRAM_WIDTH 4
+
+#define BOOTROM_START 0x70000000
+#define BOOTROM_SIZE 0x80
+#define BOOTROM_WIDTH 4
static struct mtd_info *flash_mtd;
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 0ecc3f6d735..b4ed8161191 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -88,7 +88,7 @@ struct mtd_partition flagadm_parts[] = {
static struct mtd_info *mymtd;
-int __init init_flagadm(void)
+static int __init init_flagadm(void)
{
printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
FLASH_SIZE, FLASH_PHYS_ADDR);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 1a6feb4474d..5f7a245ed13 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -263,8 +263,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
- " rom(%u) larger than window(%lu). fixing...\n",
- map->mtd->size, map->map.size);
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index e115667bf1d..cfacfa6f45d 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -69,7 +69,7 @@ struct map_info dbox2_flash_map = {
.phys = WINDOW_ADDR,
};
-int __init init_dbox2_flash(void)
+static int __init init_dbox2_flash(void)
{
printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 3aa018c092f..42969fe051b 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd;
*/
static void nw_en_write(void)
{
- extern spinlock_t gpio_lock;
unsigned long flags;
/*
* we want to write a bit pattern XXX1 to Xilinx to enable
* the write gate, which will be open for about the next 2ms.
*/
- spin_lock_irqsave(&gpio_lock, flags);
- cpld_modify(1, 1);
- spin_unlock_irqrestore(&gpio_lock, flags);
+ spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+ spin_unlock_irqrestore(&nw_gpio_lock, flags);
/*
* let the ISA bus to catch on...
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 9433738c166..be9e90b4458 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -71,7 +71,7 @@ static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
static int mtd_parts_nb = 0;
static struct mtd_partition *mtd_parts = 0;
-int __init init_edb7312nor(void)
+static int __init init_edb7312nor(void)
{
static const char *rom_probe_types[] = PROBETYPES;
const char **type;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index bbbcdd4c8d1..11a2f57df9c 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -324,8 +324,8 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
- " rom(%u) larger than window(%lu). fixing...\n",
- map->mtd->size, map->map.size);
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index a8e3fde4cbd..1e43124d498 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -181,7 +181,7 @@ __setup("MTD_Partition=", MTD_New_Partition);
/* Backwards-spelling-compatibility */
__setup("MTD_Partion=", MTD_New_Partition);
-int __init init_fortunet(void)
+static int __init init_fortunet(void)
{
int ix,iy;
for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 35fef655ccc..72c724fa8c2 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -24,8 +24,8 @@ static struct mtd_info *mymtd;
static struct map_info h720x_map = {
.name = "H720X",
.bankwidth = 4,
- .size = FLASH_SIZE,
- .phys = FLASH_PHYS,
+ .size = H720X_FLASH_SIZE,
+ .phys = H720X_FLASH_PHYS,
};
static struct mtd_partition h720x_partitions[] = {
@@ -65,12 +65,12 @@ static const char *probes[] = { "cmdlinepart", NULL };
/*
* Initialize FLASH support
*/
-int __init h720x_mtd_init(void)
+static int __init h720x_mtd_init(void)
{
char *part_type = NULL;
- h720x_map.virt = ioremap(FLASH_PHYS, FLASH_SIZE);
+ h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
if (!h720x_map.virt) {
printk(KERN_ERR "H720x-MTD: ioremap failed\n");
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index aeb6c916e23..c32bc28920b 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -258,8 +258,8 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
- " rom(%u) larger than window(%lu). fixing...\n",
- map->mtd->size, map->map.size);
+ " rom(%llu) larger than window(%lu). fixing...\n",
+ (unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 2682ab51a36..998a27da97f 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -70,7 +70,7 @@ static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
static const char *probes[] = { "cmdlinepart", NULL };
-int __init init_impa7(void)
+static int __init init_impa7(void)
{
static const char *rom_probe_types[] = PROBETYPES;
const char **type;
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 7100ee3c7b0..d2ec262666c 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -105,7 +105,7 @@ static int armflash_probe(struct platform_device *dev)
info->map.bankwidth = plat->width;
info->map.phys = res->start;
info->map.virt = base;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.set_vpp = armflash_set_vpp;
simple_map_init(&info->map);
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index ed58f6a77bd..748c85f635f 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -202,7 +202,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int __init h1900_special_case(void);
-int __init ipaq_mtd_init(void)
+static int __init ipaq_mtd_init(void)
{
struct mtd_partition *parts = NULL;
int nb_parts = 0;
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index dcdb1f17577..d4fb9a3ab4d 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
err = -ENOMEM;
goto Error;
}
- memzero(info, sizeof(struct ixp2000_flash_info));
+ memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);
@@ -188,7 +188,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
*/
info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.read = ixp2000_flash_read8;
info->map.write = ixp2000_flash_write8;
info->map.copy_from = ixp2000_flash_copy_from;
@@ -196,7 +196,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
info->res = request_mem_region(dev->resource->start,
dev->resource->end - dev->resource->start + 1,
- dev->dev.bus_id);
+ dev_name(&dev->dev));
if (!info->res) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 9c7a5fbd4e5..7214b876feb 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
err = -ENOMEM;
goto Error;
}
- memzero(info, sizeof(struct ixp4xx_flash_info));
+ memset(info, 0, sizeof(struct ixp4xx_flash_info));
platform_set_drvdata(dev, info);
@@ -218,7 +218,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
* handle that.
*/
info->map.bankwidth = 2;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.read = ixp4xx_read16,
info->map.write = ixp4xx_probe_write16,
info->map.copy_from = ixp4xx_copy_from,
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 706f67394b0..0eb5a7c8538 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -55,7 +55,7 @@ struct map_info mbx_map = {
.bankwidth = 4,
};
-int __init init_mbx(void)
+static int __init init_mbx(void)
{
printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 965e6c6d6ab..df682667604 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -226,7 +226,7 @@ static int __init nettel_init(void)
if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
- amd_mtd->size>>10);
+ (int)(amd_mtd->size>>10));
amd_mtd->owner = THIS_MODULE;
@@ -362,8 +362,7 @@ static int __init nettel_init(void)
intel_mtd->owner = THIS_MODULE;
- num_intel_partitions = sizeof(nettel_intel_partitions) /
- sizeof(nettel_intel_partitions[0]);
+ num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
if (intelboot) {
/*
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 43e04c1d22a..2b2e4509321 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -184,7 +184,7 @@ void cleanup_oct5066(void)
release_region(PAGE_IO, 1);
}
-int __init init_oct5066(void)
+static int __init init_oct5066(void)
{
int i;
int ret = 0;
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 05f276af15d..7e50e9b1b78 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -101,7 +101,7 @@ static int __init omapflash_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out_release_mem_region;
}
- info->map.name = pdev->dev.bus_id;
+ info->map.name = dev_name(&pdev->dev);
info->map.phys = res->start;
info->map.size = size;
info->map.bankwidth = pdata->width;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index dfbf3f270ce..87743661d48 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -29,7 +29,6 @@ struct physmap_flash_info {
struct map_info map[MAX_RESOURCES];
#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
- struct mtd_partition *parts;
#endif
};
@@ -56,14 +55,10 @@ static int physmap_flash_remove(struct platform_device *dev)
for (i = 0; i < MAX_RESOURCES; i++) {
if (info->mtd[i] != NULL) {
#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts) {
+ if (info->nr_parts || physmap_data->nr_parts)
del_mtd_partitions(info->mtd[i]);
- kfree(info->parts);
- } else if (physmap_data->nr_parts) {
- del_mtd_partitions(info->mtd[i]);
- } else {
+ else
del_mtd_device(info->mtd[i]);
- }
#else
del_mtd_device(info->mtd[i]);
#endif
@@ -73,7 +68,12 @@ static int physmap_flash_remove(struct platform_device *dev)
return 0;
}
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+static const char *rom_probe_types[] = {
+ "cfi_probe",
+ "jedec_probe",
+ "qinfo_probe",
+ "map_rom",
+ NULL };
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
#endif
@@ -86,6 +86,9 @@ static int physmap_flash_probe(struct platform_device *dev)
int err = 0;
int i;
int devices_found = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
physmap_data = dev->dev.platform_data;
if (physmap_data == NULL)
@@ -108,17 +111,18 @@ static int physmap_flash_probe(struct platform_device *dev)
if (!devm_request_mem_region(&dev->dev,
dev->resource[i].start,
dev->resource[i].end - dev->resource[i].start + 1,
- dev->dev.bus_id)) {
+ dev_name(&dev->dev))) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- info->map[i].name = dev->dev.bus_id;
+ info->map[i].name = dev_name(&dev->dev);
info->map[i].phys = dev->resource[i].start;
info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
info->map[i].bankwidth = physmap_data->width;
info->map[i].set_vpp = physmap_data->set_vpp;
+ info->map[i].pfow_base = physmap_data->pfow_base;
info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
info->map[i].size);
@@ -150,7 +154,7 @@ static int physmap_flash_probe(struct platform_device *dev)
* We detected multiple devices. Concatenate them together.
*/
#ifdef CONFIG_MTD_CONCAT
- info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
+ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
if (info->cmtd == NULL)
err = -ENXIO;
#else
@@ -163,9 +167,10 @@ static int physmap_flash_probe(struct platform_device *dev)
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
- err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
if (err > 0) {
- add_mtd_partitions(info->cmtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, parts, err);
+ kfree(parts);
return 0;
}
@@ -251,14 +256,7 @@ static struct platform_driver physmap_flash_driver = {
};
-#ifdef CONFIG_MTD_PHYSMAP_LEN
-#if CONFIG_MTD_PHYSMAP_LEN != 0
-#warning using PHYSMAP compat code
-#define PHYSMAP_COMPAT
-#endif
-#endif
-
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
static struct physmap_flash_data physmap_flash_data = {
.width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
};
@@ -302,7 +300,7 @@ static int __init physmap_init(void)
int err;
err = platform_driver_register(&physmap_flash_driver);
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
if (err == 0)
platform_device_register(&physmap_flash);
#endif
@@ -312,7 +310,7 @@ static int __init physmap_init(void)
static void __exit physmap_exit(void)
{
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
platform_device_unregister(&physmap_flash);
#endif
platform_driver_unregister(&physmap_flash_driver);
@@ -326,8 +324,7 @@ MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Generic configurable MTD map driver");
/* legacy platform drivers can't hotplug or coldplg */
-#ifndef PHYSMAP_COMPAT
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:physmap-flash");
#endif
-
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 5fcfec034a9..fbf0ca939d7 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -183,7 +183,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
err = -EBUSY;
info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev->dev.bus_id);
+ dev_name(&dev->dev));
if (!info->res)
goto err_out;
@@ -194,7 +194,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
goto err_out;
}
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.phys = res.start;
info->map.size = res.end - res.start + 1;
info->map.bankwidth = *width;
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f43ba2815cb..4768bd5459d 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -48,7 +48,7 @@ static int fcnt;
#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
-int __init init_msp_flash(void)
+static int __init init_msp_flash(void)
{
int i, j;
int offset, coff;
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index de002eb1a7f..933c0b63b01 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -122,7 +122,7 @@ struct map_info redwood_flash_map = {
static struct mtd_info *redwood_mtd;
-int __init init_redwood_flash(void)
+static int __init init_redwood_flash(void)
{
int err;
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 14d90edb443..3e3ef53d4fd 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -23,7 +23,7 @@ static struct map_info rpxlite_map = {
.phys = WINDOW_ADDR,
};
-int __init init_rpxlite(void)
+static int __init init_rpxlite(void)
{
printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 6e1e99cd2b5..d5374cdcb16 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -136,7 +136,7 @@ static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
#endif /* CONFIG_MTD_PARTITIONS */
-int __init init_sbc8240_mtd (void)
+static int __init init_sbc8240_mtd (void)
{
static struct _cjs {
u_long addr;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 21169e6d646..7e329f09a54 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -118,7 +118,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
struct mtd_erase_region_info *region = &mtd->eraseregions[i];
if (region->numblocks * region->erasesize > mtd->size) {
- region->numblocks = (mtd->size / region->erasesize);
+ region->numblocks = ((unsigned long)mtd->size /
+ region->erasesize);
done = 1;
} else {
region->numblocks = 0;
@@ -187,8 +188,9 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
return -ENODEV;
}
- printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
- scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);
+ printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
+ (unsigned long long)scb2_mtd->size,
+ (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
add_mtd_device(scb2_mtd);
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 026eab02818..b392f096c70 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -47,7 +47,7 @@ static struct mtd_partition sharpsl_partitions[1] = {
}
};
-int __init init_sharpsl(void)
+static int __init init_sharpsl(void)
{
struct mtd_partition *parts;
int nb_parts = 0;
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index a5d3d8531fa..60146984f4b 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -109,7 +109,7 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
};
#endif
-int __init init_tqm_mtd(void)
+static int __init init_tqm_mtd(void)
{
int idx = 0, ret = 0;
unsigned long flash_addr, flash_size, mtd_size = 0;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 0dc645f8152..81756e39771 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -51,7 +51,7 @@ int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
/****************************************************************************/
-int __init uclinux_mtd_init(void)
+static int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
struct map_info *mapp;
@@ -94,7 +94,7 @@ int __init uclinux_mtd_init(void)
/****************************************************************************/
-void __exit uclinux_mtd_cleanup(void)
+static void __exit uclinux_mtd_cleanup(void)
{
if (uclinux_ram_mtdinfo) {
del_mtd_partitions(uclinux_ram_mtdinfo);
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 5a0c9a353b0..6d452dcdfe3 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -146,7 +146,7 @@ static void __exit cleanup_vmax301(void)
iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
}
-int __init init_vmax301(void)
+static int __init init_vmax301(void)
{
int i;
unsigned long iomapadr;
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 413b0cf9bbd..933a2b6598b 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -74,7 +74,7 @@ do { \
} \
} while (0);
-int __init init_sbc82xx_flash(void)
+static int __init init_sbc82xx_flash(void)
{
volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
int bigflash;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 681d5aca2af..1409f01406f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -133,15 +133,12 @@ static void mtd_blktrans_request(struct request_queue *rq)
}
-static int blktrans_open(struct inode *i, struct file *f)
+static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev;
- struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_ops *tr = dev->tr;
int ret = -ENODEV;
- dev = i->i_bdev->bd_disk->private_data;
- tr = dev->tr;
-
if (!try_module_get(dev->mtd->owner))
goto out;
@@ -164,15 +161,12 @@ static int blktrans_open(struct inode *i, struct file *f)
return ret;
}
-static int blktrans_release(struct inode *i, struct file *f)
+static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev;
- struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev = disk->private_data;
+ struct mtd_blktrans_ops *tr = dev->tr;
int ret = 0;
- dev = i->i_bdev->bd_disk->private_data;
- tr = dev->tr;
-
if (tr->release)
ret = tr->release(dev);
@@ -194,10 +188,10 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -ENOTTY;
}
-static int blktrans_ioctl(struct inode *inode, struct file *file,
+static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
struct mtd_blktrans_ops *tr = dev->tr;
switch (cmd) {
@@ -215,7 +209,7 @@ static struct block_device_operations mtd_blktrans_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
- .ioctl = blktrans_ioctl,
+ .locked_ioctl = blktrans_ioctl,
.getgeo = blktrans_getgeo,
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 963840e9b5b..e9ec59e9a56 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -96,7 +96,7 @@ static int mtd_open(struct inode *inode, struct file *file)
return -ENODEV;
/* You can't open the RO devices RW */
- if ((file->f_mode & 2) && (minor & 1))
+ if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
lock_kernel();
@@ -114,7 +114,7 @@ static int mtd_open(struct inode *inode, struct file *file)
}
/* You can't open it RW if it's not a writeable device */
- if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
+ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@@ -144,7 +144,7 @@ static int mtd_close(struct inode *inode, struct file *file)
DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & 2) && mtd->sync)
+ if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
put_mtd_device(mtd);
@@ -443,23 +443,27 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct erase_info *erase;
- if(!(file->f_mode & 2))
+ if(!(file->f_mode & FMODE_WRITE))
return -EPERM;
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
else {
+ struct erase_info_user einfo;
+
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&erase->addr, argp,
+ if (copy_from_user(&einfo, argp,
sizeof(struct erase_info_user))) {
kfree(erase);
return -EFAULT;
}
+ erase->addr = einfo.start;
+ erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -497,7 +501,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
struct mtd_oob_buf __user *user_buf = argp;
uint32_t retlen;
- if(!(file->f_mode & 2))
+ if(!(file->f_mode & FMODE_WRITE))
return -EPERM;
if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 789842d0e6f..3dbb1b38db6 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -197,7 +197,7 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
continue;
}
- size = min(total_len, (size_t)(subdev->size - to));
+ size = min_t(uint64_t, total_len, subdev->size - to);
wsize = size; /* store for future use */
entry_high = entry_low;
@@ -385,7 +385,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_info *subdev;
int i, err;
- u_int32_t length, offset = 0;
+ uint64_t length, offset = 0;
struct erase_info *erase;
if (!(mtd->flags & MTD_WRITEABLE))
@@ -518,7 +518,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
return 0;
}
-static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
@@ -528,7 +528,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- size_t size;
+ uint64_t size;
if (ofs >= subdev->size) {
size = 0;
@@ -556,7 +556,7 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
return err;
}
-static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = 0;
@@ -566,7 +566,7 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- size_t size;
+ uint64_t size;
if (ofs >= subdev->size) {
size = 0;
@@ -691,12 +691,12 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
int num_devs, /* number of subdevices */
- char *name)
+ const char *name)
{ /* name for the new device */
int i;
size_t size;
struct mtd_concat *concat;
- u_int32_t max_erasesize, curr_erasesize;
+ uint32_t max_erasesize, curr_erasesize;
int num_erase_region;
printk(KERN_NOTICE "Concatenating MTD devices:\n");
@@ -842,12 +842,14 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.erasesize = curr_erasesize;
concat->mtd.numeraseregions = 0;
} else {
+ uint64_t tmp64;
+
/*
* erase block size varies across the subdevices: allocate
* space to store the data describing the variable erase regions
*/
struct mtd_erase_region_info *erase_region_p;
- u_int32_t begin, position;
+ uint64_t begin, position;
concat->mtd.erasesize = max_erasesize;
concat->mtd.numeraseregions = num_erase_region;
@@ -879,8 +881,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
erase_region_p->offset = begin;
erase_region_p->erasesize =
curr_erasesize;
- erase_region_p->numblocks =
- (position - begin) / curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
begin = position;
curr_erasesize = subdev[i]->erasesize;
@@ -897,9 +900,9 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
erase_region_p->offset = begin;
erase_region_p->erasesize =
curr_erasesize;
- erase_region_p->numblocks =
- (position -
- begin) / curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
begin = position;
curr_erasesize =
@@ -909,14 +912,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
}
position +=
subdev[i]->eraseregions[j].
- numblocks * curr_erasesize;
+ numblocks * (uint64_t)curr_erasesize;
}
}
}
/* Now write the final entry */
erase_region_p->offset = begin;
erase_region_p->erasesize = curr_erasesize;
- erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
}
return &concat->mtd;
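The hunks above replace plain 32-bit divisions with do_div(); as a reminder (not part of the patch), do_div() divides its 64-bit first argument in place, leaving the quotient in the variable and returning the 32-bit remainder, which is why tmp64 is loaded first and the quotient read back afterwards. A tiny illustration:

/* Hedged illustration of the do_div() pattern used in mtd_concat_create(). */
#include <linux/types.h>
#include <asm/div64.h>

static inline uint32_t example_numblocks(uint64_t span, uint32_t erasesize)
{
	uint32_t rem = do_div(span, erasesize);	/* span now holds the quotient */

	(void)rem;	/* remainder is unused here, as in the hunks above */
	return (uint32_t)span;
}
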
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9d24694982..76fe0a1e7a5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,19 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->index = i;
mtd->usecount = 0;
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
&& (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
@@ -344,7 +357,8 @@ static inline int mtd_proc_info (char *buf, int i)
if (!this)
return 0;
- return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size,
+ return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+ (unsigned long long)this->size,
this->erasesize, this->name);
}
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index aebb3b27edb..1a6b3beabe8 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -80,9 +80,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
- printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
+ printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
"on \"%s\" failed\n",
- erase.addr, erase.len, mtd->name);
+ (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
return ret;
}
@@ -289,7 +289,10 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
}
cxt->mtd = mtd;
- cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
+ if (mtd->size > INT_MAX)
+ cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
+ else
+ cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
find_next_position(cxt);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3728913fa5f..144e6b613a7 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -26,7 +26,7 @@ static LIST_HEAD(mtd_partitions);
struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
- u_int32_t offset;
+ uint64_t offset;
int index;
struct list_head list;
int registered;
@@ -235,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr)
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
-static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
@@ -243,7 +243,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
return part->master->lock(part->master, ofs + part->offset, len);
}
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(del_mtd_partitions);
static struct mtd_part *add_one_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
- u_int32_t cur_offset)
+ uint64_t cur_offset)
{
struct mtd_part *slave;
@@ -395,19 +395,19 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
slave->offset = cur_offset;
- if ((cur_offset % master->erasesize) != 0) {
+ if (mtd_mod_by_eb(cur_offset, master) != 0) {
/* Round up to next erasesize */
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
printk(KERN_NOTICE "Moving partition %d: "
- "0x%08x -> 0x%08x\n", partno,
- cur_offset, slave->offset);
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset, (unsigned long long)slave->offset);
}
}
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
- printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
- slave->offset + slave->mtd.size, slave->mtd.name);
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+ (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
/* let's do some sanity checks */
if (slave->offset >= master->size) {
@@ -420,13 +420,13 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
if (slave->offset + slave->mtd.size > master->size) {
slave->mtd.size = master->size - slave->offset;
- printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
- part->name, master->name, slave->mtd.size);
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, master->name, (unsigned long long)slave->mtd.size);
}
if (master->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = master->numeraseregions;
- u32 end = slave->offset + slave->mtd.size;
+ u64 end = slave->offset + slave->mtd.size;
struct mtd_erase_region_info *regions = master->eraseregions;
/* Find the first erase regions which is part of this
@@ -449,7 +449,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->offset % slave->mtd.erasesize)) {
+ mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
@@ -458,7 +458,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
part->name);
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->mtd.size % slave->mtd.erasesize)) {
+ mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
slave->mtd.flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
part->name);
@@ -466,7 +466,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.ecclayout = master->ecclayout;
if (master->block_isbad) {
- uint32_t offs = 0;
+ uint64_t offs = 0;
while (offs < slave->mtd.size) {
if (master->block_isbad(master,
@@ -501,7 +501,7 @@ int add_mtd_partitions(struct mtd_info *master,
int nbparts)
{
struct mtd_part *slave;
- u_int32_t cur_offset = 0;
+ uint64_t cur_offset = 0;
int i;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c2e9450d66..f8ae0400c49 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on ARCH_MX2
+ depends on ARCH_MX2 || ARCH_MX3
help
This enables the driver for the NAND flash controller on the
MXC processors.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 96238039485..6d9649159a1 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -676,11 +676,11 @@ static int alauda_probe(struct usb_interface *interface,
goto error;
al->write_out = usb_sndbulkpipe(al->dev,
- ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ usb_endpoint_num(ep_wr));
al->bulk_in = usb_rcvbulkpipe(al->dev,
- ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ usb_endpoint_num(ep_in));
al->bulk_out = usb_sndbulkpipe(al->dev,
- ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ usb_endpoint_num(ep_out));
/* second device is identical up to now */
memcpy(al+1, al, sizeof(*al));
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4aa5bd6158d..65929db2944 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -777,7 +777,9 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
/* Fill in fsl_elbc_mtd structure */
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- priv->fmr = 0; /* rest filled in later */
+
+ /* Set the ECCM according to the settings in bootloader.*/
+ priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index a83192f80eb..7815a404a63 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -222,7 +222,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
if (fun->rnb_gpio >= 0) {
- ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id);
+ ret = gpio_request(fun->rnb_gpio, dev_name(&ofdev->dev));
if (ret) {
dev_err(&ofdev->dev, "can't request RNB gpio\n");
goto err2;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0a9c9cd33f9..0c3afccde8a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2014,13 +2014,14 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt)
{
- int page, len, status, pages_per_block, ret, chipnr;
+ int page, status, pages_per_block, ret, chipnr;
struct nand_chip *chip = mtd->priv;
- int rewrite_bbt[NAND_MAX_CHIPS]={0};
+ loff_t rewrite_bbt[NAND_MAX_CHIPS]={0};
unsigned int bbt_masked_page = 0xffffffff;
+ loff_t len;
- DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
- (unsigned int)instr->addr, (unsigned int)instr->len);
+ DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n",
+ (unsigned long long)instr->addr, (unsigned long long)instr->len);
/* Start address must align on block boundary */
if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
@@ -2116,7 +2117,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
"Failed erase, page 0x%08x\n", page);
instr->state = MTD_ERASE_FAILED;
- instr->fail_addr = (page << chip->page_shift);
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
goto erase_exit;
}
@@ -2126,7 +2128,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
*/
if (bbt_masked_page != 0xffffffff &&
(page & BBT_PAGE_MASK) == bbt_masked_page)
- rewrite_bbt[chipnr] = (page << chip->page_shift);
+ rewrite_bbt[chipnr] =
+ ((loff_t)page << chip->page_shift);
/* Increment page address and decrement length */
len -= (1 << chip->phys_erase_shift);
@@ -2173,7 +2176,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
continue;
/* update the BBT for chip */
DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
- "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
+ "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
chip->bbt_td->pages[chipnr]);
nand_update_bbt(mtd, rewrite_bbt[chipnr]);
}
@@ -2365,7 +2368,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!mtd->name)
mtd->name = type->name;
- chip->chipsize = type->chipsize << 20;
+ chip->chipsize = (uint64_t)type->chipsize << 20;
/* Newer devices have all the information in additional id bytes */
if (!type->pagesize) {
@@ -2423,7 +2426,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->bbt_erase_shift = chip->phys_erase_shift =
ffs(mtd->erasesize) - 1;
- chip->chip_shift = ffs(chip->chipsize) - 1;
+ if (chip->chipsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ else
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
@@ -2517,7 +2523,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
*
* This is the second phase of the normal nand_scan() function. It
* fills out all the uninitialized function pointers with the defaults
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 0b1c48595f1..55c23e5cd21 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -171,16 +171,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
- printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
mtd->ecc_stats.bbtblocks++;
continue;
}
/* Leave it for now, if its matured we can move this
* message to MTD_DEBUG_LEVEL0 */
- printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
/* Factory marked bad or worn out ? */
if (tmp == 0)
this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
@@ -284,7 +284,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize);
td->version[0] = buf[mtd->writesize + td->veroffs];
printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -293,7 +293,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
+ scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
mtd->writesize);
md->version[0] = buf[mtd->writesize + md->veroffs];
printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -411,7 +411,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
startblock = chip * numblocks;
numblocks += startblock;
- from = startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
for (i = startblock; i < numblocks;) {
@@ -428,8 +428,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
if (ret) {
this->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
- i >> 1, (unsigned int)from);
+ printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
+ i >> 1, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
@@ -495,7 +495,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
for (block = 0; block < td->maxblocks; block++) {
int actblock = startblock + dir * block;
- loff_t offs = actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
scan_read_raw(mtd, buf, offs, mtd->writesize);
@@ -719,7 +719,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
- einfo.addr = (unsigned long)to;
+ einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(mtd, &einfo, 1);
if (res < 0)
@@ -729,8 +729,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (res < 0)
goto outerr;
- printk(KERN_DEBUG "Bad block table written to 0x%08x, version "
- "0x%02X\n", (unsigned int)to, td->version[chip]);
+ printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
+ "0x%02X\n", (unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip] = page;
@@ -910,7 +910,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
newval = oldval | (0x2 << (block & 0x06));
this->bbt[(block >> 3)] = newval;
if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
continue;
}
update = 0;
@@ -931,7 +931,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
new ones have been marked, then we need to update the stored
bbts. This should only happen once. */
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
}
}
@@ -1027,7 +1027,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
if (!this->bbt || !td)
return -EINVAL;
- len = mtd->size >> (this->bbt_erase_shift + 2);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ae7c57781a6..cd0711b83ac 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,6 +38,9 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -100,6 +103,7 @@ static unsigned int bitflips = 0;
static char *gravepages = NULL;
static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
+static char *cache_file = NULL;
module_param(first_id_byte, uint, 0400);
module_param(second_id_byte, uint, 0400);
@@ -122,12 +126,13 @@ module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
+module_param(cache_file, charp, 0400);
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(access_delay, "Initial page access delay (microiseconds)");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
@@ -153,6 +158,7 @@ MODULE_PARM_DESC(rptwear, "Number of erases inbetween reporting wear, if
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 2048
@@ -266,6 +272,9 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
*/
#define NS_MAX_PREVSTATES 1
+/* Maximum number of page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
/*
* A union to represent flash memory contents and flash buffer.
*/
@@ -295,6 +304,9 @@ struct nandsim {
/* The simulated NAND flash pages array */
union ns_mem *pages;
+ /* Slab allocator for nand pages */
+ struct kmem_cache *nand_pages_slab;
+
/* Internal buffer of page + OOB size bytes */
union ns_mem buf;
@@ -335,6 +347,13 @@ struct nandsim {
int ale; /* address Latch Enable */
int wp; /* write Protect */
} lines;
+
+ /* Fields needed when using a cache file */
+ struct file *cfile; /* Open file */
+ unsigned char *pages_written; /* Which pages have been written */
+ void *file_buf;
+ struct page *held_pages[NS_MAX_HELD_PAGES];
+ int held_cnt;
};
/*
@@ -420,25 +439,69 @@ static struct mtd_info *nsmtd;
static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
/*
- * Allocate array of page pointers and initialize the array to NULL
- * pointers.
+ * Allocate the array of page pointers, create a slab cache for the page
+ * contents, and initialize the array with NULL pointers.
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
static int alloc_device(struct nandsim *ns)
{
- int i;
+ struct file *cfile;
+ int i, err;
+
+ if (cache_file) {
+ cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+ if (IS_ERR(cfile))
+ return PTR_ERR(cfile);
+ if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+ NS_ERR("alloc_device: cache file not readable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+ NS_ERR("alloc_device: cache file not writeable\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ ns->pages_written = vmalloc(ns->geom.pgnum);
+ if (!ns->pages_written) {
+ NS_ERR("alloc_device: unable to allocate pages written array\n");
+ err = -ENOMEM;
+ goto err_close;
+ }
+ ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->file_buf) {
+ NS_ERR("alloc_device: unable to allocate file buf\n");
+ err = -ENOMEM;
+ goto err_free;
+ }
+ ns->cfile = cfile;
+ memset(ns->pages_written, 0, ns->geom.pgnum);
+ return 0;
+ }
ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
if (!ns->pages) {
- NS_ERR("alloc_map: unable to allocate page array\n");
+ NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
}
for (i = 0; i < ns->geom.pgnum; i++) {
ns->pages[i].byte = NULL;
}
+ ns->nand_pages_slab = kmem_cache_create("nandsim",
+ ns->geom.pgszoob, 0, 0, NULL);
+ if (!ns->nand_pages_slab) {
+ NS_ERR("cache_create: unable to create kmem_cache\n");
+ return -ENOMEM;
+ }
return 0;
+
+err_free:
+ vfree(ns->pages_written);
+err_close:
+ filp_close(cfile, NULL);
+ return err;
}
/*
@@ -448,11 +511,20 @@ static void free_device(struct nandsim *ns)
{
int i;
+ if (ns->cfile) {
+ kfree(ns->file_buf);
+ vfree(ns->pages_written);
+ filp_close(ns->cfile, NULL);
+ return;
+ }
+
if (ns->pages) {
for (i = 0; i < ns->geom.pgnum; i++) {
if (ns->pages[i].byte)
- kfree(ns->pages[i].byte);
+ kmem_cache_free(ns->nand_pages_slab,
+ ns->pages[i].byte);
}
+ kmem_cache_destroy(ns->nand_pages_slab);
vfree(ns->pages);
}
}
@@ -464,7 +536,7 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}
-static u_int64_t divide(u_int64_t n, u_int32_t d)
+static uint64_t divide(uint64_t n, uint32_t d)
{
do_div(n, d);
return n;
@@ -480,8 +552,8 @@ static int init_nandsim(struct mtd_info *mtd)
struct nand_chip *chip = (struct nand_chip *)mtd->priv;
struct nandsim *ns = (struct nandsim *)(chip->priv);
int i, ret = 0;
- u_int64_t remains;
- u_int64_t next_offset;
+ uint64_t remains;
+ uint64_t next_offset;
if (NS_IS_INITIALIZED(ns)) {
NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -548,7 +620,7 @@ static int init_nandsim(struct mtd_info *mtd)
remains = ns->geom.totsz;
next_offset = 0;
for (i = 0; i < parts_num; ++i) {
- u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz;
+ uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
@@ -1211,6 +1283,97 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
return -1;
}
+static void put_pages(struct nandsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->held_cnt; i++)
+ page_cache_release(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+ pgoff_t index, start_index, end_index;
+ struct page *page;
+ struct address_space *mapping = file->f_mapping;
+
+ start_index = pos >> PAGE_CACHE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+ return -EINVAL;
+ ns->held_cnt = 0;
+ for (index = start_index; index <= end_index; index++) {
+ page = find_get_page(mapping, index);
+ if (page == NULL) {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (page == NULL) {
+ write_inode_now(mapping->host, 1);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ }
+ if (page == NULL) {
+ put_pages(ns);
+ return -ENOMEM;
+ }
+ unlock_page(page);
+ }
+ ns->held_pages[ns->held_cnt++] = page;
+ }
+ return 0;
+}
+
+static int set_memalloc(void)
+{
+ if (current->flags & PF_MEMALLOC)
+ return 0;
+ current->flags |= PF_MEMALLOC;
+ return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+ if (memalloc)
+ current->flags &= ~PF_MEMALLOC;
+}
+
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+ mm_segment_t old_fs;
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, *pos);
+ if (err)
+ return err;
+ old_fs = get_fs();
+ set_fs(get_ds());
+ memalloc = set_memalloc();
+ tx = vfs_read(file, (char __user *)buf, count, pos);
+ clear_memalloc(memalloc);
+ set_fs(old_fs);
+ put_pages(ns);
+ return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+ mm_segment_t old_fs;
+ ssize_t tx;
+ int err, memalloc;
+
+ err = get_pages(ns, file, count, *pos);
+ if (err)
+ return err;
+ old_fs = get_fs();
+ set_fs(get_ds());
+ memalloc = set_memalloc();
+ tx = vfs_write(file, (char __user *)buf, count, pos);
+ clear_memalloc(memalloc);
+ set_fs(old_fs);
+ put_pages(ns);
+ return tx;
+}
+
/*
* Returns a pointer to the current page.
*/
@@ -1227,6 +1390,38 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}
+int do_read_error(struct nandsim *ns, int num)
+{
+ unsigned int page_no = ns->regs.row;
+
+ if (read_error(page_no)) {
+ int i;
+ memset(ns->buf.byte, 0xFF, num);
+ for (i = 0; i < num; ++i)
+ ns->buf.byte[i] = random32();
+ NS_WARN("simulating read error in page %u\n", page_no);
+ return 1;
+ }
+ return 0;
+}
+
+void do_bit_flips(struct nandsim *ns, int num)
+{
+ if (bitflips && random32() < (1 << 22)) {
+ int flips = 1;
+ if (bitflips > 1)
+ flips = (random32() % (int) bitflips) + 1;
+ while (flips--) {
+ int pos = random32() % (num * 8);
+ ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+ NS_WARN("read_page: flipping bit %d in page %d "
+ "reading from %d ecc: corrected=%u failed=%u\n",
+ pos, ns->regs.row, ns->regs.column + ns->regs.off,
+ nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+ }
+ }
+}
+
/*
* Fill the NAND buffer with data read from the specified page.
*/
@@ -1234,36 +1429,40 @@ static void read_page(struct nandsim *ns, int num)
{
union ns_mem *mypage;
+ if (ns->cfile) {
+ if (!ns->pages_written[ns->regs.row]) {
+ NS_DBG("read_page: page %d not written\n", ns->regs.row);
+ memset(ns->buf.byte, 0xFF, num);
+ } else {
+ loff_t pos;
+ ssize_t tx;
+
+ NS_DBG("read_page: page %d written, reading from %d\n",
+ ns->regs.row, ns->regs.column + ns->regs.off);
+ if (do_read_error(ns, num))
+ return;
+ pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+ tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+ if (tx != num) {
+ NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return;
+ }
+ do_bit_flips(ns, num);
+ }
+ return;
+ }
+
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
- unsigned int page_no = ns->regs.row;
NS_DBG("read_page: page %d allocated, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
- if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
- NS_WARN("simulating read error in page %u\n", page_no);
+ if (do_read_error(ns, num))
return;
- }
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
- if (bitflips && random32() < (1 << 22)) {
- int flips = 1;
- if (bitflips > 1)
- flips = (random32() % (int) bitflips) + 1;
- while (flips--) {
- int pos = random32() % (num * 8);
- ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
- NS_WARN("read_page: flipping bit %d in page %d "
- "reading from %d ecc: corrected=%u failed=%u\n",
- pos, ns->regs.row, ns->regs.column + ns->regs.off,
- nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
- }
- }
+ do_bit_flips(ns, num);
}
}
@@ -1275,11 +1474,20 @@ static void erase_sector(struct nandsim *ns)
union ns_mem *mypage;
int i;
+ if (ns->cfile) {
+ for (i = 0; i < ns->geom.pgsec; i++)
+ if (ns->pages_written[ns->regs.row + i]) {
+ NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+ ns->pages_written[ns->regs.row + i] = 0;
+ }
+ return;
+ }
+
mypage = NS_GET_PAGE(ns);
for (i = 0; i < ns->geom.pgsec; i++) {
if (mypage->byte != NULL) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
- kfree(mypage->byte);
+ kmem_cache_free(ns->nand_pages_slab, mypage->byte);
mypage->byte = NULL;
}
mypage++;
@@ -1295,16 +1503,57 @@ static int prog_page(struct nandsim *ns, int num)
union ns_mem *mypage;
u_char *pg_off;
+ if (ns->cfile) {
+ loff_t off, pos;
+ ssize_t tx;
+ int all;
+
+ NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+ pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+ off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+ if (!ns->pages_written[ns->regs.row]) {
+ all = 1;
+ memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+ } else {
+ all = 0;
+ pos = off;
+ tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+ if (tx != num) {
+ NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ for (i = 0; i < num; i++)
+ pg_off[i] &= ns->buf.byte[i];
+ if (all) {
+ pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+ tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+ if (tx != ns->geom.pgszoob) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ ns->pages_written[ns->regs.row] = 1;
+ } else {
+ pos = off;
+ tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+ if (tx != num) {
+ NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+ return -1;
+ }
+ }
+ return 0;
+ }
+
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
/*
* We allocate memory with GFP_NOFS because a flash FS may
* utilize this. If it is holding an FS lock, then gets here,
- * then kmalloc runs writeback which goes to the FS again
- * and deadlocks. This was seen in practice.
+ * then kernel memory alloc runs writeback which goes to the FS
+ * again and deadlocks. This was seen in practice.
*/
- mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS);
+ mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
if (mypage->byte == NULL) {
NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
return -1;
@@ -1736,13 +1985,17 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
/* Check if chip is expecting command */
if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
- /*
- * We are in situation when something else (not command)
- * was expected but command was input. In this case ignore
- * previous command(s)/state(s) and accept the last one.
- */
- NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
- "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ /* Do not warn if only 2 id bytes are read */
+ if (!(ns->regs.command == NAND_CMD_READID &&
+ NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+ /*
+ * We are in situation when something else (not command)
+ * was expected but command was input. In this case ignore
+ * previous command(s)/state(s) and accept the last one.
+ */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+ "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ }
switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
}
@@ -2044,7 +2297,7 @@ static int __init ns_init_module(void)
}
if (overridesize) {
- u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize;
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
goto err_exit;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index f674c5427b1..75f9f4874ec 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -54,7 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev)
data->chip.priv = &data;
data->mtd.priv = &data->chip;
data->mtd.owner = THIS_MODULE;
- data->mtd.name = pdev->dev.bus_id;
+ data->mtd.name = dev_name(&pdev->dev);
data->chip.IO_ADDR_R = data->io_base;
data->chip.IO_ADDR_W = data->io_base;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 15f0a26730a..cc55cbc2b30 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <asm/dma.h>
+#include <mach/dma.h>
#include <mach/pxa-regs.h>
#include <mach/pxa3xx_nand.h>
@@ -298,7 +298,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
#define NDTR1_tAR(c) (min((c), 15) << 0)
/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1)
+#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
const struct pxa3xx_nand_timing *t)
@@ -368,14 +368,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
/* large block, 2 cycles for column address
* row address starts from 3rd cycle
*/
- info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
+ info->ndcb1 |= page_addr << 16;
if (info->row_addr_cycles == 3)
info->ndcb2 = (page_addr >> 16) & 0xff;
} else
/* small block, 1 cycles for column address
* row address starts from 2nd cycle
*/
- info->ndcb1 = (page_addr << 8) | (column & 0xff);
+ info->ndcb1 = page_addr << 8;
if (cmd == cmdset->program)
info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
@@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
this = &info->nand_chip;
mtd->priv = info;
- info->clk = clk_get(&pdev->dev, "NANDCLK");
+ info->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get nand clock\n");
ret = PTR_ERR(info->clk);
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fd..8e375d5fe23 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
#include <asm/io.h>
-#include <asm/plat-s3c/regs-nand.h>
-#include <asm/plat-s3c/nand.h>
+#include <plat/regs-nand.h>
+#include <plat/nand.h>
#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int hardware_ecc = 1;
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
goto exit_error;
}
- memzero(info, sizeof(*info));
+ memset(info, 0, sizeof(*info));
platform_set_drvdata(pdev, info);
spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
goto exit_error;
}
- memzero(info->mtds, size);
+ memset(info->mtds, 0, size);
/* initialise all possible chips */
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 30a518e211b..54ec7542a7b 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -2,6 +2,7 @@
* drivers/mtd/nand/sharpsl.c
*
* Copyright (C) 2004 Richard Purdie
+ * Copyright (C) 2008 Dmitry Baryshkov
*
* Based on Sharp's NAND driver sharp_sl.c
*
@@ -19,22 +20,31 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
#include <asm/io.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
-static void __iomem *sharpsl_io_base;
-static int sharpsl_phys_base = 0x0C000000;
+struct sharpsl_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+
+ void __iomem *io;
+};
+
+#define mtd_to_sharpsl(_mtd) container_of(_mtd, struct sharpsl_nand, mtd)
/* register offset */
-#define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */
-#define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */
-#define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */
-#define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */
-#define ECCCLRR sharpsl_io_base+0x10 /* cleare ECC */
-#define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */
-#define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */
+#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
+#define ECCCP 0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR 0x0C /* ECC byte counter */
+#define ECCCLRR 0x10 /* clear ECC */
+#define FLASHIO 0x14 /* Flash I/O */
+#define FLASHCTL 0x18 /* Flash Control */
/* Flash control bit */
#define FLRYBY (1 << 5)
@@ -45,35 +55,6 @@ static int sharpsl_phys_base = 0x0C000000;
#define FLCE0 (1 << 0)
/*
- * MTD structure for SharpSL
- */
-static struct mtd_info *sharpsl_mtd = NULL;
-
-/*
- * Define partitions for flash device
- */
-#define DEFAULT_NUM_PARTITIONS 3
-
-static int nr_partitions;
-static struct mtd_partition sharpsl_nand_default_partition_info[] = {
- {
- .name = "System Area",
- .offset = 0,
- .size = 7 * 1024 * 1024,
- },
- {
- .name = "Root Filesystem",
- .offset = 7 * 1024 * 1024,
- .size = 30 * 1024 * 1024,
- },
- {
- .name = "Home Filesystem",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-/*
* hardware specific access to control-lines
* ctrl:
* NAND_CNE: bit 0 -> ! bit 0 & 4
@@ -84,6 +65,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
struct nand_chip *chip = mtd->priv;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -93,103 +75,97 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
bits ^= 0x11;
- writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL);
+ writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
}
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->IO_ADDR_W);
}
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-
-static struct nand_bbt_descr sharpsl_bbt = {
- .options = 0,
- .offs = 4,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr sharpsl_akita_bbt = {
- .options = 0,
- .offs = 4,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_ecclayout akita_oobinfo = {
- .eccbytes = 24,
- .eccpos = {
- 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
- 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
- 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
- .oobfree = {{0x08, 0x09}}
-};
-
static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
{
- return !((readb(FLASHCTL) & FLRYBY) == 0);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
}
static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
- writeb(0, ECCCLRR);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ writeb(0, sharpsl->io + ECCCLRR);
}
static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
{
- ecc_code[0] = ~readb(ECCLPUB);
- ecc_code[1] = ~readb(ECCLPLB);
- ecc_code[2] = (~readb(ECCCP) << 2) | 0x03;
- return readb(ECCCNTR) != 0;
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+ ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+ ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+ return readb(sharpsl->io + ECCCNTR) != 0;
}
#ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
+static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
/*
* Main initialization routine
*/
-static int __init sharpsl_nand_init(void)
+static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
+#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *sharpsl_partition_info;
+ int nr_partitions;
+#endif
+ struct resource *r;
int err = 0;
+ struct sharpsl_nand *sharpsl;
+ struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+
+ if (!data) {
+ dev_err(&pdev->dev, "no platform data!\n");
+ return -EINVAL;
+ }
/* Allocate memory for MTD device structure and private data */
- sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!sharpsl_mtd) {
+ sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+ if (!sharpsl) {
printk("Unable to allocate SharpSL NAND MTD device structure.\n");
return -ENOMEM;
}
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no io memory resource defined!\n");
+ err = -ENODEV;
+ goto err_get_res;
+ }
+
/* map physical address */
- sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
- if (!sharpsl_io_base) {
+ sharpsl->io = ioremap(r->start, resource_size(r));
+ if (!sharpsl->io) {
printk("ioremap to access Sharp SL NAND chip failed\n");
- kfree(sharpsl_mtd);
- return -EIO;
+ err = -EIO;
+ goto err_ioremap;
}
/* Get pointer to private data */
- this = (struct nand_chip *)(&sharpsl_mtd[1]);
-
- /* Initialize structures */
- memset(sharpsl_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
+ this = (struct nand_chip *)(&sharpsl->chip);
/* Link the private data with the MTD structure */
- sharpsl_mtd->priv = this;
- sharpsl_mtd->owner = THIS_MODULE;
+ sharpsl->mtd.priv = this;
+ sharpsl->mtd.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, sharpsl);
/*
* PXA initialize
*/
- writeb(readb(FLASHCTL) | FLWP, FLASHCTL);
+ writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
/* Set address of NAND IO lines */
- this->IO_ADDR_R = FLASHIO;
- this->IO_ADDR_W = FLASHIO;
+ this->IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->IO_ADDR_W = sharpsl->io + FLASHIO;
/* Set address of hardware control function */
this->cmd_ctrl = sharpsl_nand_hwcontrol;
this->dev_ready = sharpsl_nand_dev_ready;
@@ -199,68 +175,89 @@ static int __init sharpsl_nand_init(void)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
this->ecc.bytes = 3;
- this->badblock_pattern = &sharpsl_bbt;
- if (machine_is_akita() || machine_is_borzoi()) {
- this->badblock_pattern = &sharpsl_akita_bbt;
- this->ecc.layout = &akita_oobinfo;
- }
+ this->badblock_pattern = data->badblock_pattern;
+ this->ecc.layout = data->ecc_layout;
this->ecc.hwctl = sharpsl_nand_enable_hwecc;
this->ecc.calculate = sharpsl_nand_calculate_ecc;
this->ecc.correct = nand_correct_data;
/* Scan to find existence of the device */
- err = nand_scan(sharpsl_mtd, 1);
- if (err) {
- iounmap(sharpsl_io_base);
- kfree(sharpsl_mtd);
- return err;
- }
+ err = nand_scan(&sharpsl->mtd, 1);
+ if (err)
+ goto err_scan;
/* Register the partitions */
- sharpsl_mtd->name = "sharpsl-nand";
- nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0);
-
+ sharpsl->mtd.name = "sharpsl-nand";
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
if (nr_partitions <= 0) {
- nr_partitions = DEFAULT_NUM_PARTITIONS;
- sharpsl_partition_info = sharpsl_nand_default_partition_info;
- if (machine_is_poodle()) {
- sharpsl_partition_info[1].size = 22 * 1024 * 1024;
- } else if (machine_is_corgi() || machine_is_shepherd()) {
- sharpsl_partition_info[1].size = 25 * 1024 * 1024;
- } else if (machine_is_husky()) {
- sharpsl_partition_info[1].size = 53 * 1024 * 1024;
- } else if (machine_is_spitz()) {
- sharpsl_partition_info[1].size = 5 * 1024 * 1024;
- } else if (machine_is_akita()) {
- sharpsl_partition_info[1].size = 58 * 1024 * 1024;
- } else if (machine_is_borzoi()) {
- sharpsl_partition_info[1].size = 32 * 1024 * 1024;
- }
+ nr_partitions = data->nr_partitions;
+ sharpsl_partition_info = data->partitions;
}
- add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions);
+ if (nr_partitions > 0)
+ err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
+ else
+#endif
+ err = add_mtd_device(&sharpsl->mtd);
+ if (err)
+ goto err_add;
/* Return happy */
return 0;
-}
-module_init(sharpsl_nand_init);
+err_add:
+ nand_release(&sharpsl->mtd);
+
+err_scan:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+ kfree(sharpsl);
+ return err;
+}
/*
* Clean up routine
*/
-static void __exit sharpsl_nand_cleanup(void)
+static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
{
+ struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+
/* Release resources, unregister device */
- nand_release(sharpsl_mtd);
+ nand_release(&sharpsl->mtd);
- iounmap(sharpsl_io_base);
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap(sharpsl->io);
/* Free the MTD device structure */
- kfree(sharpsl_mtd);
+ kfree(sharpsl);
+
+ return 0;
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+ .driver = {
+ .name = "sharpsl-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = sharpsl_nand_probe,
+ .remove = __devexit_p(sharpsl_nand_remove),
+};
+
+static int __init sharpsl_nand_init(void)
+{
+ return platform_driver_register(&sharpsl_nand_driver);
}
+module_init(sharpsl_nand_init);
-module_exit(sharpsl_nand_cleanup);
+static void __exit sharpsl_nand_exit(void)
+{
+ platform_driver_unregister(&sharpsl_nand_driver);
+}
+module_exit(sharpsl_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
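
With this conversion the machine_is_*() special cases are gone: the bad-block pattern, the ECC layout and the default partition table now reach the driver as platform data supplied by the board file. A sketch of what such a board entry could look like; the field names follow the accesses in the probe above and the <linux/mtd/sharpsl.h> header this patch starts including, while the example_* identifiers, the partition layout and the descriptor values are purely illustrative:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/sharpsl.h>

static struct mtd_partition example_nand_partitions[] = {
	{
		.name	= "System Area",
		.offset	= 0,
		.size	= 7 * 1024 * 1024,
	}, {
		.name	= "Root Filesystem",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static uint8_t example_scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr example_bbt = {
	.options	= 0,
	.offs		= 4,
	.len		= 2,
	.pattern	= example_scan_ff_pattern,
};

static struct sharpsl_nand_platform_data example_nand_pdata = {
	.badblock_pattern	= &example_bbt,
	.ecc_layout		= NULL,	/* NULL lets nand_scan() pick its default layout */
	.partitions		= example_nand_partitions,
	.nr_partitions		= ARRAY_SIZE(example_nand_partitions),
};

static struct resource example_nand_resources[] = {
	{
		.start	= 0x0C000000,	/* the address the old code hard-coded */
		.end	= 0x0C000000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device example_nand_device = {
	.name		= "sharpsl-nand",	/* must match sharpsl_nand_driver */
	.id		= -1,
	.resource	= example_nand_resources,
	.num_resources	= ARRAY_SIZE(example_nand_resources),
	.dev		= {
		.platform_data	= &example_nand_pdata,
	},
};

The board code would register example_nand_device with platform_device_register() so that the platform bus matches it against sharpsl_nand_probe() by name.
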
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index edb1e322113..daa6a4c3b8c 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -433,7 +433,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->chip_delay = 15;
retval = request_irq(irq, &tmio_irq,
- IRQF_DISABLED, dev->dev.bus_id, tmio);
+ IRQF_DISABLED, dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 320b929abe7..d1c4546513f 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -39,7 +39,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
struct NFTLrecord *nftl;
unsigned long temp;
- if (mtd->type != MTD_NANDFLASH)
+ if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ccc4f209fbb..8b22b1836e9 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -51,7 +51,7 @@ static int find_boot_record(struct NFTLrecord *nftl)
the mtd device accordingly. We could even get rid of
nftl->EraseSize if there were any point in doing so. */
nftl->EraseSize = nftl->mbd.mtd->erasesize;
- nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
nftl->MediaUnit = BLOCK_NIL;
nftl->SpareMediaUnit = BLOCK_NIL;
@@ -168,7 +168,7 @@ device is already correct.
printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
mh->UnitSizeFactor);
nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
- nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
+ nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
}
#endif
nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ad81ab8e95e..5b69e7773c6 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -63,7 +63,7 @@ static int __devinit generic_onenand_probe(struct device *dev)
info->onenand.mmcontrol = pdata->mmcontrol;
info->onenand.irq = platform_get_irq(pdev, 0);
- info->mtd.name = pdev->dev.bus_id;
+ info->mtd.name = dev_name(&pdev->dev);
info->mtd.priv = &info->onenand;
info->mtd.owner = THIS_MODULE;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a7e4d985f5e..96ecc1766fa 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
INIT_COMPLETION(c->irq_done);
if (c->gpio_irq) {
- result = omap_get_gpio_datain(c->gpio_irq);
+ result = gpio_get_value(c->gpio_irq);
if (result == -1) {
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
"OneNAND\n", c->gpio_irq);
goto err_iounmap;
}
- omap_set_gpio_direction(c->gpio_irq, 1);
+ gpio_direction_input(c->gpio_irq);
- if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
+ if ((r = request_irq(gpio_to_irq(c->gpio_irq),
omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
pdev->dev.driver->name, c)) < 0)
goto err_release_gpio;
@@ -668,7 +668,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->onenand.base);
c->pdev = pdev;
- c->mtd.name = pdev->dev.bus_id;
+ c->mtd.name = dev_name(&pdev->dev);
c->mtd.priv = &c->onenand;
c->mtd.owner = THIS_MODULE;
@@ -723,7 +723,7 @@ err_release_dma:
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
if (c->gpio_irq)
- free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+ free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
if (c->gpio_irq)
omap_free_gpio(c->gpio_irq);
@@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
omap2_onenand_shutdown(pdev);
platform_set_drvdata(pdev, NULL);
if (c->gpio_irq) {
- free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+ free_irq(gpio_to_irq(c->gpio_irq), c);
omap_free_gpio(c->gpio_irq);
}
iounmap(c->onenand.base);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 90ed319f26e..529af271db1 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1772,7 +1772,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
int len;
int ret = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
block_size = (1 << this->erase_shift);
@@ -1810,7 +1810,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Check if we have a bad block, we do not erase bad blocks */
if (onenand_block_isbad_nolock(mtd, addr, 0)) {
- printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr);
+ printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
@@ -2029,7 +2029,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
*
* Lock one or more blocks
*/
-static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
@@ -2047,7 +2047,7 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
*
* Unlock one or more blocks
*/
-static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index e538c0a72ab..d2aa9c46530 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -21,8 +21,6 @@
#include <asm/types.h>
-#define const_cpu_to_le16 __constant_cpu_to_le16
-
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
@@ -156,7 +154,7 @@ static int scan_header(struct partition *part)
size_t retlen;
sectors_per_block = part->block_size / SECTOR_SIZE;
- part->total_blocks = part->mbd.mtd->size / part->block_size;
+ part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
if (part->total_blocks < 2)
return -ENOENT;
@@ -276,16 +274,17 @@ static void erase_callback(struct erase_info *erase)
part = (struct partition*)erase->priv;
- i = erase->addr / part->block_size;
- if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
- printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
- "on '%s'\n", erase->addr, part->mbd.mtd->name);
+ i = (u32)erase->addr / part->block_size;
+ if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
+ erase->addr > UINT_MAX) {
+ printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
+ "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
return;
}
if (erase->state != MTD_ERASE_DONE) {
- printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
- "state %d\n", erase->addr,
+ printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
+ "state %d\n", (unsigned long long)erase->addr,
part->mbd.mtd->name, erase->state);
part->blocks[i].state = BLOCK_FAILED;
@@ -297,7 +296,7 @@ static void erase_callback(struct erase_info *erase)
return;
}
- magic = const_cpu_to_le16(RFD_MAGIC);
+ magic = cpu_to_le16(RFD_MAGIC);
part->blocks[i].state = BLOCK_ERASED;
part->blocks[i].free_sectors = part->data_sectors_per_block;
@@ -345,9 +344,9 @@ static int erase_block(struct partition *part, int block)
rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
if (rc) {
- printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
- "failed\n", erase->addr, erase->len,
- part->mbd.mtd->name);
+ printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
+ "failed\n", (unsigned long long)erase->addr,
+ (unsigned long long)erase->len, part->mbd.mtd->name);
kfree(erase);
}
@@ -587,7 +586,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
int block, offset, rc;
u_long addr;
size_t retlen;
- u16 del = const_cpu_to_le16(SECTOR_DELETED);
+ u16 del = cpu_to_le16(SECTOR_DELETED);
block = old_addr / part->block_size;
offset = (old_addr % part->block_size) / SECTOR_SIZE -
@@ -763,7 +762,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct partition *part;
- if (mtd->type != MTD_NORFLASH)
+ if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
return;
part = kzalloc(sizeof(struct partition), GFP_KERNEL);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 33a5d6ed6f1..3f67e00d98e 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -294,7 +294,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
int cis_sector;
/* Check for small page NAND flash */
- if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE)
+ if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+ mtd->size > UINT_MAX)
return;
/* Check for SSDFC format by reading CIS/IDI sector */
@@ -316,7 +317,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
ssfdc->erase_size = mtd->erasesize;
- ssfdc->map_len = mtd->size / mtd->erasesize;
+ ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
DEBUG(MTD_DEBUG_LEVEL1,
"SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
@@ -327,7 +328,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
ssfdc->heads = 16;
ssfdc->sectors = 32;
get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
- ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) /
+ ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
((long)ssfdc->sectors * (long)ssfdc->heads));
DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index c7630a22831..9082768cc6c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -280,7 +280,7 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
ubi->dev.release = dev_release;
ubi->dev.devt = ubi->cdev.dev;
ubi->dev.class = ubi_class;
- sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
+ dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
return err;
@@ -561,7 +561,7 @@ static int io_init(struct ubi_device *ubi)
*/
ubi->peb_size = ubi->mtd->erasesize;
- ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize;
+ ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
@@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (err)
goto out_free;
+ err = -ENOMEM;
ubi->peb_buf1 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf1)
goto out_free;
ubi->peb_buf2 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf2)
- goto out_free;
+ goto out_free;
#ifdef CONFIG_MTD_UBI_DEBUG
mutex_init(&ubi->dbg_buf_mutex);
ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
if (!ubi->dbg_peb_buf)
- goto out_free;
+ goto out_free;
#endif
err = attach_by_scanning(ubi);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index b30a0b83d7f..98cf31ed081 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi,
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
- desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE);
+ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+ UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 78e914d23ec..13777e5beac 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -27,11 +27,11 @@
#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
#define ubi_assert(expr) do { \
- if (unlikely(!(expr))) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- ubi_dbg_dump_stack(); \
- } \
+ if (unlikely(!(expr))) { \
+ printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, current->pid); \
+ ubi_dbg_dump_stack(); \
+ } \
} while (0)
#define dbg_msg(fmt, ...) \
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index e04bcf1dff8..048a606cebd 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
if (!vid_hdr)
return -ENOMEM;
- mutex_lock(&ubi->buf_mutex);
-
retry:
new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
if (new_pnum < 0) {
- mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
}
@@ -529,20 +526,23 @@ retry:
goto write_error;
data_size = offset + len;
+ mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf1 + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
- goto out_put;
+ goto out_unlock;
}
memcpy(ubi->peb_buf1 + offset, buf, len);
err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
- if (err)
+ if (err) {
+ mutex_unlock(&ubi->buf_mutex);
goto write_error;
+ }
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ retry:
ubi_msg("data was successfully recovered");
return 0;
-out_put:
+out_unlock:
mutex_unlock(&ubi->buf_mutex);
+out_put:
ubi_wl_put_peb(ubi, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -567,7 +568,6 @@ write_error:
ubi_warn("failed to write to PEB %d", new_pnum);
ubi_wl_put_peb(ubi, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
- mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
@@ -949,10 +949,14 @@ write_error:
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
- * o %0 in case of success;
- * o %1 if the operation was canceled and should be tried later (e.g.,
- * because a bit-flip was detected at the target PEB);
- * o %2 if the volume is being deleted and this LEB should not be moved.
+ * o %0 in case of success;
+ * o %1 if the operation was canceled because the volume is being deleted
+ * or because the PEB was put in the meantime;
+ * o %2 if the operation was canceled because there was a write error to the
+ * target PEB;
+ * o %-EAGAIN if the operation was canceled because a bit-flip was detected
+ * in the target PEB;
+ * o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
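
The reworked return convention above is consumed by the wear-leveling code, which has to tell apart "give the target PEB back and retry later" from "the target PEB itself is suspect". A sketch of how a caller might branch on these codes; the function name and comments are illustrative only, and the fragment assumes the usual drivers/mtd/ubi/ubi.h declarations:

/* Illustration only, not part of the patch. */
static int example_handle_copy(struct ubi_device *ubi, int from, int to,
			       struct ubi_vid_hdr *vid_hdr)
{
	int err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);

	if (err == 0)		/* moved: the source PEB may be erased and reused */
		return 0;
	if (err == 1)		/* cancelled (volume removed or LEB put): retry later */
		return 0;
	if (err == 2)		/* write failure on the target: the target PEB is suspect */
		return 0;
	if (err == -EAGAIN)	/* bit-flip on the target: pick another PEB and retry */
		return 0;
	return err;		/* anything else is a hard I/O or memory error */
}
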
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
/*
* Note, we may race with volume deletion, which means that the volume
* this logical eraseblock belongs to might be being deleted. Since the
- * volume deletion unmaps all the volume's logical eraseblocks, it will
+ * volume deletion un-maps all the volume's logical eraseblocks, it will
* be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
/* No need to do further work, cancel */
dbg_eba("volume %d is being removed, cancel", vol_id);
spin_unlock(&ubi->volumes_lock);
- return 2;
+ return 1;
}
spin_unlock(&ubi->volumes_lock);
@@ -1022,8 +1026,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
/*
- * OK, now the LEB is locked and we can safely start moving iy. Since
- * this function utilizes thie @ubi->peb1_buf buffer which is shared
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb1_buf buffer which is shared
* with some other functions, so lock the buffer by taking the
* @ubi->buf_mutex.
*/
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
- if (err)
+ if (err) {
+ if (err == -EIO)
+ err = 2;
goto out_unlock_buf;
+ }
cond_resched();
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read VID header back from PEB %d", to);
else
- err = 1;
+ err = -EAGAIN;
goto out_unlock_buf;
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
- if (err)
+ if (err) {
+ if (err == -EIO)
+ err = 2;
goto out_unlock_buf;
+ }
cond_resched();
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
ubi_warn("cannot read data back from PEB %d",
to);
else
- err = 1;
+ err = -EAGAIN;
goto out_unlock_buf;
}
cond_resched();
if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
- ubi_warn("read data back from PEB %d - it is different",
- to);
+ ubi_warn("read data back from PEB %d and it is "
+ "different", to);
+ err = -EINVAL;
goto out_unlock_buf;
}
}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 605812bb0b1..6dd4f5e77f8 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -215,7 +215,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
struct ubi_volume *vol;
struct ubi_device *ubi;
- dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr);
+ dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
+ (unsigned long long)instr->addr);
if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
return -EINVAL;
@@ -223,11 +224,11 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
if (instr->len < 0 || instr->addr + instr->len > mtd->size)
return -EINVAL;
- if (instr->addr % mtd->writesize || instr->len % mtd->writesize)
+ if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
- lnum = instr->addr / mtd->erasesize;
- count = instr->len / mtd->erasesize;
+ lnum = mtd_div_by_eb(instr->addr, mtd);
+ count = mtd_div_by_eb(instr->len, mtd);
vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
ubi = vol->ubi;
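
Because mtd->size is a 64-bit quantity in this series, open-coded '/' and '%' against the erasesize and writesize (which would pull 64-bit division libcalls onto 32-bit targets) are replaced by helpers. Their definitions live in include/linux/mtd/mtd.h rather than in this hunk; a plausible sketch of their shape, shown only for reading convenience, is:

/* do_div() divides the 64-bit value in place and returns the 32-bit
 * remainder, so quotient and remainder both stay cheap on 32-bit machines. */
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	return do_div(sz, mtd->writesize);
}
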
@@ -255,7 +256,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
out_err:
instr->state = MTD_ERASE_FAILED;
- instr->fail_addr = lnum * mtd->erasesize;
+ instr->fail_addr = (long long)lnum * mtd->erasesize;
return err;
}
@@ -294,7 +295,7 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
* bytes.
*/
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
- mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+ mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
else
mtd->size = vol->used_bytes;
@@ -304,8 +305,8 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
return -ENFILE;
}
- dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u",
- mtd->index, mtd->name, mtd->size, mtd->erasesize);
+ dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
+ mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
return 0;
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 2fb64be44f1..a74118c0574 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
dbg_io("read EC header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- if (UBI_IO_DEBUG)
- verbose = 1;
err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
if (err) {
@@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no EC header found at PEB %d, "
"only 0xFF bytes", pnum);
+ else if (UBI_IO_DEBUG)
+ dbg_msg("no EC header found at PEB %d, "
+ "only 0xFF bytes", pnum);
return UBI_IO_PEB_EMPTY;
}
@@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dbg_dump_ec_hdr(ec_hdr);
- }
+ } else if (UBI_IO_DEBUG)
+ dbg_msg("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_EC_HDR;
}
@@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad EC header CRC at PEB %d, calculated "
"%#08x, read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_ec_hdr(ec_hdr);
- }
+ } else if (UBI_IO_DEBUG)
+ dbg_msg("bad EC header CRC at PEB %d, calculated "
+ "%#08x, read %#08x", pnum, crc, hdr_crc);
return UBI_IO_BAD_EC_HDR;
}
@@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("read VID header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- if (UBI_IO_DEBUG)
- verbose = 1;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (verbose)
ubi_warn("no VID header found at PEB %d, "
"only 0xFF bytes", pnum);
+ else if (UBI_IO_DEBUG)
+ dbg_msg("no VID header found at PEB %d, "
+ "only 0xFF bytes", pnum);
return UBI_IO_PEB_FREE;
}
@@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dbg_dump_vid_hdr(vid_hdr);
- }
+ } else if (UBI_IO_DEBUG)
+ dbg_msg("bad magic number at PEB %d: %08x instead of "
+ "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_VID_HDR;
}
@@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
ubi_warn("bad CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
ubi_dbg_dump_vid_hdr(vid_hdr);
- }
+ } else if (UBI_IO_DEBUG)
+ dbg_msg("bad CRC at PEB %d, calculated %#08x, "
+ "read %#08x", pnum, crc, hdr_crc);
return UBI_IO_BAD_VID_HDR;
}
@@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
err = paranoid_check_peb_ec_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL: err;
+ return err > 0 ? -EINVAL : err;
vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 4f2daa5bbec..41d47e1cf15 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -320,7 +320,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
}
err = ubi_io_read_data(ubi, buf, pnum, 0, len);
- if (err && err != UBI_IO_BITFLIPS)
+ if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
goto out_free_buf;
data_crc = be32_to_cpu(vid_hdr->data_crc);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 1c3fa18c26a..4a8ec485c91 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
#define UBI_IO_RETRIES 3
/*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles for which PEBs are protected from the
+ * wear-leveling worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/*
* Error codes returned by the I/O sub-system.
*
* UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
@@ -95,7 +102,8 @@ enum {
/**
* struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
* @ec: erase counter
* @pnum: physical eraseblock number
*
@@ -104,7 +112,10 @@ enum {
* RB-trees. See WL sub-system for details.
*/
struct ubi_wl_entry {
- struct rb_node rb;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
int ec;
int pnum;
};
@@ -288,7 +299,7 @@ struct ubi_wl_entry;
* @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
*
* @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- * of UBI ititializetion
+ * of UBI initialization
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
@@ -306,18 +317,17 @@ struct ubi_wl_entry;
* @used: RB-tree of used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
* @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- * fields
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ * protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
+ * fields
* @move_mutex: serializes eraseblock moves
- * @work_sem: sycnhronizes the WL worker with use tasks
+ * @work_sem: synchronizes the WL worker with user tasks
* @wl_scheduled: non-zero if the wear-leveling was scheduled
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
* physical eraseblock
- * @abs_ec: absolute erase counter
* @move_from: physical eraseblock from where the data is being moved
* @move_to: physical eraseblock where the data is being moved to
* @move_to_put: if the "to" PEB was put
@@ -351,11 +361,11 @@ struct ubi_wl_entry;
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
+ * @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-nameing
+ * @mult_mutex: serializes operations on multiple volumes, like re-naming
* @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @dbg_buf_mutex: protects @dbg_peb_buf
*/
struct ubi_device {
struct cdev cdev;
@@ -392,16 +402,13 @@ struct ubi_device {
struct rb_root used;
struct rb_root free;
struct rb_root scrub;
- struct {
- struct rb_root pnum;
- struct rb_root aec;
- } prot;
+ struct list_head pq[UBI_PROT_QUEUE_LEN];
+ int pq_head;
spinlock_t wl_lock;
struct mutex move_mutex;
struct rw_semaphore work_sem;
int wl_scheduled;
struct ubi_wl_entry **lookuptbl;
- unsigned long long abs_ec;
struct ubi_wl_entry *move_from;
struct ubi_wl_entry *move_to;
int move_to_put;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3531ca9a1e2..22e1d7398fc 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -329,7 +329,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.devt = dev;
vol->dev.class = ubi_class;
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err) {
ubi_err("cannot register device");
@@ -678,7 +678,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
vol->dev.class = ubi_class;
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
goto out_gluebi;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 05d70937b54..14901cb82c1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
* UBI wear-leveling sub-system.
*
* This sub-system is responsible for wear-leveling. It works in terms of
- * physical* eraseblocks and erase counters and knows nothing about logical
+ * physical eraseblocks and erase counters and knows nothing about logical
* eraseblocks, volumes, etc. From this sub-system's perspective all physical
* eraseblocks are of two types - used and free. Used physical eraseblocks are
* those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
*
* As it was said, for the UBI sub-system all physical eraseblocks are either
* "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in a set of different RB-trees: @wl->used,
- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
+ * (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait; this is
+ * especially important in case of "short term" physical eraseblocks.
+ *
+ * Physical eraseblocks stay protected only for a limited time, and that "time"
+ * is measured in erase cycles. This is implemented with the help of the
+ * protection queue. Eraseblocks are put at the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how may (global) erase cycles PEBs are protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up into several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
*
* Note, in this implementation, we keep a small in-RAM object for each physical
* eraseblock. This is surely not a scalable solution. But it appears to be good
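To make the scheme described above concrete, here is a minimal userspace model of the protection queue: an array of buckets cycled by a head index, where a freshly handed-out PEB lands in the bucket that will be served last, i.e. it becomes movable again only after a full revolution of the head. All names here (PROT_QUEUE_LEN, struct peb, the two helpers) are illustrative stand-ins for the @wl->pq machinery, not the kernel implementation.

/* Illustrative userspace model of the protection queue (not kernel code). */
#include <stdio.h>
#include <stdlib.h>

#define PROT_QUEUE_LEN 10            /* stand-in for UBI_PROT_QUEUE_LEN */

struct peb {                          /* stand-in for struct ubi_wl_entry */
	int pnum;
	struct peb *next;
};

static struct peb *pq[PROT_QUEUE_LEN];  /* one bucket per pending erase cycle */
static int pq_head;                     /* bucket whose protection expires next */

/* Protect a PEB: put it into the "youngest" bucket, i.e. the queue tail. */
static void prot_queue_add(struct peb *e)
{
	int tail = pq_head - 1;

	if (tail < 0)
		tail = PROT_QUEUE_LEN - 1;
	e->next = pq[tail];
	pq[tail] = e;
}

/* Called once per erase operation: release the oldest bucket. */
static void serve_prot_queue(void)
{
	struct peb *e = pq[pq_head];

	while (e) {
		struct peb *next = e->next;

		printf("PEB %d: protection over\n", e->pnum);
		free(e);
		e = next;
	}
	pq[pq_head] = NULL;
	pq_head = (pq_head + 1) % PROT_QUEUE_LEN;
}

int main(void)
{
	struct peb *e = malloc(sizeof(*e));
	int i;

	if (!e)
		return 1;
	e->pnum = 42;
	prot_queue_add(e);
	for (i = 0; i < PROT_QUEUE_LEN; i++)    /* eligible again after 10 erases */
		serve_prot_queue();
	return 0;
}

The kernel version uses struct list_head buckets and holds @wl->lock, but the ring arithmetic is the same.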
@@ -70,9 +101,6 @@
* target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
* pick target PEB with an average EC if our PEB is not very "old". This is a
* room for future re-works of the WL sub-system.
- *
- * Note: the stuff with protection trees looks too complex and is difficult to
- * understand. Should be fixed.
*/
#include <linux/slab.h>
@@ -85,14 +113,6 @@
#define WL_RESERVED_PEBS 1
/*
- * How many erase cycles are short term, unknown, and long term physical
- * eraseblocks protected.
- */
-#define ST_PROTECTION 16
-#define U_PROTECTION 10
-#define LT_PROTECTION 4
-
-/*
* Maximum difference between two erase counters. If this threshold is
* exceeded, the WL sub-system starts moving data from used physical
* eraseblocks with low erase counter to free physical eraseblocks with high
@@ -120,64 +140,9 @@
#define WL_MAX_FAILURES 32
/**
- * struct ubi_wl_prot_entry - PEB protection entry.
- * @rb_pnum: link in the @wl->prot.pnum RB-tree
- * @rb_aec: link in the @wl->prot.aec RB-tree
- * @abs_ec: the absolute erase counter value when the protection ends
- * @e: the wear-leveling entry of the physical eraseblock under protection
- *
- * When the WL sub-system returns a physical eraseblock, the physical
- * eraseblock is protected from being moved for some "time". For this reason,
- * the physical eraseblock is not directly moved from the @wl->free tree to the
- * @wl->used tree. There is one more tree in between where this physical
- * eraseblock is temporarily stored (@wl->prot).
- *
- * All this protection stuff is needed because:
- * o we don't want to move physical eraseblocks just after we have given them
- * to the user; instead, we first want to let users fill them up with data;
- *
- * o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
- *
- * Physical eraseblocks stay protected only for limited time. But the "time" is
- * measured in erase cycles in this case. This is implemented with help of the
- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
- * the @wl->used tree.
- *
- * Protected physical eraseblocks are searched by physical eraseblock number
- * (when they are put) and by the absolute erase counter (to check if it is
- * time to move them to the @wl->used tree). So there are actually 2 RB-trees
- * storing the protected physical eraseblocks: @wl->prot.pnum and
- * @wl->prot.aec. They are referred to as the "protection" trees. The
- * first one is indexed by the physical eraseblock number. The second one is
- * indexed by the absolute erase counter. Both trees store
- * &struct ubi_wl_prot_entry objects.
- *
- * Each physical eraseblock has 2 main states: free and used. The former state
- * corresponds to the @wl->free tree. The latter state is split up on several
- * sub-states:
- * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is temporarily prohibited (@wl->prot.pnum and
- * @wl->prot.aec trees);
- * o scrubbing is needed (@wl->scrub tree).
- *
- * Depending on the sub-state, wear-leveling entries of the used physical
- * eraseblocks may be kept in one of those trees.
- */
-struct ubi_wl_prot_entry {
- struct rb_node rb_pnum;
- struct rb_node rb_aec;
- unsigned long long abs_ec;
- struct ubi_wl_entry *e;
-};
-
-/**
* struct ubi_work - UBI work description data structure.
* @list: a link in the list of pending works
* @func: worker function
- * @priv: private data of the worker function
* @e: physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
@@ -198,9 +163,11 @@ struct ubi_work {
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root);
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
+#define paranoid_check_in_pq(ubi, e) 0
#endif
/**
@@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
struct ubi_wl_entry *e1;
parent = *p;
- e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
if (e->ec < e1->ec)
p = &(*p)->rb_left;
@@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
}
- rb_link_node(&e->rb, parent, p);
- rb_insert_color(&e->rb, root);
+ rb_link_node(&e->u.rb, parent, p);
+ rb_insert_color(&e->u.rb, root);
}
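The mechanical rb -> u.rb changes in this and the following hunks come from struct ubi_wl_entry now keeping its RB-tree link and its protection-queue link in a union: an entry lives in exactly one of those structures at a time. A hedged sketch of the idea with stand-in types; the kernel's rb_entry()/list_entry() are just container_of():

#include <stddef.h>

struct rb_link   { struct rb_link *left, *right; };     /* stand-in rb_node */
struct list_link { struct list_link *next, *prev; };    /* stand-in list_head */

struct wl_entry {
	union {
		struct rb_link   rb;    /* link in the used/free/scrub trees */
		struct list_link list;  /* link in a protection-queue bucket */
	} u;
	int ec;                         /* erase counter */
	int pnum;                       /* physical eraseblock number */
};

/* rb_entry(p, struct wl_entry, u.rb) expands to container_of(): */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wl_entry *entry_of(struct rb_link *node)
{
	return my_container_of(node, struct wl_entry, u.rb);
}

Sharing the storage saves a pointer pair per PEB and documents the invariant that an entry sits in at most one of the trees or the queue; only the member path passed to rb_entry() changes, which is all these hunks do.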
/**
@@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
while (p) {
struct ubi_wl_entry *e1;
- e1 = rb_entry(p, struct ubi_wl_entry, rb);
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e->pnum == e1->pnum) {
ubi_assert(e == e1);
@@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
- * prot_tree_add - add physical eraseblock to protection trees.
+ * prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
* @e: the physical eraseblock to add
- * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
*
- * @wl->lock has to be locked.
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
*/
-static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
- struct ubi_wl_prot_entry *pe, int abs_ec)
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
- struct rb_node **p, *parent = NULL;
- struct ubi_wl_prot_entry *pe1;
-
- pe->e = e;
- pe->abs_ec = ubi->abs_ec + abs_ec;
-
- p = &ubi->prot.pnum.rb_node;
- while (*p) {
- parent = *p;
- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
-
- if (e->pnum < pe1->e->pnum)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&pe->rb_pnum, parent, p);
- rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
-
- p = &ubi->prot.aec.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;
- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
+ int pq_tail = ubi->pq_head - 1;
- if (pe->abs_ec < pe1->abs_ec)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&pe->rb_aec, parent, p);
- rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+ if (pq_tail < 0)
+ pq_tail = UBI_PROT_QUEUE_LEN - 1;
+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
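The tail computed above is simply pq_head decremented modulo UBI_PROT_QUEUE_LEN, so a PEB queued now is reached by serve_prot_queue() only after a full revolution of the head, i.e. after UBI_PROT_QUEUE_LEN erase operations. A tiny standalone check of that arithmetic (illustrative constant only):

#include <assert.h>

#define PROT_QUEUE_LEN 10               /* stand-in for UBI_PROT_QUEUE_LEN */

static int pq_tail_of(int pq_head)
{
	int tail = pq_head - 1;

	if (tail < 0)                   /* wrap around, as in prot_queue_add() */
		tail = PROT_QUEUE_LEN - 1;
	return tail;
}

int main(void)
{
	assert(pq_tail_of(0) == PROT_QUEUE_LEN - 1);
	assert(pq_tail_of(5) == 4);
	return 0;
}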
/**
@@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
struct rb_node *p;
struct ubi_wl_entry *e;
- e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
max += e->ec;
p = root->rb_node;
while (p) {
struct ubi_wl_entry *e1;
- e1 = rb_entry(p, struct ubi_wl_entry, rb);
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e1->ec >= max)
p = p->rb_left;
else {
@@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
*/
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
- int err, protect, medium_ec;
+ int err, medium_ec;
struct ubi_wl_entry *e, *first, *last;
- struct ubi_wl_prot_entry *pe;
ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
dtype == UBI_UNKNOWN);
- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
- if (!pe)
- return -ENOMEM;
-
retry:
spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
@@ -461,16 +397,13 @@ retry:
ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
spin_unlock(&ubi->wl_lock);
- kfree(pe);
return -ENOSPC;
}
spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
- if (err < 0) {
- kfree(pe);
+ if (err < 0)
return err;
- }
goto retry;
}
@@ -483,7 +416,6 @@ retry:
* %WL_FREE_MAX_DIFF.
*/
e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- protect = LT_PROTECTION;
break;
case UBI_UNKNOWN:
/*
@@ -492,81 +424,63 @@ retry:
* eraseblock with an erase counter greater than or equal to the
* lowest erase counter plus %WL_FREE_MAX_DIFF.
*/
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+ u.rb);
+ last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
if (last->ec - first->ec < WL_FREE_MAX_DIFF)
e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, rb);
+ struct ubi_wl_entry, u.rb);
else {
medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
e = find_wl_entry(&ubi->free, medium_ec);
}
- protect = U_PROTECTION;
break;
case UBI_SHORTTERM:
/*
* For short term data we pick a physical eraseblock with the
* lowest erase counter as we expect it will be erased soon.
*/
- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
- protect = ST_PROTECTION;
+ e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
break;
default:
- protect = 0;
- e = NULL;
BUG();
}
+ paranoid_check_in_wl_tree(e, &ubi->free);
+
/*
- * Move the physical eraseblock to the protection trees where it will
+ * Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
- paranoid_check_in_wl_tree(e, &ubi->free);
- rb_erase(&e->rb, &ubi->free);
- prot_tree_add(ubi, e, pe, protect);
-
- dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
+ rb_erase(&e->u.rb, &ubi->free);
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+ prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
-
return e->pnum;
}
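The selection policy above can be summarized as: short-term data gets the least worn free PEB, long-term data gets a well-worn one (it will not be rewritten soon), and unknown data gets one with a medium erase counter. Below is a rough userspace model over a sorted array of erase counters; WL_FREE_MAX_DIFF here is an illustrative value and pick_free_peb() is a simplification of find_wl_entry(), not the kernel code.

#include <stdio.h>

#define WL_FREE_MAX_DIFF 4096            /* illustrative value */

enum dtype { LONGTERM, UNKNOWN, SHORTTERM };

/* ec[] is sorted ascending, like an in-order walk of the @wl->free tree. */
static int pick_free_peb(const int *ec, int n, enum dtype dtype)
{
	int target, i, pick = 0;

	switch (dtype) {
	case SHORTTERM:
		return 0;                        /* least worn block */
	case LONGTERM:
		target = ec[0] + WL_FREE_MAX_DIFF;       /* most worn, below the cap */
		break;
	case UNKNOWN:
	default:
		if (ec[n - 1] - ec[0] < WL_FREE_MAX_DIFF)
			return n / 2;            /* small spread: anything goes */
		target = (ec[0] + WL_FREE_MAX_DIFF) / 2; /* medium erase counter */
		break;
	}
	for (i = 0; i < n; i++)
		if (ec[i] < target)
			pick = i;                /* highest EC still below target */
	return pick;
}

int main(void)
{
	int ec[] = { 10, 12, 500, 3000, 9000 };

	printf("short-term -> ec %d\n", ec[pick_free_peb(ec, 5, SHORTTERM)]);
	printf("long-term  -> ec %d\n", ec[pick_free_peb(ec, 5, LONGTERM)]);
	printf("unknown    -> ec %d\n", ec[pick_free_peb(ec, 5, UNKNOWN)]);
	return 0;
}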
/**
- * prot_tree_del - remove a physical eraseblock from the protection trees
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
*
- * This function returns PEB @pnum from the protection trees and returns zero
- * in case of success and %-ENODEV if the PEB was not found in the protection
- * trees.
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
*/
-static int prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
- struct rb_node *p;
- struct ubi_wl_prot_entry *pe = NULL;
-
- p = ubi->prot.pnum.rb_node;
- while (p) {
-
- pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
-
- if (pnum == pe->e->pnum)
- goto found;
+ struct ubi_wl_entry *e;
- if (pnum < pe->e->pnum)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
+ e = ubi->lookuptbl[pnum];
+ if (!e)
+ return -ENODEV;
- return -ENODEV;
+ if (paranoid_check_in_pq(ubi, e))
+ return -ENODEV;
-found:
- ubi_assert(pe->e->pnum == pnum);
- rb_erase(&pe->rb_aec, &ubi->prot.aec);
- rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
- kfree(pe);
+ list_del(&e->u.list);
+ dbg_wl("deleted PEB %d from the protection queue", e->pnum);
return 0;
}
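Compared with the old tree walk, deletion from the protection structure is now O(1): the entry is looked up through the pnum-indexed @ubi->lookuptbl and unlinked from its bucket with list_del(). A minimal userspace analogue of that two-step (all names hypothetical; the kernel keeps the lookup-table slot, since the wear-leveling entry itself lives on):

struct entry {
	struct entry *prev, *next;      /* doubly linked, like struct list_head */
	int pnum;
};

static struct entry *lookuptbl[1024];   /* index: physical eraseblock number */

/* Unlink @pnum from whatever bucket it sits in; -1 if it is unknown. */
static int queue_del(int pnum)
{
	struct entry *e = lookuptbl[pnum];

	if (!e)
		return -1;
	e->prev->next = e->next;        /* this is what list_del() does */
	e->next->prev = e->prev;
	return 0;
}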
@@ -632,47 +546,47 @@ out_free:
}
/**
- * check_protection_over - check if it is time to stop protecting some PEBs.
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
* @ubi: UBI device description object
*
- * This function is called after each erase operation, when the absolute erase
- * counter is incremented, to check if some physical eraseblock have not to be
- * protected any longer. These physical eraseblocks are moved from the
- * protection trees to the used tree.
+ * This function is called after each erase operation and removes PEBs from the
+ * head of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
*/
-static void check_protection_over(struct ubi_device *ubi)
+static void serve_prot_queue(struct ubi_device *ubi)
{
- struct ubi_wl_prot_entry *pe;
+ struct ubi_wl_entry *e, *tmp;
+ int count;
/*
* There may be several protected physical eraseblocks to remove,
* process them all.
*/
- while (1) {
- spin_lock(&ubi->wl_lock);
- if (!ubi->prot.aec.rb_node) {
- spin_unlock(&ubi->wl_lock);
- break;
- }
-
- pe = rb_entry(rb_first(&ubi->prot.aec),
- struct ubi_wl_prot_entry, rb_aec);
+repeat:
+ count = 0;
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+ dbg_wl("PEB %d EC %d protection over, move to used tree",
+ e->pnum, e->ec);
- if (pe->abs_ec > ubi->abs_ec) {
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->used);
+ if (count++ > 32) {
+ /*
+ * Let's be nice and avoid holding the spinlock for
+ * too long.
+ */
spin_unlock(&ubi->wl_lock);
- break;
+ cond_resched();
+ goto repeat;
}
-
- dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
- pe->e->pnum, ubi->abs_ec, pe->abs_ec);
- rb_erase(&pe->rb_aec, &ubi->prot.aec);
- rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
- wl_tree_add(pe->e, &ubi->used);
- spin_unlock(&ubi->wl_lock);
-
- kfree(pe);
- cond_resched();
}
+
+ ubi->pq_head += 1;
+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+ ubi->pq_head = 0;
+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+ spin_unlock(&ubi->wl_lock);
}
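The count > 32 break-out above is the standard way to keep a spinlock's hold time bounded: process a batch, drop the lock, let the scheduler run, then re-take the lock and continue. An illustrative userspace analogue, with a pthread mutex standing in for @wl->lock and sched_yield() for cond_resched():

#include <pthread.h>
#include <sched.h>

struct item { struct item *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *bucket;             /* stand-in for ubi->pq[ubi->pq_head] */

static void move_to_used(struct item *it) { (void)it; /* placeholder */ }

static void serve_bucket(void)
{
	int count;

repeat:
	count = 0;
	pthread_mutex_lock(&lock);
	while (bucket) {
		struct item *it = bucket;

		bucket = it->next;
		move_to_used(it);
		if (count++ > 32) {
			/* Be nice: do not hold the lock for too long. */
			pthread_mutex_unlock(&lock);
			sched_yield();  /* userspace stand-in for cond_resched() */
			goto repeat;
		}
	}
	pthread_mutex_unlock(&lock);
}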
/**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
* @ubi: UBI device description object
* @wrk: the work to schedule
*
- * This function enqueues a work defined by @wrk to the tail of the pending
- * works list.
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
*/
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
@@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, put = 0, scrubbing = 0, protect = 0;
- struct ubi_wl_prot_entry *uninitialized_var(pe);
+ int err, scrubbing = 0, torture = 0;
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
kfree(wrk);
-
if (cancel)
return 0;
@@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
paranoid_check_in_wl_tree(e1, &ubi->used);
- rb_erase(&e1->rb, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
/* Perform scrubbing */
scrubbing = 1;
- e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
paranoid_check_in_wl_tree(e1, &ubi->scrub);
- rb_erase(&e1->rb, &ubi->scrub);
+ rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
paranoid_check_in_wl_tree(e2, &ubi->free);
- rb_erase(&e2->rb, &ubi->free);
+ rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
-
+ if (err == -EAGAIN)
+ goto out_not_moved;
if (err < 0)
goto out_error;
- if (err == 1)
+ if (err == 2) {
+ /* Target PEB write error, torture it */
+ torture = 1;
goto out_not_moved;
+ }
/*
- * For some reason the LEB was not moved - it might be because
- * the volume is being deleted. We should prevent this PEB from
- * being selected for wear-levelling movement for some "time",
- * so put it to the protection tree.
+ * The LEB has not been moved because the volume is being
+ * deleted or the PEB has been put in the meantime. We should prevent
+ * this PEB from being selected for wear-leveling movement
+ * again, so put it into the protection queue.
*/
- dbg_wl("cancelled moving PEB %d", e1->pnum);
- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
- if (!pe) {
- err = -ENOMEM;
- goto out_error;
- }
+ dbg_wl("canceled moving PEB %d", e1->pnum);
+ ubi_assert(err == 1);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ vid_hdr = NULL;
+
+ spin_lock(&ubi->wl_lock);
+ prot_queue_add(ubi, e1);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
- protect = 1;
+ e1 = NULL;
+ err = schedule_erase(ubi, e2, 0);
+ if (err)
+ goto out_error;
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
}
+ /* The PEB has been successfully moved */
ubi_free_vid_hdr(ubi, vid_hdr);
- if (scrubbing && !protect)
+ vid_hdr = NULL;
+ if (scrubbing)
ubi_msg("scrubbed PEB %d, data moved to PEB %d",
e1->pnum, e2->pnum);
spin_lock(&ubi->wl_lock);
- if (protect)
- prot_tree_add(ubi, e1, pe, protect);
- if (!ubi->move_to_put)
+ if (!ubi->move_to_put) {
wl_tree_add(e2, &ubi->used);
- else
- put = 1;
+ e2 = NULL;
+ }
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (put) {
+ err = schedule_erase(ubi, e1, 0);
+ if (err) {
+ e1 = NULL;
+ goto out_error;
+ }
+
+ if (e2) {
/*
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_error;
}
- if (!protect) {
- err = schedule_erase(ubi, e1, 0);
- if (err)
- goto out_error;
- }
-
-
dbg_wl("done");
mutex_unlock(&ubi->move_mutex);
return 0;
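The reworked error handling around ubi_eba_copy_leb() above boils down to a small decision table. The sketch below restates it with a stand-in enum rather than the real return values, so it is a summary of this hunk, not the kernel's API:

#include <stdio.h>

/* Stand-in outcomes mirroring how the hunk interprets ubi_eba_copy_leb():
 * 0 success, -EAGAIN retry, 1 move canceled, 2 target write error, <0 fatal. */
enum copy_result { COPY_OK, COPY_RETRY, COPY_CANCELED, COPY_WRITE_ERR, COPY_FATAL };

static void handle_copy_result(enum copy_result r)
{
	switch (r) {
	case COPY_OK:
		puts("target to used tree (or erased if put meanwhile), erase source");
		break;
	case COPY_RETRY:        /* err == -EAGAIN */
		puts("source back to used/scrub, erase target, retry the move later");
		break;
	case COPY_CANCELED:     /* err == 1: volume deleted or LEB was put */
		puts("source into the protection queue, erase target");
		break;
	case COPY_WRITE_ERR:    /* err == 2: target PEB write error */
		puts("source back to used/scrub, erase target with torture");
		break;
	case COPY_FATAL:        /* any other err < 0 */
		puts("free both entries and switch the device to read-only mode");
		break;
	}
}

int main(void)
{
	handle_copy_result(COPY_WRITE_ERR);
	return 0;
}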
@@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/*
* For some reason the LEB was not moved; it might be an error or it might be
* something else. @e1 was not changed, so return it back. @e2 might
- * be changed, schedule it for erasure.
+ * have been changed, schedule it for erasure.
*/
out_not_moved:
+ dbg_wl("canceled moving PEB %d", e1->pnum);
ubi_free_vid_hdr(ubi, vid_hdr);
+ vid_hdr = NULL;
spin_lock(&ubi->wl_lock);
if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
+ ubi_assert(!ubi->move_to_put);
ubi->move_from = ubi->move_to = NULL;
- ubi->move_to_put = ubi->wl_scheduled = 0;
+ ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e2, 0);
+ e1 = NULL;
+ err = schedule_erase(ubi, e2, torture);
if (err)
goto out_error;
@@ -938,8 +868,10 @@ out_error:
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- kmem_cache_free(ubi_wl_entry_slab, e1);
- kmem_cache_free(ubi_wl_entry_slab, e2);
+ if (e1)
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ if (e2)
+ kmem_cache_free(ubi_wl_entry_slab, e2);
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
@@ -988,7 +920,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* erase counter of free physical eraseblocks is greater than
* %UBI_WL_THRESHOLD.
*/
- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
@@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
kfree(wl_wrk);
spin_lock(&ubi->wl_lock);
- ubi->abs_ec += 1;
wl_tree_add(e, &ubi->free);
spin_unlock(&ubi->wl_lock);
@@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* One more erase operation has happened, take care of the
* protected physical eraseblocks.
*/
- check_protection_over(ubi);
+ serve_prot_queue(ubi);
/* And take care of wear-leveling */
err = ensure_wear_leveling(ubi);
@@ -1190,12 +1121,12 @@ retry:
} else {
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
- rb_erase(&e->rb, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
- rb_erase(&e->rb, &ubi->scrub);
+ rb_erase(&e->u.rb, &ubi->scrub);
} else {
- err = prot_tree_del(ubi, e->pnum);
+ err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
@@ -1255,11 +1186,11 @@ retry:
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
- rb_erase(&e->rb, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
- err = prot_tree_del(ubi, e->pnum);
+ err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
@@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
int err;
/*
- * Erase while the pending works queue is not empty, but not more then
+ * Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
up_write(&ubi->work_sem);
/*
- * And in case last was the WL worker and it cancelled the LEB
+ * And in case last was the WL worker and it canceled the LEB
* movement, flush again.
*/
while (ubi->works_count) {
@@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root)
else if (rb->rb_right)
rb = rb->rb_right;
else {
- e = rb_entry(rb, struct ubi_wl_entry, rb);
+ e = rb_entry(rb, struct ubi_wl_entry, u.rb);
rb = rb_parent(rb);
if (rb) {
- if (rb->rb_left == &e->rb)
+ if (rb->rb_left == &e->u.rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
@@ -1396,7 +1327,8 @@ int ubi_thread(void *u)
ubi_msg("%s: %d consecutive failures",
ubi->bgt_name, WL_MAX_FAILURES);
ubi_ro_mode(ubi);
- break;
+ ubi->thread_enabled = 0;
+ continue;
}
} else
failures = 0;
@@ -1435,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
*/
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
- int err;
+ int err, i;
struct rb_node *rb1, *rb2;
struct ubi_scan_volume *sv;
struct ubi_scan_leb *seb, *tmp;
struct ubi_wl_entry *e;
-
ubi->used = ubi->free = ubi->scrub = RB_ROOT;
- ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
@@ -1457,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (!ubi->lookuptbl)
return err;
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+ INIT_LIST_HEAD(&ubi->pq[i]);
+ ubi->pq_head = 0;
+
list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
cond_resched();
@@ -1551,33 +1485,18 @@ out_free:
}
/**
- * protection_trees_destroy - destroy the protection RB-trees.
+ * protection_queue_destroy - destroy the protection queue.
* @ubi: UBI device description object
*/
-static void protection_trees_destroy(struct ubi_device *ubi)
+static void protection_queue_destroy(struct ubi_device *ubi)
{
- struct rb_node *rb;
- struct ubi_wl_prot_entry *pe;
-
- rb = ubi->prot.aec.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
-
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &pe->rb_aec)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
+ int i;
+ struct ubi_wl_entry *e, *tmp;
- kmem_cache_free(ubi_wl_entry_slab, pe->e);
- kfree(pe);
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ list_del(&e->u.list);
+ kmem_cache_free(ubi_wl_entry_slab, e);
}
}
}
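One small point about the teardown above: the _safe iterator is required because each entry is freed inside the loop, so the successor has to be cached before the node is destroyed. The same pattern in a self-contained userspace form (illustrative types):

#include <stdlib.h>

struct entry {
	struct entry *next;
	int pnum;
};

/* Free one bucket; @tmp holds the successor before free(), which is
 * exactly what list_for_each_entry_safe() provides in the kernel. */
static void bucket_destroy(struct entry *head)
{
	struct entry *e, *tmp;

	for (e = head; e; e = tmp) {
		tmp = e->next;          /* read before e is freed */
		free(e);
	}
}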
@@ -1590,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
{
dbg_wl("close the WL sub-system");
cancel_pending(ubi);
- protection_trees_destroy(ubi);
+ protection_queue_destroy(ubi);
tree_destroy(&ubi->used);
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
@@ -1660,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
return 1;
}
+/**
+ * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %1 if it is not.
+ */
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 0;
+
+ ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+ e->pnum, e->ec);
+ ubi_dbg_dump_stack();
+ return 1;
+}
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */