author     Alex Shi <alex.shi@linaro.org>  2015-05-12 15:18:24 +0800
committer  Alex Shi <alex.shi@linaro.org>  2015-05-12 15:18:24 +0800
commit     54db03197e2d0aca252bc377938777b30d3e7f10 (patch)
tree       8f078dfe75c88e09ae8557f8b6aa288bbdaddc97
parent     2075186d012c815911494f34fc16aef7c8f3492b (diff)
parent     1f8fdf83fe376cc83e96e942689a6cfac01e9634 (diff)
Merge branch 'linux-linaro-lsk-v3.10' into linux-linaro-lsk-v3.10-android
-rw-r--r--  Documentation/ABI/testing/sysfs-block-zram | 62
-rw-r--r--  Documentation/blockdev/zram.txt | 129
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/mm/fault.c | 2
-rw-r--r--  arch/arc/mm/fault.c | 2
-rw-r--r--  arch/arm/include/asm/elf.h | 2
-rw-r--r--  arch/arm/mach-s3c64xx/crag6410.h | 1
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c | 1
-rw-r--r--  arch/avr32/mm/fault.c | 2
-rw-r--r--  arch/c6x/kernel/time.c | 2
-rw-r--r--  arch/cris/mm/fault.c | 2
-rw-r--r--  arch/frv/mm/fault.c | 2
-rw-r--r--  arch/ia64/mm/fault.c | 2
-rw-r--r--  arch/m32r/mm/fault.c | 2
-rw-r--r--  arch/m68k/mm/fault.c | 2
-rw-r--r--  arch/metag/mm/fault.c | 2
-rw-r--r--  arch/microblaze/mm/fault.c | 2
-rw-r--r--  arch/mips/include/asm/suspend.h | 7
-rw-r--r--  arch/mips/mm/fault.c | 2
-rw-r--r--  arch/mips/power/cpu.c | 2
-rw-r--r--  arch/mips/power/hibernate.S | 3
-rw-r--r--  arch/mn10300/mm/fault.c | 2
-rw-r--r--  arch/openrisc/mm/fault.c | 2
-rw-r--r--  arch/parisc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/kernel/suspend.c | 4
-rw-r--r--  arch/powerpc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/perf/callchain.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_fault.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 2
-rw-r--r--  arch/s390/kernel/suspend.c | 11
-rw-r--r--  arch/s390/kvm/priv.c | 1
-rw-r--r--  arch/s390/mm/fault.c | 6
-rw-r--r--  arch/score/mm/fault.c | 2
-rw-r--r--  arch/sh/include/asm/sections.h | 1
-rw-r--r--  arch/sh/mm/fault.c | 2
-rw-r--r--  arch/sparc/mm/fault_32.c | 2
-rw-r--r--  arch/sparc/mm/fault_64.c | 2
-rw-r--r--  arch/sparc/power/hibernate.c | 4
-rw-r--r--  arch/tile/mm/fault.c | 2
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/unicore32/include/mach/pm.h | 3
-rw-r--r--  arch/unicore32/kernel/hibernate.c | 1
-rw-r--r--  arch/x86/kvm/emulate.c | 27
-rw-r--r--  arch/x86/mm/fault.c | 10
-rw-r--r--  arch/x86/power/hibernate_32.c | 4
-rw-r--r--  arch/x86/power/hibernate_64.c | 4
-rw-r--r--  arch/xtensa/Kconfig | 30
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/xtensa/mm/fault.c | 2
-rw-r--r--  arch/xtensa/platforms/xtfpga/Makefile | 3
-rw-r--r--  arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 3
-rw-r--r--  arch/xtensa/platforms/xtfpga/include/platform/lcd.h | 15
-rw-r--r--  arch/xtensa/platforms/xtfpga/lcd.c | 55
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/nbd.c | 8
-rw-r--r--  drivers/block/zram/Kconfig (renamed from drivers/staging/zram/Kconfig) | 11
-rw-r--r--  drivers/block/zram/Makefile | 5
-rw-r--r--  drivers/block/zram/zcomp.c | 353
-rw-r--r--  drivers/block/zram/zcomp.h | 68
-rw-r--r--  drivers/block/zram/zcomp_lz4.c | 47
-rw-r--r--  drivers/block/zram/zcomp_lz4.h | 17
-rw-r--r--  drivers/block/zram/zcomp_lzo.c | 47
-rw-r--r--  drivers/block/zram/zcomp_lzo.h | 17
-rw-r--r--  drivers/block/zram/zram_drv.c | 1162
-rw-r--r--  drivers/block/zram/zram_drv.h (renamed from drivers/staging/zram/zram_drv.h) | 85
-rw-r--r--  drivers/bluetooth/ath3k.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 13
-rw-r--r--  drivers/dma/omap-dma.c | 1
-rw-r--r--  drivers/edac/sb_edac.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 10
-rw-r--r--  drivers/hv/channel.c | 7
-rw-r--r--  drivers/i2c/i2c-core.c | 3
-rw-r--r--  drivers/iio/imu/adis_trigger.c | 2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 25
-rw-r--r--  drivers/infiniband/core/umem.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 3
-rw-r--r--  drivers/input/mouse/elantech.c | 22
-rw-r--r--  drivers/input/mouse/elantech.h | 1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_common.h | 2
-rw-r--r--  drivers/media/usb/stk1160/stk1160-v4l.c | 17
-rw-r--r--  drivers/memstick/core/mspro_block.c | 3
-rw-r--r--  drivers/mtd/ubi/attach.c | 2
-rw-r--r--  drivers/mtd/ubi/cdev.c | 2
-rw-r--r--  drivers/mtd/ubi/eba.c | 3
-rw-r--r--  drivers/mtd/ubi/wl.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl18xx/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.h | 4
-rw-r--r--  drivers/parport/Kconfig | 4
-rw-r--r--  drivers/power/lp8788-charger.c | 4
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/storvsc_drv.c | 15
-rw-r--r--  drivers/spi/spidev.c | 5
-rw-r--r--  drivers/staging/Kconfig | 4
-rw-r--r--  drivers/staging/Makefile | 2
-rw-r--r--  drivers/staging/zram/Makefile | 3
-rw-r--r--  drivers/staging/zram/zram.txt | 77
-rw-r--r--  drivers/staging/zram/zram_drv.c | 760
-rw-r--r--  drivers/staging/zram/zram_sysfs.c | 229
-rw-r--r--  drivers/staging/zsmalloc/Makefile | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 10
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 12
-rw-r--r--  drivers/usb/core/hub.c | 4
-rw-r--r--  drivers/usb/gadget/composite.c | 2
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 2
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 2
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/phy/phy.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r--  drivers/video/console/Kconfig | 5
-rw-r--r--  fs/affs/amigaffs.c | 2
-rw-r--r--  fs/autofs4/expire.c | 12
-rw-r--r--  fs/autofs4/root.c | 2
-rw-r--r--  fs/binfmt_elf.c | 9
-rw-r--r--  fs/btrfs/extent-tree.c | 5
-rw-r--r--  fs/btrfs/ioctl.c | 5
-rw-r--r--  fs/ceph/dir.c | 8
-rw-r--r--  fs/ceph/inode.c | 6
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/inode.c | 2
-rw-r--r--  fs/coda/cache.c | 2
-rw-r--r--  fs/dcache.c | 176
-rw-r--r--  fs/debugfs/inode.c | 6
-rw-r--r--  fs/exportfs/expfs.c | 2
-rw-r--r--  fs/ext4/namei.c | 20
-rw-r--r--  fs/jfs/jfs_dtree.c | 4
-rw-r--r--  fs/libfs.c | 12
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/ncpfs/dir.c | 2
-rw-r--r--  fs/ncpfs/ncplib_kernel.h | 4
-rw-r--r--  fs/nfs/getroot.c | 2
-rw-r--r--  fs/notify/fsnotify.c | 4
-rw-r--r--  fs/ocfs2/dcache.c | 2
-rw-r--r--  fs/ocfs2/file.c | 22
-rw-r--r--  fs/proc/task_mmu.c | 10
-rw-r--r--  fs/reiserfs/reiserfs.h | 2
-rw-r--r--  fs/splice.c | 8
-rw-r--r--  include/acpi/actypes.h | 20
-rw-r--r--  include/acpi/platform/acenv.h | 1
-rw-r--r--  include/asm-generic/pgtable.h | 5
-rw-r--r--  include/asm-generic/sections.h | 4
-rw-r--r--  include/linux/blk_types.h | 4
-rw-r--r--  include/linux/ceph/decode.h | 17
-rw-r--r--  include/linux/cpu.h | 47
-rw-r--r--  include/linux/dcache.h | 8
-rw-r--r--  include/linux/kernel.h | 13
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/linux/usb.h | 26
-rw-r--r--  include/linux/zpool.h | 106
-rw-r--r--  include/linux/zsmalloc.h (renamed from drivers/staging/zsmalloc/zsmalloc.h) | 3
-rw-r--r--  ipc/compat.c | 2
-rw-r--r--  kernel/cgroup.c | 2
-rw-r--r--  kernel/cpu.c | 21
-rw-r--r--  kernel/printk.c | 4
-rw-r--r--  kernel/ptrace.c | 20
-rw-r--r--  kernel/softirq.c | 6
-rw-r--r--  kernel/trace/ring_buffer.c | 11
-rw-r--r--  kernel/trace/trace.c | 4
-rw-r--r--  kernel/trace/trace_events.c | 2
-rw-r--r--  lib/string.c | 2
-rw-r--r--  mm/Kconfig | 32
-rw-r--r--  mm/Makefile | 2
-rw-r--r--  mm/ksm.c | 2
-rw-r--r--  mm/memory.c | 7
-rw-r--r--  mm/memory_hotplug.c | 13
-rw-r--r--  mm/page-writeback.c | 7
-rw-r--r--  mm/zpool.c | 364
-rw-r--r--  mm/zsmalloc.c (renamed from drivers/staging/zsmalloc/zsmalloc-main.c) | 274
-rw-r--r--  net/ipv4/ip_forward.c | 3
-rw-r--r--  net/ipv4/tcp_illinois.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 67
-rw-r--r--  net/ipv6/ndisc.c | 9
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/llc/sysctl_net_llc.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 26
-rw-r--r--  net/rds/sysctl.c | 4
-rw-r--r--  security/selinux/selinuxfs.c | 8
-rw-r--r--  sound/pci/emu10k1/emuproc.c | 12
-rw-r--r--  sound/pci/hda/patch_realtek.c | 11
-rw-r--r--  sound/usb/mixer_quirks.c | 1
-rw-r--r--  tools/power/x86/turbostat/Makefile | 6
-rw-r--r--  virt/kvm/kvm_main.c | 4
207 files changed, 3649 insertions, 1644 deletions
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index ec93fe33baa..0c7f4f91c6b 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -42,15 +42,48 @@ Description:
The invalid_io file is read-only and specifies the number of
non-page-size-aligned I/O requests issued to this device.
+What: /sys/block/zram<id>/failed_reads
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_reads file is read-only and specifies the number of
+ failed reads that have occurred on this device.
+
+What: /sys/block/zram<id>/failed_writes
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_writes file is read-only and specifies the number of
+ failed writes that have occurred on this device.
+
+What: /sys/block/zram<id>/max_comp_streams
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The max_comp_streams file is read-write and specifies the
+ number of the backend's zcomp_strm compression streams (i.e.
+ the number of concurrent compress operations).
+
+What: /sys/block/zram<id>/comp_algorithm
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The comp_algorithm file is read-write; it shows the available
+ and currently selected compression algorithms and allows the
+ selection to be changed.
+
What: /sys/block/zram<id>/notify_free
Date: August 2010
Contact: Nitin Gupta <ngupta@vflare.org>
Description:
- The notify_free file is read-only and specifies the number of
- swap slot free notifications received by this device. These
- notifications are send to a swap block device when a swap slot
- is freed. This statistic is applicable only when this disk is
- being used as a swap disk.
+ The notify_free file is read-only. Depending on the device usage
+ scenario it may account a) the number of pages freed because
+ of swap slot free notifications or b) the number of pages freed
+ because of REQ_DISCARD requests sent by bio. The former are
+ sent to a swap block device when a swap slot is freed, which
+ implies that this disk is being used as a swap disk. The latter
+ are sent by a filesystem mounted with the discard option,
+ whenever some data blocks are discarded.
What: /sys/block/zram<id>/discard
Date: August 2010
@@ -97,3 +130,22 @@ Description:
efficiency can be calculated using compr_data_size and this
statistic.
Unit: bytes
+
+What: /sys/block/zram<id>/mem_used_max
+Date: August 2014
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+ The mem_used_max file is read/write and reports the maximum
+ amount of memory zram has consumed to store compressed data.
+ To reset the value, write "0"; writing any other value
+ returns -EINVAL.
+ Unit: bytes
+
+What: /sys/block/zram<id>/mem_limit
+Date: August 2014
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+ The mem_limit file is read/write and specifies the maximum
+ amount of memory zram can use to store compressed data. The
+ limit can be changed at runtime, and writing "0" disables it.
+ No limit is the initial state. Unit: bytes
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
new file mode 100644
index 00000000000..5cd0bd903f5
--- /dev/null
+++ b/Documentation/blockdev/zram.txt
@@ -0,0 +1,129 @@
+zram: Compressed RAM based block devices
+----------------------------------------
+
+* Introduction
+
+The zram module creates RAM based block devices named /dev/zram<id>
+(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
+in memory itself. These disks allow very fast I/O, and compression provides
+significant memory savings. Some of the use cases include /tmp storage,
+use as swap disks, various caches under /var and maybe many more :)
+
+Statistics for individual zram devices are exported through sysfs nodes at
+/sys/block/zram<id>/
+
+* Usage
+
+The following shows a typical sequence of steps for using zram.
+
+1) Load Module:
+ modprobe zram num_devices=4
+ This creates 4 devices: /dev/zram{0,1,2,3}
+ (num_devices parameter is optional. Default: 1)
+
+2) Set max number of compression streams
+ The compression backend may use up to max_comp_streams compression
+ streams, thus allowing up to max_comp_streams concurrent compression
+ operations. By default, the compression backend uses a single
+ compression stream.
+
+ Examples:
+ #show max compression streams number
+ cat /sys/block/zram0/max_comp_streams
+
+ #set max compression streams number to 3
+ echo 3 > /sys/block/zram0/max_comp_streams
+
+Note:
+To enable the compression backend's multi-stream support, max_comp_streams
+must be set to the desired concurrency level before zram device
+initialisation. Once the device has been initialised as a single-stream
+compression backend (max_comp_streams equal to 1), you will see an error
+if you try to change the value of max_comp_streams: the single-stream
+backend is implemented as a special case to avoid locking overhead and
+does not support dynamic max_comp_streams. Only the multi-stream backend
+supports dynamic max_comp_streams adjustment.
+
+3) Select compression algorithm
+ Using the comp_algorithm device attribute one can see the available
+ and currently selected (shown in square brackets) compression
+ algorithms and change the selected compression algorithm (once the
+ device is initialised there is no way to change it).
+
+ Examples:
+ #show supported compression algorithms
+ cat /sys/block/zram0/comp_algorithm
+ lzo [lz4]
+
+ #select lzo compression algorithm
+ echo lzo > /sys/block/zram0/comp_algorithm
+
+4) Set Disksize
+ Set disk size by writing the value to sysfs node 'disksize'.
+ The value can be given either in bytes or with memory-size suffixes.
+ Examples:
+ # Initialize /dev/zram0 with 50MB disksize
+ echo $((50*1024*1024)) > /sys/block/zram0/disksize
+
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/disksize
+ echo 512M > /sys/block/zram0/disksize
+ echo 1G > /sys/block/zram0/disksize
+
+Note:
+There is little point in creating a zram device greater than twice the size
+of memory, since we expect a 2:1 compression ratio. Note that zram uses about
+0.1% of the size of the disk when not in use, so a huge zram is wasteful.
+
+5) Set memory limit: Optional
+ Set memory limit by writing the value to sysfs node 'mem_limit'.
+ The value can be given either in bytes or with memory-size suffixes.
+ In addition, the value can be changed at runtime.
+ Examples:
+ # Limit /dev/zram0 to 50MB of memory
+ echo $((50*1024*1024)) > /sys/block/zram0/mem_limit
+
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/mem_limit
+ echo 512M > /sys/block/zram0/mem_limit
+ echo 1G > /sys/block/zram0/mem_limit
+
+ # To disable memory limit
+ echo 0 > /sys/block/zram0/mem_limit
+
+6) Activate:
+ mkswap /dev/zram0
+ swapon /dev/zram0
+
+ mkfs.ext4 /dev/zram1
+ mount /dev/zram1 /tmp
+
+7) Stats:
+ Per-device statistics are exported as various nodes under
+ /sys/block/zram<id>/
+ disksize
+ num_reads
+ num_writes
+ invalid_io
+ notify_free
+ discard
+ zero_pages
+ orig_data_size
+ compr_data_size
+ mem_used_total
+ mem_used_max
+
+8) Deactivate:
+ swapoff /dev/zram0
+ umount /dev/zram1
+
+9) Reset:
+ Write any positive value to the 'reset' sysfs node:
+ echo 1 > /sys/block/zram0/reset
+ echo 1 > /sys/block/zram1/reset
+
+ This frees all the memory allocated for the given device and
+ resets the disksize to zero. You must set the disksize again
+ before reusing the device.
+
+Nitin Gupta
+ngupta@vflare.org
diff --git a/Makefile b/Makefile
index d2a3930159e..923ad8a64e3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 74
+SUBLEVEL = 77
EXTRAVERSION =
NAME = TOSSUG Baby Fish
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6..9d0ac091a52 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
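
The same three-way dispatch recurs in every per-arch fault handler touched
below: check VM_FAULT_OOM first, then the newly handled VM_FAULT_SIGSEGV,
then VM_FAULT_SIGBUS, and BUG() on anything else. A minimal standalone
sketch of that ordering (the flag values here are illustrative placeholders,
not the kernel's actual definitions from include/linux/mm.h):

#include <stdio.h>

/* Illustrative placeholder values, not the kernel's real flags. */
#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_ERROR   (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)

/* Mirror the dispatch order the hunks establish: OOM, then the
 * newly handled SIGSEGV, then SIGBUS. */
static const char *dispatch(unsigned int fault)
{
	if (!(fault & VM_FAULT_ERROR))
		return "fault handled";
	if (fault & VM_FAULT_OOM)
		return "out_of_memory";
	if (fault & VM_FAULT_SIGSEGV)	/* the branch each hunk adds */
		return "bad_area -> SIGSEGV";
	if (fault & VM_FAULT_SIGBUS)
		return "do_sigbus -> SIGBUS";
	return "BUG()";
}

int main(void)
{
	printf("%s\n", dispatch(VM_FAULT_SIGSEGV));
	return 0;
}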
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 50533b750a9..08f65bcf913 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -160,6 +160,8 @@ good_area:
/* TBD: switch to pagefault_out_of_memory() */
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f4b46d39b9c..051b7269e63 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -114,7 +114,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
index 4c3c9994fc2..81dc722ced5 100644
--- a/arch/arm/mach-s3c64xx/crag6410.h
+++ b/arch/arm/mach-s3c64xx/crag6410.h
@@ -14,6 +14,7 @@
#include <linux/gpio.h>
#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
#define PCA935X_GPIO_BASE GPIO_BOARD_START
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 8ad88ace795..5fa9ac9104e 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -558,6 +558,7 @@ static struct wm831x_touch_pdata touch_pdata = {
static struct wm831x_pdata crag_pmic_pdata = {
.wm831x_num = 1,
+ .irq_base = BANFF_PMIC_IRQ_BASE,
.gpio_base = BANFF_PMIC_GPIO_BASE,
.soft_shutdown = true,
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca9332719..d223a8b57c1 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
index 356ee84cad9..04845aaf598 100644
--- a/arch/c6x/kernel/time.c
+++ b/arch/c6x/kernel/time.c
@@ -49,7 +49,7 @@ u64 sched_clock(void)
return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
}
-void time_init(void)
+void __init time_init(void)
{
u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a..2686a7aa8ec 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c..ec4917ddf67 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad8709..ba5ba7accd0 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd..e3d4d489010 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index eb1d61f6872..f0eef0491f7 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -153,6 +153,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf..2de5dc695a8 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a..d46a5ebb757 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 3adac3b53d1..00000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SUSPEND_H
-#define __ASM_SUSPEND_H
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-#endif /* __ASM_SUSPEND_H */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 0214a43b991..c40a8d1c43b 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -157,6 +157,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index 521e5963df0..2129e67723f 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -7,7 +7,7 @@
* Author: Hu Hongbing <huhb@lemote.com>
* Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#include <asm/suspend.h>
+#include <asm/sections.h>
#include <asm/fpu.h>
#include <asm/dsp.h>
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 32a7c828f07..e7567c8a9e7 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
END(swsusp_arch_suspend)
LEAF(swsusp_arch_resume)
+ /* Avoid TLB mismatch during and after kernel resume */
+ jal local_flush_tlb_all
PTR_L t0, restore_pblist
0:
PTR_L t1, PBE_ADDRESS(t0) /* source */
@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee..0c2cc5d39c8 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d32..230ac20ae79 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index d10d27a720c..c45130f56a9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -220,6 +220,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 0167d53da30..a531154cc0f 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -9,9 +9,7 @@
#include <linux/mm.h>
#include <asm/page.h>
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
+#include <asm/sections.h>
/*
* pfn_is_nosave - check if given pfn is in the 'nosave' section
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d9196c9f93d..d51a0c110eb 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -425,6 +425,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282c..ead55351b25 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
- for (;;) {
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 641e7273d75..62f3e4e48a0 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
- } else if (*flt & VM_FAULT_SIGBUS) {
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 35f77a42bed..c5c5788e8a1 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
struct dentry *dentry, *tmp;
mutex_lock(&dir->d_inode->i_mutex);
- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry)) && dentry->d_inode) {
dget_dlock(dentry);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index c479d2f9605..58cbb75e89e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -9,12 +9,9 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm.h>
+#include <asm/sections.h>
#include <asm/ctl_reg.h>
-
-/*
- * References to section boundaries
- */
-extern const void __nosave_begin, __nosave_end;
+#include <asm/ipl.h>
/*
* The restore of the saved pages in an hibernation image will set
@@ -138,6 +135,8 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
if (pfn <= LC_PAGES)
@@ -145,6 +144,8 @@ int pfn_is_nosave(unsigned long pfn)
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 6bbd7b5a0bb..0220c2ba759 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -328,6 +328,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 416facec4a3..d214321db72 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -244,6 +244,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527..6860beb2a28 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 1b6199740e9..7a99e6af637 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,6 @@
#include <asm-generic/sections.h>
-extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc610150..a58fec9b55e 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
else
BUG();
}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 59dbd464572..163c7871211 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -252,6 +252,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 3841a081beb..ac2db923e51 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -443,6 +443,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 42b0b8ce699..17bd2e167e0 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -9,11 +9,9 @@
#include <asm/hibernate.h>
#include <asm/visasm.h>
#include <asm/page.h>
+#include <asm/sections.h>
#include <asm/tlb.h>
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
struct saved_context saved_context;
/*
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 3ff289f422e..12b732f593b 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5c3aef74237..06ab0ebe0a0 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
index 4dcd34ae194..77b522694e7 100644
--- a/arch/unicore32/include/mach/pm.h
+++ b/arch/unicore32/include/mach/pm.h
@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
/* Defined in hibernate_asm.S */
extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
extern struct pbe *restore_pblist;
#endif
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index d75ef8b6cb5..9969ec374ab 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/sections.h>
#include <asm/suspend.h>
#include "mach/pm.h"
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index af88fa20dbe..ddad189e596 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2450,7 +2450,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
@@ -2463,25 +2463,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
- switch (ctxt->mode) {
- case X86EMUL_MODE_PROT32:
- if ((msr_data & 0xfffc) == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- case X86EMUL_MODE_PROT64:
- if (msr_data == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- default:
- break;
- }
+ if ((msr_data & 0xfffc) == 0x0)
+ return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
- cs_sel = (u16)msr_data;
- cs_sel &= ~SELECTOR_RPL_MASK;
+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
- ss_sel &= ~SELECTOR_RPL_MASK;
- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+ if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
@@ -2490,10 +2478,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- ctxt->_eip = msr_data;
+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d8b1ff68dbb..e4780b05253 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -812,11 +812,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
int code = BUS_ADRERR;
- up_read(&mm->mmap_sem);
-
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
@@ -847,7 +844,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address, 0, 0);
return;
}
@@ -855,14 +851,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
return;
}
- up_read(&current->mm->mmap_sem);
-
/*
* We ran out of memory, call the OOM killer, and return the
* userspace (which will retry the fault, or kill us if we got
@@ -873,6 +866,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
@@ -1193,6 +1188,7 @@ good_area:
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
+ up_read(&mm->mmap_sem);
mm_fault_error(regs, error_code, address, fault);
return;
}
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 7d28c885d23..291226b952a 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -13,13 +13,11 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
+#include <asm/sections.h>
/* Defined in hibernate_asm_32.S */
extern int restore_image(void);
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a0fde91c16c..8ecaed12763 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -17,11 +17,9 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
+#include <asm/sections.h>
#include <asm/suspend.h>
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
/* Defined in hibernate_asm_64.S */
extern int restore_image(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 0a1b95f81a3..2b086a6ae6c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -287,6 +287,36 @@ menu "Executable file formats"
source "fs/Kconfig.binfmt"
+config XTFPGA_LCD
+ bool "Enable XTFPGA LCD driver"
+ depends on XTENSA_PLATFORM_XTFPGA
+ default n
+ help
+ There's a 2x16 LCD on most XTFPGA boards; the kernel may output
+ progress messages there during bootup/shutdown. It may be useful
+ during board bringup.
+
+ If unsure, say N.
+
+config XTFPGA_LCD_BASE_ADDR
+ hex "XTFPGA LCD base address"
+ depends on XTFPGA_LCD
+ default "0x0d0c0000"
+ help
+ Base address of the LCD controller inside the KIO region.
+ Different boards in the XTFPGA family have the LCD controller at
+ different addresses. Please consult the prototyping user guide for
+ your board for the correct address. A wrong address here may lead
+ to a hardware lockup.
+
+config XTFPGA_LCD_8BIT_ACCESS
+ bool "Use 8-bit access to XTFPGA LCD"
+ depends on XTFPGA_LCD
+ default n
+ help
+ The LCD may be connected with a 4- or 8-bit interface; 8-bit access
+ may only be used with the 8-bit interface. Please consult the
+ prototyping user guide for your board for the correct interface
+ width.
+
endmenu
source "net/Kconfig"
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 513effd4806..d07c1886bc8 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
__SYSCALL(324, sys_name_to_handle_at, 5)
#define __NR_open_by_handle_at 325
__SYSCALL(325, sys_open_by_handle_at, 3)
-#define __NR_sync_file_range 326
+#define __NR_sync_file_range2 326
__SYSCALL(326, sys_sync_file_range2, 6)
#define __NR_perf_event_open 327
__SYSCALL(327, sys_perf_event_open, 5)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 70fa7bc42b4..38278337d85 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
index b9ae206340c..7839d38b233 100644
--- a/arch/xtensa/platforms/xtfpga/Makefile
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -6,4 +6,5 @@
#
# Note 2! The CFLAGS definitions are in the main makefile...
-obj-y = setup.o lcd.o
+obj-y += setup.o
+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index 4416773cbde..b39fbcf5c61 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -44,9 +44,6 @@
/* UART */
#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
-/* LCD instruction and data addresses. */
-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
/* Misc. */
#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
index 0e435645af5..4c8541ed113 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -11,10 +11,25 @@
#ifndef __XTENSA_XTAVNET_LCD_H
#define __XTENSA_XTAVNET_LCD_H
+#ifdef CONFIG_XTFPGA_LCD
/* Display string STR at position POS on the LCD. */
void lcd_disp_at_pos(char *str, unsigned char pos);
/* Shift the contents of the LCD display left or right. */
void lcd_shiftleft(void);
void lcd_shiftright(void);
+#else
+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+}
+
+static inline void lcd_shiftleft(void)
+{
+}
+
+static inline void lcd_shiftright(void)
+{
+}
+#endif
+
#endif
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 2872301598d..4dc0c1b43f4 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -1,50 +1,63 @@
/*
- * Driver for the LCD display on the Tensilica LX60 Board.
+ * Driver for the LCD display on the Tensilica XTFPGA board family.
+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
-/*
- *
- * FIXME: this code is from the examples from the LX60 user guide.
- *
- * The lcd_pause function does busy waiting, which is probably not
- * great. Maybe the code could be changed to use kernel timers, or
- * change the hardware to not need to wait.
- */
-
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <platform/hardware.h>
#include <platform/lcd.h>
-#include <linux/delay.h>
-#define LCD_PAUSE_ITERATIONS 4000
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
+
#define LCD_CLEAR 0x1
#define LCD_DISPLAY_ON 0xc
/* 8bit and 2 lines display */
#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_MODE4BIT 0x28
#define LCD_DISPLAY_POS 0x80
#define LCD_SHIFT_LEFT 0x18
#define LCD_SHIFT_RIGHT 0x1c
+static void lcd_put_byte(u8 *addr, u8 data)
+{
+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*addr) = data;
+#else
+ ACCESS_ONCE(*addr) = data & 0xf0;
+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+#endif
+}
+
static int __init lcd_init(void)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
mdelay(5);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
udelay(200);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ udelay(50);
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+#endif
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
udelay(50);
- *LCD_INSTR_ADDR = LCD_CLEAR;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
mdelay(10);
lcd_disp_at_pos("XTENSA LINUX", 0);
return 0;
@@ -52,10 +65,10 @@ static int __init lcd_init(void)
void lcd_disp_at_pos(char *str, unsigned char pos)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
udelay(100);
while (*str != 0) {
- *LCD_DATA_ADDR = *str;
+ lcd_put_byte(LCD_DATA_ADDR, *str);
udelay(200);
str++;
}
@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
void lcd_shiftleft(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
udelay(50);
}
void lcd_shiftright(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
udelay(50);
}
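
When CONFIG_XTFPGA_LCD_8BIT_ACCESS is not set, lcd_put_byte() above drives
a 4-bit bus by issuing two writes per byte: the high nibble first, then the
low nibble shifted onto the same upper data lines. A small standalone
illustration of that split (the helper below is invented for illustration,
not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Reproduce the nibble split lcd_put_byte() performs in 4-bit mode:
 * high nibble on the upper data lines, then the low nibble shifted up. */
static void put_nibbles(uint8_t data, uint8_t out[2])
{
	out[0] = data & 0xf0;		/* first write: high nibble */
	out[1] = (data << 4) & 0xf0;	/* second write: low nibble */
}

int main(void)
{
	uint8_t out[2];
	put_nibbles(0x28, out);		/* LCD_DISPLAY_MODE4BIT */
	printf("bus writes: 0x%02x 0x%02x\n", out[0], out[1]);
	return 0;
}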
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index a88894190e4..c991fe680e5 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -978,7 +978,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return -EINVAL;
drv->safe_state_index = -1;
- for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b81ddfea1da..9da952c9af9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -105,6 +105,8 @@ source "drivers/block/paride/Kconfig"
source "drivers/block/mtip32xx/Kconfig"
+source "drivers/block/zram/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ca07399a8d9..3675937ab65 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+obj-$(CONFIG_ZRAM) += zram/
nvme-y := nvme-core.o nvme-scsi.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index cf1576d5436..a5c987ae665 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -815,10 +815,6 @@ static int __init nbd_init(void)
return -EINVAL;
}
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
- return -ENOMEM;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -840,6 +836,10 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
+ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+ if (!nbd_dev)
+ return -ENOMEM;
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
index 983314c4134..6489c0fd0ea 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,16 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: <https://compcache.googlecode.com/>
+
+config ZRAM_LZ4_COMPRESS
+ bool "Enable LZ4 algorithm support"
+ depends on ZRAM
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ default n
+ help
+ This option enables LZ4 compression algorithm support. The
+ compression algorithm can be changed using the `comp_algorithm'
+ device attribute.
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
new file mode 100644
index 00000000000..be0763ff57a
--- /dev/null
+++ b/drivers/block/zram/Makefile
@@ -0,0 +1,5 @@
+zram-y := zcomp_lzo.o zcomp.o zram_drv.o
+
+zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
+
+obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
new file mode 100644
index 00000000000..f1ff39a3d1c
--- /dev/null
+++ b/drivers/block/zram/zcomp.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "zcomp.h"
+#include "zcomp_lzo.h"
+#ifdef CONFIG_ZRAM_LZ4_COMPRESS
+#include "zcomp_lz4.h"
+#endif
+
+/*
+ * single zcomp_strm backend
+ */
+struct zcomp_strm_single {
+ struct mutex strm_lock;
+ struct zcomp_strm *zstrm;
+};
+
+/*
+ * multi zcomp_strm backend
+ */
+struct zcomp_strm_multi {
+ /* protect strm list */
+ spinlock_t strm_lock;
+ /* max possible number of zstrm streams */
+ int max_strm;
+ /* number of available zstrm streams */
+ int avail_strm;
+ /* list of available strms */
+ struct list_head idle_strm;
+ wait_queue_head_t strm_wait;
+};
+
+static struct zcomp_backend *backends[] = {
+ &zcomp_lzo,
+#ifdef CONFIG_ZRAM_LZ4_COMPRESS
+ &zcomp_lz4,
+#endif
+ NULL
+};
+
+static struct zcomp_backend *find_backend(const char *compress)
+{
+ int i = 0;
+ while (backends[i]) {
+ if (sysfs_streq(compress, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ if (zstrm->private)
+ comp->backend->destroy(zstrm->private);
+ free_pages((unsigned long)zstrm->buffer, 1);
+ kfree(zstrm);
+}
+
+/*
+ * allocate new zcomp_strm structure with ->private initialized by
+ * backend, return NULL on error
+ */
+static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+{
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
+ if (!zstrm)
+ return NULL;
+
+ zstrm->private = comp->backend->create();
+ /*
+ * allocate 2 pages. 1 for compressed data, plus 1 extra for the
+ * case when compressed size is larger than the original one
+ */
+ zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!zstrm->private || !zstrm->buffer) {
+ zcomp_strm_free(comp, zstrm);
+ zstrm = NULL;
+ }
+ return zstrm;
+}
+
+/*
+ * get an idle zcomp_strm or wait until another process releases
+ * (zcomp_strm_release()) one for us
+ */
+static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ while (1) {
+ spin_lock(&zs->strm_lock);
+ if (!list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ spin_unlock(&zs->strm_lock);
+ return zstrm;
+ }
+ /* zstrm streams limit reached, wait for idle stream */
+ if (zs->avail_strm >= zs->max_strm) {
+ spin_unlock(&zs->strm_lock);
+ wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ continue;
+ }
+ /* allocate new zstrm stream */
+ zs->avail_strm++;
+ spin_unlock(&zs->strm_lock);
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (!zstrm) {
+ spin_lock(&zs->strm_lock);
+ zs->avail_strm--;
+ spin_unlock(&zs->strm_lock);
+ wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
+ continue;
+ }
+ break;
+ }
+ return zstrm;
+}
+
+/* add stream back to idle list and wake up waiter or free the stream */
+static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+
+ spin_lock(&zs->strm_lock);
+ if (zs->avail_strm <= zs->max_strm) {
+ list_add(&zstrm->list, &zs->idle_strm);
+ spin_unlock(&zs->strm_lock);
+ wake_up(&zs->strm_wait);
+ return;
+ }
+
+ zs->avail_strm--;
+ spin_unlock(&zs->strm_lock);
+ zcomp_strm_free(comp, zstrm);
+}
+
+/* change max_strm limit */
+static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ spin_lock(&zs->strm_lock);
+ zs->max_strm = num_strm;
+ /*
+ * if the user has lowered the limit and there are idle streams,
+ * immediately free as many streams (and as much memory) as we can.
+ */
+ while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ zcomp_strm_free(comp, zstrm);
+ zs->avail_strm--;
+ }
+ spin_unlock(&zs->strm_lock);
+ return true;
+}
+
+static void zcomp_strm_multi_destroy(struct zcomp *comp)
+{
+ struct zcomp_strm_multi *zs = comp->stream;
+ struct zcomp_strm *zstrm;
+
+ while (!list_empty(&zs->idle_strm)) {
+ zstrm = list_entry(zs->idle_strm.next,
+ struct zcomp_strm, list);
+ list_del(&zstrm->list);
+ zcomp_strm_free(comp, zstrm);
+ }
+ kfree(zs);
+}
+
+static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
+{
+ struct zcomp_strm *zstrm;
+ struct zcomp_strm_multi *zs;
+
+ comp->destroy = zcomp_strm_multi_destroy;
+ comp->strm_find = zcomp_strm_multi_find;
+ comp->strm_release = zcomp_strm_multi_release;
+ comp->set_max_streams = zcomp_strm_multi_set_max_streams;
+ zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
+ if (!zs)
+ return -ENOMEM;
+
+ comp->stream = zs;
+ spin_lock_init(&zs->strm_lock);
+ INIT_LIST_HEAD(&zs->idle_strm);
+ init_waitqueue_head(&zs->strm_wait);
+ zs->max_strm = max_strm;
+ zs->avail_strm = 1;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (!zstrm) {
+ kfree(zs);
+ return -ENOMEM;
+ }
+ list_add(&zstrm->list, &zs->idle_strm);
+ return 0;
+}
+
+static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ mutex_lock(&zs->strm_lock);
+ return zs->zstrm;
+}
+
+static void zcomp_strm_single_release(struct zcomp *comp,
+ struct zcomp_strm *zstrm)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ mutex_unlock(&zs->strm_lock);
+}
+
+static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
+{
+ /* zcomp_strm_single supports only max_comp_streams == 1 */
+ return false;
+}
+
+static void zcomp_strm_single_destroy(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs = comp->stream;
+ zcomp_strm_free(comp, zs->zstrm);
+ kfree(zs);
+}
+
+static int zcomp_strm_single_create(struct zcomp *comp)
+{
+ struct zcomp_strm_single *zs;
+
+ comp->destroy = zcomp_strm_single_destroy;
+ comp->strm_find = zcomp_strm_single_find;
+ comp->strm_release = zcomp_strm_single_release;
+ comp->set_max_streams = zcomp_strm_single_set_max_streams;
+ zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
+ if (!zs)
+ return -ENOMEM;
+
+ comp->stream = zs;
+ mutex_init(&zs->strm_lock);
+ zs->zstrm = zcomp_strm_alloc(comp);
+ if (!zs->zstrm) {
+ kfree(zs);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* show available compressors */
+ssize_t zcomp_available_show(const char *comp, char *buf)
+{
+ ssize_t sz = 0;
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "[%s] ", backends[i]->name);
+ else
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "%s ", backends[i]->name);
+ i++;
+ }
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
+ return sz;
+}
+
+bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
+{
+ return comp->set_max_streams(comp, num_strm);
+}
+
+struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
+{
+ return comp->strm_find(comp);
+}
+
+void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
+{
+ comp->strm_release(comp, zstrm);
+}
+
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const unsigned char *src, size_t *dst_len)
+{
+ return comp->backend->compress(src, zstrm->buffer, dst_len,
+ zstrm->private);
+}
+
+int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
+ size_t src_len, unsigned char *dst)
+{
+ return comp->backend->decompress(src, src_len, dst);
+}
+
+void zcomp_destroy(struct zcomp *comp)
+{
+ comp->destroy(comp);
+ kfree(comp);
+}
+
+/*
+ * search the available compressors for the requested algorithm,
+ * allocate a new zcomp and initialize it. Returns a pointer to the
+ * compression frontend or an ERR_PTR on failure: ERR_PTR(-EINVAL)
+ * if the requested algorithm is not supported, ERR_PTR(-ENOMEM) on
+ * allocation error.
+ */
+struct zcomp *zcomp_create(const char *compress, int max_strm)
+{
+ struct zcomp *comp;
+ struct zcomp_backend *backend;
+
+ backend = find_backend(compress);
+ if (!backend)
+ return ERR_PTR(-EINVAL);
+
+ comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
+
+ comp->backend = backend;
+ if (max_strm > 1)
+ zcomp_strm_multi_create(comp, max_strm);
+ else
+ zcomp_strm_single_create(comp);
+ if (!comp->stream) {
+ kfree(comp);
+ return ERR_PTR(-ENOMEM);
+ }
+ return comp;
+}
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
new file mode 100644
index 00000000000..c59d1fca72c
--- /dev/null
+++ b/drivers/block/zram/zcomp.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_H_
+#define _ZCOMP_H_
+
+#include <linux/mutex.h>
+
+struct zcomp_strm {
+ /* compression/decompression buffer */
+ void *buffer;
+ /*
+ * The private data of the compression stream, only compression
+ * stream backend can touch this (e.g. compression algorithm
+ * working memory)
+ */
+ void *private;
+ /* used in multi stream backend, protected by backend strm_lock */
+ struct list_head list;
+};
+
+/* static compression backend */
+struct zcomp_backend {
+ int (*compress)(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private);
+
+ int (*decompress)(const unsigned char *src, size_t src_len,
+ unsigned char *dst);
+
+ void *(*create)(void);
+ void (*destroy)(void *private);
+
+ const char *name;
+};
+
+/* dynamic per-device compression frontend */
+struct zcomp {
+ void *stream;
+ struct zcomp_backend *backend;
+
+ struct zcomp_strm *(*strm_find)(struct zcomp *comp);
+ void (*strm_release)(struct zcomp *comp, struct zcomp_strm *zstrm);
+ bool (*set_max_streams)(struct zcomp *comp, int num_strm);
+ void (*destroy)(struct zcomp *comp);
+};
+
+ssize_t zcomp_available_show(const char *comp, char *buf);
+
+struct zcomp *zcomp_create(const char *comp, int max_strm);
+void zcomp_destroy(struct zcomp *comp);
+
+struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
+void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm);
+
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const unsigned char *src, size_t *dst_len);
+
+int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
+ size_t src_len, unsigned char *dst);
+
+bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
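+
+/*
+ * Typical call sequence (an illustrative sketch only, mirroring how
+ * the zram driver uses this API):
+ *
+ *	comp = zcomp_create("lzo", max_comp_streams);
+ *	zstrm = zcomp_strm_find(comp);
+ *	ret = zcomp_compress(comp, zstrm, page, &clen);
+ *	zcomp_strm_release(comp, zstrm);
+ *	...
+ *	zcomp_destroy(comp);
+ */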
+#endif /* _ZCOMP_H_ */
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
new file mode 100644
index 00000000000..f2afb7e988c
--- /dev/null
+++ b/drivers/block/zram/zcomp_lz4.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lz4.h>
+
+#include "zcomp_lz4.h"
+
+static void *zcomp_lz4_create(void)
+{
+ return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+}
+
+static void zcomp_lz4_destroy(void *private)
+{
+ kfree(private);
+}
+
+static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private)
+{
+	/* returns 0 on success */
+ return lz4_compress(src, PAGE_SIZE, dst, dst_len, private);
+}
+
+static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len,
+ unsigned char *dst)
+{
+ size_t dst_len = PAGE_SIZE;
+	/* returns 0 on success */
+ return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len);
+}
+
+struct zcomp_backend zcomp_lz4 = {
+ .compress = zcomp_lz4_compress,
+ .decompress = zcomp_lz4_decompress,
+ .create = zcomp_lz4_create,
+ .destroy = zcomp_lz4_destroy,
+ .name = "lz4",
+};
diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h
new file mode 100644
index 00000000000..60613fb29dd
--- /dev/null
+++ b/drivers/block/zram/zcomp_lz4.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_LZ4_H_
+#define _ZCOMP_LZ4_H_
+
+#include "zcomp.h"
+
+extern struct zcomp_backend zcomp_lz4;
+
+#endif /* _ZCOMP_LZ4_H_ */
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
new file mode 100644
index 00000000000..da1bc47d588
--- /dev/null
+++ b/drivers/block/zram/zcomp_lzo.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "zcomp_lzo.h"
+
+static void *lzo_create(void)
+{
+ return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+}
+
+static void lzo_destroy(void *private)
+{
+ kfree(private);
+}
+
+static int lzo_compress(const unsigned char *src, unsigned char *dst,
+ size_t *dst_len, void *private)
+{
+ int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzo_decompress(const unsigned char *src, size_t src_len,
+ unsigned char *dst)
+{
+ size_t dst_len = PAGE_SIZE;
+ int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+struct zcomp_backend zcomp_lzo = {
+ .compress = lzo_compress,
+ .decompress = lzo_decompress,
+ .create = lzo_create,
+ .destroy = lzo_destroy,
+ .name = "lzo",
+};
diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h
new file mode 100644
index 00000000000..128c5807fa1
--- /dev/null
+++ b/drivers/block/zram/zcomp_lzo.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2014 Sergey Senozhatsky.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ZCOMP_LZO_H_
+#define _ZCOMP_LZO_H_
+
+#include "zcomp.h"
+
+extern struct zcomp_backend zcomp_lzo;
+
+#endif /* _ZCOMP_LZO_H_ */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
new file mode 100644
index 00000000000..45e2e85815a
--- /dev/null
+++ b/drivers/block/zram/zram_drv.c
@@ -0,0 +1,1162 @@
+/*
+ * Compressed RAM block device
+ *
+ * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ */
+
+#define KMSG_COMPONENT "zram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#ifdef CONFIG_ZRAM_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+
+#include "zram_drv.h"
+
+/* Globals */
+static int zram_major;
+static struct zram *zram_devices;
+static const char *default_compressor = "lzo";
+
+/* Module params (documentation at end) */
+static unsigned int num_devices = 1;
+
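+/*
+ * Generate a read-only sysfs attribute that prints the named 64-bit
+ * counter from zram->stats.
+ */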
+#define ZRAM_ATTR_RO(name) \
+static ssize_t zram_attr_##name##_show(struct device *d, \
+ struct device_attribute *attr, char *b) \
+{ \
+ struct zram *zram = dev_to_zram(d); \
+ return scnprintf(b, PAGE_SIZE, "%llu\n", \
+ (u64)atomic64_read(&zram->stats.name)); \
+} \
+static struct device_attribute dev_attr_##name = \
+ __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
+
+static inline int init_done(struct zram *zram)
+{
+ return zram->meta != NULL;
+}
+
+static inline struct zram *dev_to_zram(struct device *dev)
+{
+ return (struct zram *)dev_to_disk(dev)->private_data;
+}
+
+static ssize_t disksize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+}
+
+static ssize_t initstate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = init_done(zram);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t orig_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n",
+ (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
+}
+
+static ssize_t mem_used_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ if (init_done(zram)) {
+ struct zram_meta *meta = zram->meta;
+ val = zs_get_total_pages(meta->mem_pool);
+ }
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t max_comp_streams_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->max_comp_streams;
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t mem_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->limit_pages;
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t mem_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 limit;
+ char *tmp;
+ struct zram *zram = dev_to_zram(dev);
+
+ limit = memparse(buf, &tmp);
+ if (buf == tmp) /* no chars parsed, invalid input */
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t mem_used_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ if (init_done(zram))
+ val = atomic_long_read(&zram->stats.max_used_pages);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t mem_used_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int err;
+ unsigned long val;
+ struct zram *zram = dev_to_zram(dev);
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || val != 0)
+ return -EINVAL;
+
+ down_read(&zram->init_lock);
+ if (init_done(zram)) {
+ struct zram_meta *meta = zram->meta;
+ atomic_long_set(&zram->stats.max_used_pages,
+ zs_get_total_pages(meta->mem_pool));
+ }
+ up_read(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t max_comp_streams_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int num;
+ struct zram *zram = dev_to_zram(dev);
+ int ret;
+
+ ret = kstrtoint(buf, 0, &num);
+ if (ret < 0)
+ return ret;
+ if (num < 1)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ if (!zcomp_set_max_streams(zram->comp, num)) {
+ pr_info("Cannot change max compression streams\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ zram->max_comp_streams = num;
+ ret = len;
+out:
+ up_write(&zram->init_lock);
+ return ret;
+}
+
+static ssize_t comp_algorithm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ size_t sz;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->compressor, buf);
+ up_read(&zram->init_lock);
+
+ return sz;
+}
+
+static ssize_t comp_algorithm_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ up_write(&zram->init_lock);
+ pr_info("Can't change algorithm for initialized device\n");
+ return -EBUSY;
+ }
+ strlcpy(zram->compressor, buf, sizeof(zram->compressor));
+ up_write(&zram->init_lock);
+ return len;
+}
+
+/* flag operations require the table entry's ZRAM_ACCESS bit_spinlock */
+static int zram_test_flag(struct zram_meta *meta, u32 index,
+ enum zram_pageflags flag)
+{
+ return meta->table[index].value & BIT(flag);
+}
+
+static void zram_set_flag(struct zram_meta *meta, u32 index,
+ enum zram_pageflags flag)
+{
+ meta->table[index].value |= BIT(flag);
+}
+
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
+ enum zram_pageflags flag)
+{
+ meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+ return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+ u32 index, size_t size)
+{
+ unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+ meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+}
+
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+ return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+ u64 start, end, bound;
+
+ /* unaligned request */
+ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ return 0;
+ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ return 0;
+
+ start = bio->bi_sector;
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+	/* out of range */
+	if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+ return 1;
+}
+
+static void zram_meta_free(struct zram_meta *meta, u64 disksize)
+{
+ size_t num_pages = disksize >> PAGE_SHIFT;
+ size_t index;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < num_pages; index++) {
+ unsigned long handle = meta->table[index].handle;
+
+ if (!handle)
+ continue;
+
+ zs_free(meta->mem_pool, handle);
+ }
+
+ zs_destroy_pool(meta->mem_pool);
+ vfree(meta->table);
+ kfree(meta);
+}
+
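+/* allocate the per-device table and zsmalloc pool for @disksize bytes */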
+static struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
+
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
+ pr_err("Error allocating zram address table\n");
+ goto free_meta;
+ }
+
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
+
+ return meta;
+
+free_table:
+ vfree(meta->table);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
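+/* advance (index, offset) past @bvec, moving to the next page on wrap */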
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
+
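+/* return 1 if the page contains only zero words, 0 otherwise */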
+static int page_zero_filled(void *ptr)
+{
+ unsigned int pos;
+ unsigned long *page;
+
+ page = (unsigned long *)ptr;
+
+ for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
+ if (page[pos])
+ return 0;
+ }
+
+ return 1;
+}
+
+static void handle_zero_page(struct bio_vec *bvec)
+{
+ struct page *page = bvec->bv_page;
+ void *user_mem;
+
+ user_mem = kmap_atomic(page);
+ if (is_partial_io(bvec))
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ else
+ clear_page(user_mem);
+ kunmap_atomic(user_mem);
+
+ flush_dcache_page(page);
+}
+
+/*
+ * To protect against concurrent access to the same index entry, the
+ * caller must hold this table entry's ZRAM_ACCESS bit_spinlock while
+ * the entry is being accessed.
+ */
+static void zram_free_page(struct zram *zram, size_t index)
+{
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+
+ if (unlikely(!handle)) {
+ /*
+ * No memory is allocated for zero filled pages.
+ * Simply clear zero page flag.
+ */
+ if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_clear_flag(meta, index, ZRAM_ZERO);
+ atomic64_dec(&zram->stats.zero_pages);
+ }
+ return;
+ }
+
+ zs_free(meta->mem_pool, handle);
+
+ atomic64_sub(zram_get_obj_size(meta, index),
+ &zram->stats.compr_data_size);
+ atomic64_dec(&zram->stats.pages_stored);
+
+ meta->table[index].handle = 0;
+ zram_set_obj_size(meta, index, 0);
+}
+
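+/*
+ * Decompress the object at @index into the PAGE_SIZE buffer @mem,
+ * holding the entry's ZRAM_ACCESS bit lock while the object is mapped.
+ */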
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+{
+ int ret = 0;
+ unsigned char *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle;
+ size_t size;
+
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ handle = meta->table[index].handle;
+ size = zram_get_obj_size(meta, index);
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ clear_page(mem);
+ return 0;
+ }
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE)
+ copy_page(mem, cmem);
+ else
+ ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ zs_unmap_object(meta->mem_pool, handle);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
+ int ret;
+ struct page *page;
+ unsigned char *user_mem, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
+ page = bvec->bv_page;
+
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ handle_zero_page(bvec);
+ return 0;
+ }
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+ if (is_partial_io(bvec))
+ /* Use a temporary buffer to decompress the page */
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+
+ user_mem = kmap_atomic(page);
+ if (!is_partial_io(bvec))
+ uncmem = user_mem;
+
+ if (!uncmem) {
+ pr_info("Unable to allocate temp memory\n");
+ ret = -ENOMEM;
+ goto out_cleanup;
+ }
+
+ ret = zram_decompress_page(zram, uncmem, index);
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret))
+ goto out_cleanup;
+
+ if (is_partial_io(bvec))
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
+
+ flush_dcache_page(page);
+ ret = 0;
+out_cleanup:
+ kunmap_atomic(user_mem);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ return ret;
+}
+
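+/*
+ * Lock-free update of the max_used_pages watermark: retry the cmpxchg
+ * until either the current maximum is at least @pages or we have
+ * stored the new maximum.
+ */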
+static inline void update_used_max(struct zram *zram,
+ const unsigned long pages)
+{
+	unsigned long old_max, cur_max;
+
+ old_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ cur_max = old_max;
+ if (pages > cur_max)
+ old_max = atomic_long_cmpxchg(
+ &zram->stats.max_used_pages, cur_max, pages);
+ } while (old_max != cur_max);
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset)
+{
+ int ret = 0;
+ size_t clen;
+ unsigned long handle;
+ struct page *page;
+ unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
+ struct zcomp_strm *zstrm;
+ bool locked = false;
+ unsigned long alloced_pages;
+
+ page = bvec->bv_page;
+ if (is_partial_io(bvec)) {
+ /*
+		 * This is a partial I/O: we need to read the full page
+		 * before writing the changes.
+ */
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = zram_decompress_page(zram, uncmem, index);
+ if (ret)
+ goto out;
+ }
+
+ zstrm = zcomp_strm_find(zram->comp);
+ locked = true;
+ user_mem = kmap_atomic(page);
+
+ if (is_partial_io(bvec)) {
+ memcpy(uncmem + offset, user_mem + bvec->bv_offset,
+ bvec->bv_len);
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
+ uncmem = user_mem;
+ }
+
+ if (page_zero_filled(uncmem)) {
+ if (user_mem)
+ kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_free_page(zram, index);
+ zram_set_flag(meta, index, ZRAM_ZERO);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+ atomic64_inc(&zram->stats.zero_pages);
+ ret = 0;
+ goto out;
+ }
+
+ ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
+
+ if (unlikely(ret)) {
+ pr_err("Compression failed! err=%d\n", ret);
+ goto out;
+ }
+ src = zstrm->buffer;
+ if (unlikely(clen > max_zpage_size)) {
+ clen = PAGE_SIZE;
+ if (is_partial_io(bvec))
+ src = uncmem;
+ }
+
+ handle = zs_malloc(meta->mem_pool, clen);
+ if (!handle) {
+ pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ index, clen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ alloced_pages = zs_get_total_pages(meta->mem_pool);
+ if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ zs_free(meta->mem_pool, handle);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ update_used_max(zram, alloced_pages);
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+ copy_page(cmem, src);
+ kunmap_atomic(src);
+ } else {
+ memcpy(cmem, src, clen);
+ }
+
+ zcomp_strm_release(zram->comp, zstrm);
+ locked = false;
+ zs_unmap_object(meta->mem_pool, handle);
+
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ zram_set_obj_size(meta, index, clen);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+ /* Update stats */
+ atomic64_add(clen, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.pages_stored);
+out:
+ if (locked)
+ zcomp_strm_release(zram->comp, zstrm);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ return ret;
+}
+
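+/* dispatch one bio vector to the read or write path and account failures */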
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset, struct bio *bio)
+{
+ int ret;
+ int rw = bio_data_dir(bio);
+
+ if (rw == READ) {
+ atomic64_inc(&zram->stats.num_reads);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ } else {
+ atomic64_inc(&zram->stats.num_writes);
+ ret = zram_bvec_write(zram, bvec, index, offset);
+ }
+
+ if (unlikely(ret)) {
+ if (rw == READ)
+ atomic64_inc(&zram->stats.failed_reads);
+ else
+ atomic64_inc(&zram->stats.failed_writes);
+ }
+
+ return ret;
+}
+
+/*
+ * zram_bio_discard - handler on discard request
+ * @index: physical block index in PAGE_SIZE units
+ * @offset: byte offset within physical block
+ */
+static void zram_bio_discard(struct zram *zram, u32 index,
+ int offset, struct bio *bio)
+{
+ size_t n = bio->bi_size;
+ struct zram_meta *meta = zram->meta;
+
+ /*
+	 * zram manages data in physical block size units. Because the
+	 * logical block size isn't identical to the physical block size
+	 * on some architectures, we could get a discard request pointing
+	 * to a specific offset within a physical block. Although we could
+	 * handle such a request by reading that physical block,
+	 * decompressing it, partially zeroing it, and then re-compressing
+	 * and re-storing it, that isn't reasonable: the whole point of a
+	 * discard request is to save memory. So skipping this partial
+	 * logical block is appropriate here.
+ */
+ if (offset) {
+ if (n <= (PAGE_SIZE - offset))
+ return;
+
+ n -= (PAGE_SIZE - offset);
+ index++;
+ }
+
+ while (n >= PAGE_SIZE) {
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_free_page(zram, index);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ atomic64_inc(&zram->stats.notify_free);
+ index++;
+ n -= PAGE_SIZE;
+ }
+}
+
+static void zram_reset_device(struct zram *zram)
+{
+ down_write(&zram->init_lock);
+
+ zram->limit_pages = 0;
+
+ if (!init_done(zram)) {
+ up_write(&zram->init_lock);
+ return;
+ }
+
+ zcomp_destroy(zram->comp);
+ zram->max_comp_streams = 1;
+ zram_meta_free(zram->meta, zram->disksize);
+ zram->meta = NULL;
+ /* Reset stats */
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+ up_write(&zram->init_lock);
+}
+
+static ssize_t disksize_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 disksize;
+ struct zcomp *comp;
+ struct zram_meta *meta;
+ struct zram *zram = dev_to_zram(dev);
+ int err;
+
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
+ if (!meta)
+ return -ENOMEM;
+
+ comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+ if (IS_ERR(comp)) {
+ pr_info("Cannot initialise %s compressing backend\n",
+ zram->compressor);
+ err = PTR_ERR(comp);
+ goto out_free_meta;
+ }
+
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ pr_info("Cannot change disksize for initialized device\n");
+ err = -EBUSY;
+ goto out_destroy_comp;
+ }
+
+ zram->meta = meta;
+ zram->comp = comp;
+ zram->disksize = disksize;
+ set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ up_write(&zram->init_lock);
+
+ /*
+ * Revalidate disk out of the init_lock to avoid lockdep splat.
+ * It's okay because disk's capacity is protected by init_lock
+ * so that revalidate_disk always sees up-to-date capacity.
+ */
+ revalidate_disk(zram->disk);
+
+ return len;
+
+out_destroy_comp:
+ up_write(&zram->init_lock);
+ zcomp_destroy(comp);
+out_free_meta:
+ zram_meta_free(meta, disksize);
+ return err;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int ret;
+ unsigned short do_reset;
+ struct zram *zram;
+ struct block_device *bdev;
+
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
+ if (!bdev)
+ return -ENOMEM;
+
+ mutex_lock(&bdev->bd_mutex);
+ /* Do not reset an active device! */
+ if (bdev->bd_holders) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = kstrtou16(buf, 10, &do_reset);
+ if (ret)
+ goto out;
+
+ if (!do_reset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure all pending I/O is finished */
+ fsync_bdev(bdev);
+ zram_reset_device(zram);
+ set_capacity(zram->disk, 0);
+
+ mutex_unlock(&bdev->bd_mutex);
+ revalidate_disk(zram->disk);
+ bdput(bdev);
+
+ return len;
+
+out:
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+ return ret;
+}
+
+static void __zram_make_request(struct zram *zram, struct bio *bio)
+{
+ int i, offset;
+ u32 index;
+ struct bio_vec *bvec;
+
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+
+ if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ zram_bio_discard(zram, index, offset, bio);
+ bio_endio(bio, 0);
+ return;
+ }
+
+ bio_for_each_segment(bvec, bio, i) {
+ int max_transfer_size = PAGE_SIZE - offset;
+
+ if (bvec->bv_len > max_transfer_size) {
+ /*
+ * zram_bvec_rw() can only make operation on a single
+ * zram page. Split the bio vector.
+ */
+ struct bio_vec bv;
+
+ bv.bv_page = bvec->bv_page;
+ bv.bv_len = max_transfer_size;
+ bv.bv_offset = bvec->bv_offset;
+
+ if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
+ goto out;
+
+ bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_offset += max_transfer_size;
+ if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
+ goto out;
+ } else
+ if (zram_bvec_rw(zram, bvec, index, offset, bio) < 0)
+ goto out;
+
+ update_position(&index, &offset, bvec);
+ }
+
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio_endio(bio, 0);
+ return;
+
+out:
+ bio_io_error(bio);
+}
+
+/*
+ * Handler function for all zram I/O requests.
+ */
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
+{
+ struct zram *zram = queue->queuedata;
+
+ down_read(&zram->init_lock);
+ if (unlikely(!init_done(zram)))
+ goto error;
+
+ if (!valid_io_request(zram, bio)) {
+ atomic64_inc(&zram->stats.invalid_io);
+ goto error;
+ }
+
+ __zram_make_request(zram, bio);
+ up_read(&zram->init_lock);
+
+ return;
+
+error:
+ up_read(&zram->init_lock);
+ bio_io_error(bio);
+}
+
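+/*
+ * Called by the swap layer when a swap slot backed by this device is
+ * freed, so the compressed copy can be dropped immediately.
+ */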
+static void zram_slot_free_notify(struct block_device *bdev,
+ unsigned long index)
+{
+ struct zram *zram;
+ struct zram_meta *meta;
+
+ zram = bdev->bd_disk->private_data;
+ meta = zram->meta;
+
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_free_page(zram, index);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ atomic64_inc(&zram->stats.notify_free);
+}
+
+static const struct block_device_operations zram_devops = {
+ .swap_slot_free_notify = zram_slot_free_notify,
+ .owner = THIS_MODULE
+};
+
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
+ disksize_show, disksize_store);
+static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
+static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
+static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
+ mem_limit_store);
+static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
+ mem_used_max_store);
+static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
+ max_comp_streams_show, max_comp_streams_store);
+static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
+ comp_algorithm_show, comp_algorithm_store);
+
+ZRAM_ATTR_RO(num_reads);
+ZRAM_ATTR_RO(num_writes);
+ZRAM_ATTR_RO(failed_reads);
+ZRAM_ATTR_RO(failed_writes);
+ZRAM_ATTR_RO(invalid_io);
+ZRAM_ATTR_RO(notify_free);
+ZRAM_ATTR_RO(zero_pages);
+ZRAM_ATTR_RO(compr_data_size);
+
+static struct attribute *zram_disk_attrs[] = {
+ &dev_attr_disksize.attr,
+ &dev_attr_initstate.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_num_reads.attr,
+ &dev_attr_num_writes.attr,
+ &dev_attr_failed_reads.attr,
+ &dev_attr_failed_writes.attr,
+ &dev_attr_invalid_io.attr,
+ &dev_attr_notify_free.attr,
+ &dev_attr_zero_pages.attr,
+ &dev_attr_orig_data_size.attr,
+ &dev_attr_compr_data_size.attr,
+ &dev_attr_mem_used_total.attr,
+ &dev_attr_mem_limit.attr,
+ &dev_attr_mem_used_max.attr,
+ &dev_attr_max_comp_streams.attr,
+ &dev_attr_comp_algorithm.attr,
+ NULL,
+};
+
+static struct attribute_group zram_disk_attr_group = {
+ .attrs = zram_disk_attrs,
+};
+
+static int create_device(struct zram *zram, int device_id)
+{
+ int ret = -ENOMEM;
+
+ init_rwsem(&zram->init_lock);
+
+ zram->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+ device_id);
+ goto out;
+ }
+
+ blk_queue_make_request(zram->queue, zram_make_request);
+ zram->queue->queuedata = zram;
+
+ /* gendisk structure */
+ zram->disk = alloc_disk(1);
+ if (!zram->disk) {
+ pr_warn("Error allocating disk structure for device %d\n",
+ device_id);
+ goto out_free_queue;
+ }
+
+ zram->disk->major = zram_major;
+ zram->disk->first_minor = device_id;
+ zram->disk->fops = &zram_devops;
+ zram->disk->queue = zram->queue;
+ zram->disk->private_data = zram;
+ snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+
+	/* Actual capacity is set via sysfs: /sys/block/zram<id>/disksize */
+ set_capacity(zram->disk, 0);
+	/* zram devices somewhat resemble non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+ /*
+	 * Ensure that we always get PAGE_SIZE-aligned
+	 * and n*PAGE_SIZE-sized I/O requests.
+ */
+ blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
+ blk_queue_logical_block_size(zram->disk->queue,
+ ZRAM_LOGICAL_BLOCK_SIZE);
+ blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
+ blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
+ zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+ zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+ /*
+	 * zram_bio_discard() will clear all logical blocks if the logical
+	 * block size is identical to the physical block size (PAGE_SIZE).
+	 * But if it differs, we skip discarding the parts of logical
+	 * blocks that fall in the portion of the request range which
+	 * isn't aligned to the physical block size, so we can't guarantee
+	 * that all discarded logical blocks are zeroed.
+ */
+ if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
+ zram->disk->queue->limits.discard_zeroes_data = 1;
+ else
+ zram->disk->queue->limits.discard_zeroes_data = 0;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+
+ add_disk(zram->disk);
+
+ ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
+ &zram_disk_attr_group);
+ if (ret < 0) {
+		pr_warn("Error creating sysfs group\n");
+ goto out_free_disk;
+ }
+ strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+ zram->meta = NULL;
+ zram->max_comp_streams = 1;
+ return 0;
+
+out_free_disk:
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+out_free_queue:
+ blk_cleanup_queue(zram->queue);
+out:
+ return ret;
+}
+
+static void destroy_device(struct zram *zram)
+{
+ sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
+ &zram_disk_attr_group);
+
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+
+ blk_cleanup_queue(zram->queue);
+}
+
+static int __init zram_init(void)
+{
+ int ret, dev_id;
+
+ if (num_devices > max_num_devices) {
+ pr_warn("Invalid value for num_devices: %u\n",
+ num_devices);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ zram_major = register_blkdev(0, "zram");
+ if (zram_major <= 0) {
+ pr_warn("Unable to get major number\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Allocate the device array and initialize each one */
+ zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
+ if (!zram_devices) {
+ ret = -ENOMEM;
+ goto unregister;
+ }
+
+ for (dev_id = 0; dev_id < num_devices; dev_id++) {
+ ret = create_device(&zram_devices[dev_id], dev_id);
+ if (ret)
+ goto free_devices;
+ }
+
+ pr_info("Created %u device(s) ...\n", num_devices);
+
+ return 0;
+
+free_devices:
+ while (dev_id)
+ destroy_device(&zram_devices[--dev_id]);
+ kfree(zram_devices);
+unregister:
+ unregister_blkdev(zram_major, "zram");
+out:
+ return ret;
+}
+
+static void __exit zram_exit(void)
+{
+ int i;
+ struct zram *zram;
+
+ for (i = 0; i < num_devices; i++) {
+ zram = &zram_devices[i];
+
+ destroy_device(zram);
+ /*
+ * Shouldn't access zram->disk after destroy_device
+ * because destroy_device already released zram->disk.
+ */
+ zram_reset_device(zram);
+ }
+
+ unregister_blkdev(zram_major, "zram");
+
+ kfree(zram_devices);
+ pr_debug("Cleanup done!\n");
+}
+
+module_init(zram_init);
+module_exit(zram_exit);
+
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
+MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d542eee8135..b05a816b09a 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,16 +10,15 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_
#include <linux/spinlock.h>
-#include <linux/mutex.h>
+#include <linux/zsmalloc.h>
-#include "../zsmalloc/zsmalloc.h"
+#include "zcomp.h"
/*
* Some arbitrary value. This is just to catch
@@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*-- End of configurable params */
#define SECTOR_SHIFT 9
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
@@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-/* Flags for zram pages (table[page_no].flags) */
+
+/*
+ * zram is mainly used for memory efficiency, so we want to keep the
+ * per-entry footprint small by squeezing size and flags into a single
+ * field: the lower ZRAM_FLAG_SHIFT bits of table.value hold the
+ * object size (excluding header) and the higher bits hold the
+ * zram_pageflags.
+ */
+#define ZRAM_FLAG_SHIFT 24
+
+/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO,
+ ZRAM_ZERO = ZRAM_FLAG_SHIFT,
+ ZRAM_ACCESS, /* page is now accessed */
__NR_ZRAM_PAGEFLAGS,
};
@@ -62,43 +75,35 @@ enum zram_pageflags {
/*-- Data structures */
/* Allocated for each disk page */
-struct table {
+struct zram_table_entry {
unsigned long handle;
- u16 size; /* object size (excluding header) */
- u8 count; /* object ref count (not yet used) */
- u8 flags;
-} __aligned(4);
+ unsigned long value;
+};
struct zram_stats {
- u64 compr_size; /* compressed size of pages stored */
- u64 num_reads; /* failed + successful */
- u64 num_writes; /* --do-- */
- u64 failed_reads; /* should NEVER! happen */
- u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-page-aligned I/O requests */
- u64 notify_free; /* no. of swap slot free notifications */
- u32 pages_zero; /* no. of zero filled pages */
- u32 pages_stored; /* no. of pages currently stored */
- u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 bad_compress; /* % of pages with compression ratio>=75% */
+ atomic64_t compr_data_size; /* compressed size of pages stored */
+ atomic64_t num_reads; /* failed + successful */
+ atomic64_t num_writes; /* --do-- */
+ atomic64_t failed_reads; /* can happen when memory is too low */
+ atomic64_t failed_writes; /* can happen when memory is too low */
+ atomic64_t invalid_io; /* non-page-aligned I/O requests */
+ atomic64_t notify_free; /* no. of swap slot free notifications */
+ atomic64_t zero_pages; /* no. of zero filled pages */
+ atomic64_t pages_stored; /* no. of pages currently stored */
+ atomic_long_t max_used_pages; /* no. of maximum pages stored */
};
struct zram_meta {
- void *compress_workmem;
- void *compress_buffer;
- struct table *table;
+ struct zram_table_entry *table;
struct zs_pool *mem_pool;
};
struct zram {
struct zram_meta *meta;
- spinlock_t stat64_lock; /* protect 64-bit stats */
- struct rw_semaphore lock; /* protect compression buffers, table,
- * 32bit stat counters against concurrent
- * notifications, reads and writes */
struct request_queue *queue;
struct gendisk *disk;
- int init_done;
+ struct zcomp *comp;
+
/* Prevent concurrent execution of device init, reset and R/W request */
struct rw_semaphore init_lock;
/*
@@ -106,19 +111,13 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
-
+ int max_comp_streams;
struct zram_stats stats;
-};
-
-extern struct zram *zram_devices;
-unsigned int zram_get_num_devices(void);
-#ifdef CONFIG_SYSFS
-extern struct attribute_group zram_disk_attr_group;
-#endif
-
-extern void zram_reset_device(struct zram *zram);
-extern struct zram_meta *zram_meta_alloc(u64 disksize);
-extern void zram_meta_free(struct zram_meta *meta);
-extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
+ /*
+ * the number of pages zram can consume for storing compressed data
+ */
+ unsigned long limit_pages;
+ char compressor[10];
+};
#endif
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index dad8891ecbf..9c2c4eca52e 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -77,6 +77,8 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
+ { USB_DEVICE(0x0CF3, 0x311E) },
+ { USB_DEVICE(0x0CF3, 0x311F) },
{ USB_DEVICE(0x0CF3, 0x817a) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3004) },
@@ -120,6 +122,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 61a8ec4e5f4..92b98531777 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_WRONG_SCO_MTU 0x40
#define BTUSB_ATH3012 0x80
#define BTUSB_INTEL 0x100
+#define BTUSB_INTEL_BOOT 0x200
static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -113,6 +114,13 @@ static struct usb_device_id btusb_table[] = {
/*Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+ /* IMC Networks - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+
+ /* Intel Bluetooth USB Bootloader (RAM module) */
+ { USB_DEVICE(0x8087, 0x0a5a),
+ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+
{ } /* Terminating entry */
};
@@ -141,6 +149,8 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
@@ -1444,6 +1454,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL)
hdev->setup = btusb_setup_intel;
+ if (id->driver_info & BTUSB_INTEL_BOOT)
+ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd916..b94a37630e3 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -487,6 +487,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
* c->desc is NULL and exit.)
*/
if (c->desc) {
+ omap_dma_desc_free(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e04462b6075..f505e4ca6d5 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -270,8 +270,9 @@ static const u32 correrrthrsld[] = {
* sbridge structs
*/
-#define NUM_CHANNELS 4
-#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define NUM_CHANNELS 4
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
struct sbridge_info {
u32 mcmtr;
@@ -1451,6 +1452,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
/* FIXME: need support for channel mask */
+ if (channel == CHANNEL_UNSPECIFIED)
+ channel = -1;
+
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7695b5dd9d2..35287ab445c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -909,6 +909,7 @@
#define GMBUS_CYCLE_INDEX (2<<25)
#define GMBUS_CYCLE_STOP (4<<25)
#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997..4a21e13cc58 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -276,18 +276,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
}
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
- u8 *buf = msg->buf;
I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -309,11 +308,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
- int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
u8 *buf = msg->buf;
+ unsigned int rx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+ buf, len, gmbus1_index);
+ if (ret)
+ return ret;
+
+ rx_size -= len;
+ buf += len;
+ } while (rx_size != 0);
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ unsigned int chunk_size = len;
u32 val, loop;
val = loop = 0;
@@ -325,8 +348,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -343,6 +366,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
if (ret)
return ret;
}
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ u8 *buf = msg->buf;
+ unsigned int tx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ if (ret)
+ return ret;
+
+ buf += len;
+ tx_size -= len;
+ } while (tx_size != 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 971dd8795b6..8ac33309499 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -312,8 +312,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -356,8 +358,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b131520521e..72b02483ff0 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
static bool radeon_read_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios, val1, val2;
size_t size;
rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return false;
}
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ val1 = readb(&bios[0]);
+ val2 = readb(&bios[1]);
+
+ if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ rdev->bios = kzalloc(size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
+ memcpy_fromio(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 92f34de7aee..05e6a7d13d4 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -169,7 +169,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
- goto error0;
+ goto error_gpadl;
}
init_completion(&open_info->waitevent);
@@ -185,7 +185,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
- goto error0;
+ goto error_gpadl;
}
if (userdatalen)
@@ -226,6 +226,9 @@ error1:
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_gpadl:
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+
error0:
free_pages((unsigned long)out,
get_order(send_ringbuffer_size + recv_ringbuffer_size));
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 780599ce465..c12299fd8cc 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -213,6 +213,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
{
@@ -227,6 +228,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
return ret;
}
+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
int i2c_recover_bus(struct i2c_adapter *adap)
{
@@ -236,6 +238,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
return adap->bus_recovery_info->recover_bus(adap);
}
+EXPORT_SYMBOL_GPL(i2c_recover_bus);
static int i2c_device_probe(struct device *dev)
{
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index e0017c22bb9..f53e9a803a0 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
- indio_dev->trig = adis->trig;
+ indio_dev->trig = iio_trigger_get(adis->trig);
if (ret)
goto error_free_irq;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 7da0832f187..01d661e0fa6 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -25,6 +25,16 @@
#include <linux/poll.h>
#include "inv_mpu_iio.h"
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+ unsigned long flags;
+
+	/* take the spinlock so the interrupt handler can't touch the kfifo */
+ spin_lock_irqsave(&st->time_stamp_lock, flags);
+ kfifo_reset(&st->timestamps);
+ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
int inv_reset_fifo(struct iio_dev *indio_dev)
{
int result;
@@ -51,6 +61,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
+
+ /* clear timestamps fifo */
+ inv_clear_kfifo(st);
+
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@@ -84,16 +98,6 @@ reset_fifo_fail:
return result;
}
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
-{
- unsigned long flags;
-
- /* take the spin lock sem to avoid interrupt kick in */
- spin_lock_irqsave(&st->time_stamp_lock, flags);
- kfifo_reset(&st->timestamps);
- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
-}
-
/**
* inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@@ -187,7 +191,6 @@ end_session:
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_reset_fifo(indio_dev);
- inv_clear_kfifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a8411232207..c1fef27010d 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,17 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size))
+ return ERR_PTR(-EINVAL);
+
if (!can_do_mlock())
return ERR_PTR(-EPERM);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2c6f0f2ecd9..949b3863349 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -460,6 +460,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
entry->desc.async.element = element;
entry->desc.async.event_type = event;
+ entry->desc.async.reserved = 0;
entry->counter = counter;
list_add_tail(&entry->list, &file->async_file->event_list);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0..6ee53487453 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {\
+ if ((value) > U32_MAX) \
+ counter = cpu_to_be32(U32_MAX); \
+ else \
+ counter = cpu_to_be32(value); \
+} while (0)
+
struct mlx4_mad_rcv_buf {
struct ib_grh grh;
u8 payload[256];
@@ -730,10 +738,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
static void edit_counter(struct mlx4_counter *cnt,
struct ib_pma_portcounters *pma_cnt)
{
- pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
- pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
- pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
- pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+ (be64_to_cpu(cnt->tx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+ (be64_to_cpu(cnt->rx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+ be64_to_cpu(cnt->tx_frames));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+ be64_to_cpu(cnt->rx_frames));
}
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b..262a18437ce 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2174,8 +2174,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
- wr->wr.ud.hlen);
+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
*lso_seg_len = halign;
return 0;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 85e75239c81..1af7df26336 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -784,6 +784,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
}
/*
+ * This writes the reg_07 value again to the hardware at the end of every
+ * set_rate call because the register loses its value. reg_07 allows setting
+ * absolute mode on v4 hardware.
+ */
+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
+ unsigned int rate)
+{
+ struct elantech_data *etd = psmouse->private;
+
+ etd->original_set_rate(psmouse, rate);
+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
+ psmouse_err(psmouse, "restoring reg_07 failed\n");
+}
+
+/*
* Put the touchpad into absolute mode
*/
static int elantech_set_absolute_mode(struct psmouse *psmouse)
@@ -985,6 +1000,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
* Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
* Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
@@ -1452,6 +1469,11 @@ int elantech_init(struct psmouse *psmouse)
goto init_fail;
}
+ if (etd->fw_version == 0x381f17) {
+ etd->original_set_rate = psmouse->set_rate;
+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
+ }
+
if (elantech_set_input_params(psmouse)) {
psmouse_err(psmouse, "failed to query touchpad range.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index c1c15ab6872..13a12ccbff5 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -138,6 +138,7 @@ struct elantech_data {
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
};
#ifdef CONFIG_MOUSE_PS2_ELANTECH
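
The elantech change is an instance of a common hook-wrapping pattern: save the
original callback once, install a wrapper in its place, and have the wrapper
delegate before redoing the step the hardware forgets. A standalone sketch with
all names invented for illustration (this is not the psmouse API):

    #include <stdio.h>

    struct dev {
        void (*set_rate)(struct dev *d, unsigned int rate);
        void (*original_set_rate)(struct dev *d, unsigned int rate);
    };

    static void generic_set_rate(struct dev *d, unsigned int rate)
    {
        printf("rate set to %u\n", rate);
    }

    /* Wrapper: delegate to the saved callback, then rewrite the
     * register the rate change clobbered. */
    static void set_rate_restore_reg(struct dev *d, unsigned int rate)
    {
        d->original_set_rate(d, rate);
        printf("reg_07 rewritten\n");
    }

    int main(void)
    {
        struct dev d = { .set_rate = generic_set_rate };

        /* Install the wrapper exactly once, keeping the original. */
        d.original_set_rate = d.set_rate;
        d.set_rate = set_rate_restore_reg;

        d.set_rate(&d, 100);
        return 0;
    }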
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index f804c1faa7f..d3b54f7b849 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -29,7 +29,7 @@
/* Offset base used to differentiate between CAPTURE and OUTPUT
 * while mmapping */
-#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
+#define DST_QUEUE_OFF_BASE (1 << 30)
#define MFC_BANK1_ALLOC_CTX 0
#define MFC_BANK2_ALLOC_CTX 1
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index a59153d2f8b..518a5299ff0 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -245,6 +245,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
if (mutex_lock_interruptible(&dev->v4l_lock))
return -ERESTARTSYS;
+ /*
+ * Once URBs are cancelled, the URB complete handler
+ * won't be running. This is required to safely release the
+ * current buffer (dev->isoc_ctl.buf).
+ */
stk1160_cancel_isoc(dev);
/*
@@ -665,8 +670,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
stk1160_info("buffer [%p/%d] aborted\n",
buf, buf->vb.v4l2_buf.index);
}
- /* It's important to clear current buffer */
- dev->isoc_ctl.buf = NULL;
+
+ /* It's important to release the current buffer */
+ if (dev->isoc_ctl.buf) {
+ buf = dev->isoc_ctl.buf;
+ dev->isoc_ctl.buf = NULL;
+
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
spin_unlock_irqrestore(&dev->buf_lock, flags);
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index f4176ca3a79..cdd61ab5c2b 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
if (msb->data_dir == READ) {
- for (cnt = 0; cnt < msb->current_seg; cnt++)
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
t_len += msb->req_sg[cnt].length
/ msb->page_size;
@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len += msb->current_page - 1;
t_len *= msb->page_size;
+ }
}
} else
t_len = blk_rq_bytes(msb->block_req);
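
The mspro_block hunk is the classic missing-braces bug: the indentation says the
later statements belong to the for loop, but without braces only the first one
does. A minimal reproduction of the hazard, unrelated to the driver's actual
arithmetic (gcc's -Wmisleading-indentation flags this class of bug):

    #include <stdio.h>

    int main(void)
    {
        int sum = 0;

        for (int i = 0; i < 3; i++)
            sum += i;
            sum *= 2;  /* indented like loop body, but runs once, after it */

        printf("sum=%d\n", sum);  /* prints 6; braces would give 8 */
        return 0;
    }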
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c071d410488..79d69bd26dd 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
second_is_newer = !second_is_newer;
} else {
dbg_bld("PEB %d CRC is OK", pnum);
- bitflips = !!err;
+ bitflips |= !!err;
}
mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4f02848bb2b..fc764e7976b 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -475,7 +475,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
err = get_exclusive(desc);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 0e11671dadc..930cf2c77ab 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
- vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ else
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 49e570abe58..c08254016fe 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -999,7 +999,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
- int vol_id = -1, uninitialized_var(lnum);
+ int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 5d204492c60..161dcba13c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2869,7 +2869,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = BNX2_NEXT_TX_BD(sw_cons);
tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_pkt++;
if (tx_pkt == budget)
break;
@@ -6610,7 +6610,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -6703,7 +6703,7 @@ dma_error:
PCI_DMA_TODEVICE);
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
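
The dev_kfree_skb() -> dev_kfree_skb_any() conversions here, and in the tg3,
benet, ixgb, 8139 and r8169 hunks below, all close the same hazard: the plain
free assumes process or softirq context, while these transmit paths can also be
reached with IRQs disabled (netpoll being the usual culprit). A
context-checking dispatch in miniature, with the SKB machinery faked and names
invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool in_atomic_context;  /* stand-in for irqs_disabled() || in_irq() */

    static void free_pkt(void *p)           /* like dev_kfree_skb() */
    {
        free(p);                            /* only legal outside hard IRQ */
    }

    static void free_pkt_deferred(void *p)  /* like dev_kfree_skb_irq() */
    {
        printf("queued %p for deferred free\n", p);
    }

    static void free_pkt_any(void *p)       /* like dev_kfree_skb_any() */
    {
        if (in_atomic_context)
            free_pkt_deferred(p);
        else
            free_pkt(p);
    }

    int main(void)
    {
        void *pkt = malloc(64);

        in_atomic_context = true;  /* e.g. inside a TX completion IRQ */
        free_pkt_any(pkt);         /* defers instead of freeing directly */

        in_atomic_context = false;
        free_pkt_any(pkt);         /* process context: frees for real */
        return 0;
    }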
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8c1eab1151b..680d26d6d2c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6437,7 +6437,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -6769,7 +6769,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (len > (tp->dev->mtu + ETH_HLEN) &&
skb->protocol != htons(ETH_P_8021Q) &&
skb->protocol != htons(ETH_P_8021AD)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto drop_it_no_recycle;
}
@@ -7652,7 +7652,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
} else {
u32 save_entry = *entry;
@@ -7667,13 +7667,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
new_skb->len, base_flags,
mss, vlan)) {
tg3_tx_skb_unmap(tnapi, save_entry, -1);
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
}
}
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
*pskb = new_skb;
return ret;
}
@@ -7716,7 +7716,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
} while (segs);
tg3_tso_bug_end:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -7954,7 +7954,7 @@ dma_error:
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
drop_nofree:
tp->tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d81a7dbfeef..88e85cb8834 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1767,7 +1767,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
queue_tail_inc(txq);
} while (cur_index != last_index);
- kfree_skb(sent_skb);
+ dev_kfree_skb_any(sent_skb);
return num_wrbs;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5a..a978fc82ceb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
@@ -3555,8 +3560,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
msleep(1);
/* e1000_down has a dependency on max_frame_size */
hw->max_frame_size = max_frame;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* prevent buffers from being reallocated */
+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
e1000_down(adapter);
+ }
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d1..c5a9dcc01ca 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1527,12 +1527,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int tso;
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1549,7 +1549,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 064425d3178..437d4cfd42c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -899,7 +899,7 @@ out_unlock:
return NETDEV_TX_OK;
out_dma_error:
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
cp->dev->stats.tx_dropped++;
goto out_unlock;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ccedeb8aba..942673fcb39 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1715,9 +1715,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
if (len < ETH_ZLEN)
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e9b5d77a90d..2183c618914 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5768,7 +5768,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
tp->TxDescArray + entry);
if (skb) {
tp->dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -5993,7 +5993,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -6076,7 +6076,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb(tx_skb->skb);
+ dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 71ea77576d2..e783ea0e383 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -670,7 +670,6 @@ struct iwl_priv {
unsigned long reload_jiffies;
int reload_count;
bool ucode_loaded;
- bool init_ucode_run; /* Don't run init uCode again */
u8 plcp_delta_threshold;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 0a1cdc5e856..5ad94a8080b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -425,9 +425,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
return 0;
- if (priv->init_ucode_run)
- return 0;
-
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
calib_complete, ARRAY_SIZE(calib_complete),
iwlagn_wait_calib, priv);
@@ -447,8 +444,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
*/
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
- if (!ret)
- priv->init_ucode_run = true;
goto out;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index e7a2af3ad05..7555095e0b7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -313,6 +313,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -369,6 +370,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 7f1669cdea0..779dc2b2ca7 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index f7381dd6900..1bce4325e86 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,8 @@
#include "wlcore.h"
-int wl1271_format_buffer(char __user *userbuf, size_t count,
- loff_t *ppos, char *fmt, ...);
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);
int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index a50576081b3..46d2de24bf3 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -36,7 +36,9 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
- (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
+ (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
+ !XTENSA && !CRIS && !H8300 && !ARM64
+
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index ed49b50b220..72da2a6c22d 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
pchg->battery.get_property = lp8788_battery_get_property;
- if (power_supply_register(&pdev->dev, &pchg->battery))
+ if (power_supply_register(&pdev->dev, &pchg->battery)) {
+ power_supply_unregister(&pchg->charger);
return -EPERM;
+ }
return 0;
}
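
The lp8788 fix follows the unwind-on-failure rule: once the charger supply is
registered, a failure to register the battery must unregister it again, or the
device is left half-initialized with no way to clean up. The same shape in a
generic sketch, with all helpers invented for illustration:

    #include <stdio.h>

    static int register_a(void)    { printf("A registered\n"); return 0; }
    static void unregister_a(void) { printf("A unregistered\n"); }
    static int register_b(void)    { return -1; }  /* simulate failure */

    static int probe(void)
    {
        int err = register_a();

        if (err)
            return err;

        err = register_b();
        if (err) {
            unregister_a();  /* unwind what already succeeded */
            return err;
        }
        return 0;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }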
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1ad39c799c7..bfe812fcce3 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5080,9 +5080,9 @@ free_port:
hba_free:
if (phba->msix_enabled)
pci_disable_msix(phba->pcidev);
- iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_disable_device(pcidev);
return ret;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index c9e244984e3..fa50c7dc3d3 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
- struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
- struct sas_phy *sphy = dev->phy;
- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e5953c8018c..9f3168e8e5a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1242,9 +1242,11 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
"rejecting I/O to dead device\n");
ret = BLKPREP_KILL;
break;
- case SDEV_QUIESCE:
case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
+ ret = BLKPREP_DEFER;
+ break;
+ case SDEV_QUIESCE:
/*
* If the devices is blocked we defer normal commands.
*/
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a8990783ba6..913b91c78a2 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -631,21 +631,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].length == PAGE_SIZE) {
/* full..move to next entry */
sg_kunmap_atomic(bounce_addr);
+ bounce_addr = 0;
j++;
+ }
- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ /* if we need to use another bounce buffer */
+ if (srclen && bounce_addr == 0)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
- }
}
sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
}
+ if (bounce_addr)
+ sg_kunmap_atomic(bounce_addr);
+
local_irq_restore(flags);
return total_copied;
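
The storvsc rework keeps the kmap/kunmap pairs balanced with a sentinel:
bounce_addr == 0 means "nothing mapped", it is zeroed immediately after every
unmap, a new mapping is created only when data remains, and a single trailing
unmap after the loop releases whatever is still live. The same sentinel pattern
in miniature, with the mapping faked by malloc/free:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *mapped = NULL;  /* NULL plays the role of bounce_addr == 0 */

        for (int i = 0; i < 3; i++) {
            int page_full = (i != 2);  /* pretend the last page is partial */

            if (mapped && page_full) {
                free(mapped);   /* unmap a filled page ... */
                mapped = NULL;  /* ... and mark it unmapped */
            }
            if (!mapped)        /* map only when actually needed */
                mapped = malloc(16);
            printf("iteration %d uses %p\n", i, (void *)mapped);
        }

        if (mapped)  /* one trailing unmap if a mapping is still live */
            free(mapped);
        return 0;
    }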
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 911e9e0711d..a08f923b992 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->len = u_tmp->len;
total += k_tmp->len;
- if (total > bufsiz) {
+ /* Check total length of transfers. Also check each
+ * transfer length to avoid arithmetic overflow.
+ */
+ if (total > bufsiz || k_tmp->len > bufsiz) {
status = -EMSGSIZE;
goto done;
}
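
The spidev check closes an accumulation overflow: a later transfer length can
wrap the 32-bit running total back below bufsiz, so the total alone is not a
safe bound and each per-transfer length must be checked too. A toy
demonstration with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bufsiz = 4096;
        unsigned int total = 0;
        /* The second length wraps the 32-bit total back below bufsiz,
         * so the old "total > bufsiz" test alone would pass it. */
        unsigned int lens[] = { 4000u, 0xFFFFFF00u };

        for (int i = 0; i < 2; i++) {
            total += lens[i];
            if (total > bufsiz || lens[i] > bufsiz) {
                printf("rejected transfer %d (total=%u)\n", i, total);
                return 1;
            }
        }
        printf("accepted (total=%u)\n", total);
        return 0;
    }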
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index aefe820a800..25c8bffdd24 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -72,10 +72,6 @@ source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 415772ea306..f9d86a4b48e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -30,8 +30,6 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
deleted file mode 100644
index 7f4a3019e9c..00000000000
--- a/drivers/staging/zram/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zram-y := zram_drv.o zram_sysfs.o
-
-obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
deleted file mode 100644
index 765d790ae83..00000000000
--- a/drivers/staging/zram/zram.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-zram: Compressed RAM based block devices
-----------------------------------------
-
-Project home: http://compcache.googlecode.com/
-
-* Introduction
-
-The zram module creates RAM based block devices named /dev/zram<id>
-(<id> = 0, 1, ...). Pages written to these disks are compressed and stored
-in memory itself. These disks allow very fast I/O and compression provides
-good amounts of memory savings. Some of the usecases include /tmp storage,
-use as swap disks, various caches under /var and maybe many more :)
-
-Statistics for individual zram devices are exported through sysfs nodes at
-/sys/block/zram<id>/
-
-* Usage
-
-The following shows a typical sequence of steps for using zram.
-
-1) Load Module:
- modprobe zram num_devices=4
- This creates 4 devices: /dev/zram{0,1,2,3}
- (num_devices parameter is optional. Default: 1)
-
-2) Set Disksize
- Set disk size by writing the value to sysfs node 'disksize'.
- The value can be either in bytes or you can use mem suffixes.
- Examples:
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- # Using mem suffixes
- echo 256K > /sys/block/zram0/disksize
- echo 512M > /sys/block/zram0/disksize
- echo 1G > /sys/block/zram0/disksize
-
-3) Activate:
- mkswap /dev/zram0
- swapon /dev/zram0
-
- mkfs.ext4 /dev/zram1
- mount /dev/zram1 /tmp
-
-4) Stats:
- Per-device statistics are exported as various nodes under
- /sys/block/zram<id>/
- disksize
- num_reads
- num_writes
- invalid_io
- notify_free
- discard
- zero_pages
- orig_data_size
- compr_data_size
- mem_used_total
-
-5) Deactivate:
- swapoff /dev/zram0
- umount /dev/zram1
-
-6) Reset:
- Write any positive value to 'reset' sysfs node
- echo 1 > /sys/block/zram0/reset
- echo 1 > /sys/block/zram1/reset
-
- This frees all the memory allocated for the given device and
- resets the disksize to zero. You must set the disksize again
- before reusing the device.
-
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
-Nitin Gupta
-ngupta@vflare.org
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
deleted file mode 100644
index a333d44d0cf..00000000000
--- a/drivers/staging/zram/zram_drv.c
+++ /dev/null
@@ -1,760 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com
- */
-
-#define KMSG_COMPONENT "zram"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#ifdef CONFIG_ZRAM_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bio.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/lzo.h>
-#include <linux/string.h>
-#include <linux/vmalloc.h>
-
-#include "zram_drv.h"
-
-/* Globals */
-static int zram_major;
-struct zram *zram_devices;
-
-/* Module params (documentation at end) */
-static unsigned int num_devices = 1;
-
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
-{
- spin_lock(&zram->stat64_lock);
- *v = *v + inc;
- spin_unlock(&zram->stat64_lock);
-}
-
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
-{
- spin_lock(&zram->stat64_lock);
- *v = *v - dec;
- spin_unlock(&zram->stat64_lock);
-}
-
-static void zram_stat64_inc(struct zram *zram, u64 *v)
-{
- zram_stat64_add(zram, v, 1);
-}
-
-static int zram_test_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag)
-{
- return meta->table[index].flags & BIT(flag);
-}
-
-static void zram_set_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag)
-{
- meta->table[index].flags |= BIT(flag);
-}
-
-static void zram_clear_flag(struct zram_meta *meta, u32 index,
- enum zram_pageflags flag)
-{
- meta->table[index].flags &= ~BIT(flag);
-}
-
-static int page_zero_filled(void *ptr)
-{
- unsigned int pos;
- unsigned long *page;
-
- page = (unsigned long *)ptr;
-
- for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos])
- return 0;
- }
-
- return 1;
-}
-
-static void zram_free_page(struct zram *zram, size_t index)
-{
- struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
- u16 size = meta->table[index].size;
-
- if (unlikely(!handle)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(meta, index, ZRAM_ZERO)) {
- zram_clear_flag(meta, index, ZRAM_ZERO);
- zram->stats.pages_zero--;
- }
- return;
- }
-
- if (unlikely(size > max_zpage_size))
- zram->stats.bad_compress--;
-
- zs_free(meta->mem_pool, handle);
-
- if (size <= PAGE_SIZE / 2)
- zram->stats.good_compress--;
-
- zram_stat64_sub(zram, &zram->stats.compr_size,
- meta->table[index].size);
- zram->stats.pages_stored--;
-
- meta->table[index].handle = 0;
- meta->table[index].size = 0;
-}
-
-static void handle_zero_page(struct bio_vec *bvec)
-{
- struct page *page = bvec->bv_page;
- void *user_mem;
-
- user_mem = kmap_atomic(page);
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- kunmap_atomic(user_mem);
-
- flush_dcache_page(page);
-}
-
-static inline int is_partial_io(struct bio_vec *bvec)
-{
- return bvec->bv_len != PAGE_SIZE;
-}
-
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
-{
- int ret = LZO_E_OK;
- size_t clen = PAGE_SIZE;
- unsigned char *cmem;
- struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
-
- if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- memset(mem, 0, PAGE_SIZE);
- return 0;
- }
-
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (meta->table[index].size == PAGE_SIZE)
- memcpy(mem, cmem, PAGE_SIZE);
- else
- ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
- mem, &clen);
- zs_unmap_object(meta->mem_pool, handle);
-
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
-
- return 0;
-}
-
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio)
-{
- int ret;
- struct page *page;
- unsigned char *user_mem, *uncmem = NULL;
- struct zram_meta *meta = zram->meta;
- page = bvec->bv_page;
-
- if (unlikely(!meta->table[index].handle) ||
- zram_test_flag(meta, index, ZRAM_ZERO)) {
- handle_zero_page(bvec);
- return 0;
- }
-
- if (is_partial_io(bvec))
- /* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
-
- user_mem = kmap_atomic(page);
- if (!is_partial_io(bvec))
- uncmem = user_mem;
-
- if (!uncmem) {
- pr_info("Unable to allocate temp memory\n");
- ret = -ENOMEM;
- goto out_cleanup;
- }
-
- ret = zram_decompress_page(zram, uncmem, index);
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK))
- goto out_cleanup;
-
- if (is_partial_io(bvec))
- memcpy(user_mem + bvec->bv_offset, uncmem + offset,
- bvec->bv_len);
-
- flush_dcache_page(page);
- ret = 0;
-out_cleanup:
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
- return ret;
-}
-
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset)
-{
- int ret = 0;
- size_t clen;
- unsigned long handle;
- struct page *page;
- unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
- struct zram_meta *meta = zram->meta;
-
- page = bvec->bv_page;
- src = meta->compress_buffer;
-
- if (is_partial_io(bvec)) {
- /*
- * This is a partial IO. We need to read the full page
- * before writing the changes.
- */
- uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
- if (!uncmem) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zram_decompress_page(zram, uncmem, index);
- if (ret)
- goto out;
- }
-
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
- user_mem = kmap_atomic(page);
-
- if (is_partial_io(bvec)) {
- memcpy(uncmem + offset, user_mem + bvec->bv_offset,
- bvec->bv_len);
- kunmap_atomic(user_mem);
- user_mem = NULL;
- } else {
- uncmem = user_mem;
- }
-
- if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem);
- zram->stats.pages_zero++;
- zram_set_flag(meta, index, ZRAM_ZERO);
- ret = 0;
- goto out;
- }
-
- ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- meta->compress_workmem);
-
- if (!is_partial_io(bvec)) {
- kunmap_atomic(user_mem);
- user_mem = NULL;
- uncmem = NULL;
- }
-
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Compression failed! err=%d\n", ret);
- goto out;
- }
-
- if (unlikely(clen > max_zpage_size)) {
- zram->stats.bad_compress++;
- clen = PAGE_SIZE;
- src = NULL;
- if (is_partial_io(bvec))
- src = uncmem;
- }
-
- handle = zs_malloc(meta->mem_pool, clen);
- if (!handle) {
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
- ret = -ENOMEM;
- goto out;
- }
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
-
- if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
- src = kmap_atomic(page);
- memcpy(cmem, src, clen);
- if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
- kunmap_atomic(src);
-
- zs_unmap_object(meta->mem_pool, handle);
-
- meta->table[index].handle = handle;
- meta->table[index].size = clen;
-
- /* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram->stats.pages_stored++;
- if (clen <= PAGE_SIZE / 2)
- zram->stats.good_compress++;
-
-out:
- if (is_partial_io(bvec))
- kfree(uncmem);
-
- if (ret)
- zram_stat64_inc(zram, &zram->stats.failed_writes);
- return ret;
-}
-
-static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, struct bio *bio, int rw)
-{
- int ret;
-
- if (rw == READ) {
- down_read(&zram->lock);
- ret = zram_bvec_read(zram, bvec, index, offset, bio);
- up_read(&zram->lock);
- } else {
- down_write(&zram->lock);
- ret = zram_bvec_write(zram, bvec, index, offset);
- up_write(&zram->lock);
- }
-
- return ret;
-}
-
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
-{
- if (*offset + bvec->bv_len >= PAGE_SIZE)
- (*index)++;
- *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
-}
-
-static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
-{
- int i, offset;
- u32 index;
- struct bio_vec *bvec;
-
- switch (rw) {
- case READ:
- zram_stat64_inc(zram, &zram->stats.num_reads);
- break;
- case WRITE:
- zram_stat64_inc(zram, &zram->stats.num_writes);
- break;
- }
-
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
-
- bio_for_each_segment(bvec, bio, i) {
- int max_transfer_size = PAGE_SIZE - offset;
-
- if (bvec->bv_len > max_transfer_size) {
- /*
- * zram_bvec_rw() can only operate on a single
- * zram page. Split the bio vector.
- */
- struct bio_vec bv;
-
- bv.bv_page = bvec->bv_page;
- bv.bv_len = max_transfer_size;
- bv.bv_offset = bvec->bv_offset;
-
- if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
- goto out;
-
- bv.bv_len = bvec->bv_len - max_transfer_size;
- bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
- goto out;
- } else
- if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
- < 0)
- goto out;
-
- update_position(&index, &offset, bvec);
- }
-
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return;
-
-out:
- bio_io_error(bio);
-}
-
-/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
-{
- u64 start, end, bound;
-
- /* unaligned request */
- if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
- return 0;
- if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
- return 0;
-
- start = bio->bi_sector;
- end = start + (bio->bi_size >> SECTOR_SHIFT);
- bound = zram->disksize >> SECTOR_SHIFT;
-	/* out of range */
- if (unlikely(start >= bound || end > bound || start > end))
- return 0;
-
- /* I/O request is valid */
- return 1;
-}
-
-/*
- * Handler function for all zram I/O requests.
- */
-static void zram_make_request(struct request_queue *queue, struct bio *bio)
-{
- struct zram *zram = queue->queuedata;
-
- down_read(&zram->init_lock);
- if (unlikely(!zram->init_done))
- goto error;
-
- if (!valid_io_request(zram, bio)) {
- zram_stat64_inc(zram, &zram->stats.invalid_io);
- goto error;
- }
-
- __zram_make_request(zram, bio, bio_data_dir(bio));
- up_read(&zram->init_lock);
-
- return;
-
-error:
- up_read(&zram->init_lock);
- bio_io_error(bio);
-}
-
-static void __zram_reset_device(struct zram *zram)
-{
- size_t index;
- struct zram_meta *meta;
-
- if (!zram->init_done)
- return;
-
- meta = zram->meta;
- zram->init_done = 0;
-
- /* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- unsigned long handle = meta->table[index].handle;
- if (!handle)
- continue;
-
- zs_free(meta->mem_pool, handle);
- }
-
- zram_meta_free(zram->meta);
- zram->meta = NULL;
- /* Reset stats */
- memset(&zram->stats, 0, sizeof(zram->stats));
-
- zram->disksize = 0;
- set_capacity(zram->disk, 0);
-}
-
-void zram_reset_device(struct zram *zram)
-{
- down_write(&zram->init_lock);
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-}
-
-void zram_meta_free(struct zram_meta *meta)
-{
- zs_destroy_pool(meta->mem_pool);
- kfree(meta->compress_workmem);
- free_pages((unsigned long)meta->compress_buffer, 1);
- vfree(meta->table);
- kfree(meta);
-}
-
-struct zram_meta *zram_meta_alloc(u64 disksize)
-{
- size_t num_pages;
- struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
- if (!meta)
- goto out;
-
- meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!meta->compress_workmem)
- goto free_meta;
-
- meta->compress_buffer =
- (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!meta->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- goto free_workmem;
- }
-
- num_pages = disksize >> PAGE_SHIFT;
- meta->table = vzalloc(num_pages * sizeof(*meta->table));
- if (!meta->table) {
- pr_err("Error allocating zram address table\n");
- goto free_buffer;
- }
-
- meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
- if (!meta->mem_pool) {
- pr_err("Error creating memory pool\n");
- goto free_table;
- }
-
- return meta;
-
-free_table:
- vfree(meta->table);
-free_buffer:
- free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
- kfree(meta->compress_workmem);
-free_meta:
- kfree(meta);
- meta = NULL;
-out:
- return meta;
-}
-
-void zram_init_device(struct zram *zram, struct zram_meta *meta)
-{
- if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %lu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
- );
- }
-
-	/* zram devices sort of resemble non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
- zram->meta = meta;
- zram->init_done = 1;
-
- pr_debug("Initialization done!\n");
-}
-
-static void zram_slot_free_notify(struct block_device *bdev,
- unsigned long index)
-{
- struct zram *zram;
-
- zram = bdev->bd_disk->private_data;
- down_write(&zram->lock);
- zram_free_page(zram, index);
- up_write(&zram->lock);
- zram_stat64_inc(zram, &zram->stats.notify_free);
-}
-
-static const struct block_device_operations zram_devops = {
- .swap_slot_free_notify = zram_slot_free_notify,
- .owner = THIS_MODULE
-};
-
-static int create_device(struct zram *zram, int device_id)
-{
- int ret = -ENOMEM;
-
- init_rwsem(&zram->lock);
- init_rwsem(&zram->init_lock);
- spin_lock_init(&zram->stat64_lock);
-
- zram->queue = blk_alloc_queue(GFP_KERNEL);
- if (!zram->queue) {
- pr_err("Error allocating disk queue for device %d\n",
- device_id);
- goto out;
- }
-
- blk_queue_make_request(zram->queue, zram_make_request);
- zram->queue->queuedata = zram;
-
- /* gendisk structure */
- zram->disk = alloc_disk(1);
- if (!zram->disk) {
- pr_warn("Error allocating disk structure for device %d\n",
- device_id);
- goto out_free_queue;
- }
-
- zram->disk->major = zram_major;
- zram->disk->first_minor = device_id;
- zram->disk->fops = &zram_devops;
- zram->disk->queue = zram->queue;
- zram->disk->private_data = zram;
- snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
-
-	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
- set_capacity(zram->disk, 0);
-
- /*
- * To ensure that we always get PAGE_SIZE aligned
- * and n*PAGE_SIZED sized I/O requests.
- */
- blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
- blk_queue_logical_block_size(zram->disk->queue,
- ZRAM_LOGICAL_BLOCK_SIZE);
- blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
- blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
-
- add_disk(zram->disk);
-
- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
- if (ret < 0) {
- pr_warn("Error creating sysfs group");
- goto out_free_disk;
- }
-
- zram->init_done = 0;
- return 0;
-
-out_free_disk:
- del_gendisk(zram->disk);
- put_disk(zram->disk);
-out_free_queue:
- blk_cleanup_queue(zram->queue);
-out:
- return ret;
-}
-
-static void destroy_device(struct zram *zram)
-{
- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
-
- if (zram->disk) {
- del_gendisk(zram->disk);
- put_disk(zram->disk);
- }
-
- if (zram->queue)
- blk_cleanup_queue(zram->queue);
-}
-
-unsigned int zram_get_num_devices(void)
-{
- return num_devices;
-}
-
-static int __init zram_init(void)
-{
- int ret, dev_id;
-
- if (num_devices > max_num_devices) {
- pr_warn("Invalid value for num_devices: %u\n",
- num_devices);
- ret = -EINVAL;
- goto out;
- }
-
- zram_major = register_blkdev(0, "zram");
- if (zram_major <= 0) {
- pr_warn("Unable to get major number\n");
- ret = -EBUSY;
- goto out;
- }
-
- /* Allocate the device array and initialize each one */
- zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
- if (!zram_devices) {
- ret = -ENOMEM;
- goto unregister;
- }
-
- for (dev_id = 0; dev_id < num_devices; dev_id++) {
- ret = create_device(&zram_devices[dev_id], dev_id);
- if (ret)
- goto free_devices;
- }
-
- pr_info("Created %u device(s) ...\n", num_devices);
-
- return 0;
-
-free_devices:
- while (dev_id)
- destroy_device(&zram_devices[--dev_id]);
- kfree(zram_devices);
-unregister:
- unregister_blkdev(zram_major, "zram");
-out:
- return ret;
-}
-
-static void __exit zram_exit(void)
-{
- int i;
- struct zram *zram;
-
- for (i = 0; i < num_devices; i++) {
- zram = &zram_devices[i];
-
- get_disk(zram->disk);
- destroy_device(zram);
- zram_reset_device(zram);
- put_disk(zram->disk);
- }
-
- unregister_blkdev(zram_major, "zram");
-
- kfree(zram_devices);
- pr_debug("Cleanup done!\n");
-}
-
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
-
-module_init(zram_init);
-module_exit(zram_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
-MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
deleted file mode 100644
index dc76a3dba1b..00000000000
--- a/drivers/staging/zram/zram_sysfs.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com/
- */
-
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-
-#include "zram_drv.h"
-
-static u64 zram_stat64_read(struct zram *zram, u64 *v)
-{
- u64 val;
-
- spin_lock(&zram->stat64_lock);
- val = *v;
- spin_unlock(&zram->stat64_lock);
-
- return val;
-}
-
-static struct zram *dev_to_zram(struct device *dev)
-{
- int i;
- struct zram *zram = NULL;
-
- for (i = 0; i < zram_get_num_devices(); i++) {
- zram = &zram_devices[i];
- if (disk_to_dev(zram->disk) == dev)
- break;
- }
-
- return zram;
-}
-
-static ssize_t disksize_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n", zram->disksize);
-}
-
-static ssize_t disksize_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- u64 disksize;
- struct zram_meta *meta;
- struct zram *zram = dev_to_zram(dev);
-
- disksize = memparse(buf, NULL);
- if (!disksize)
- return -EINVAL;
-
- disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(disksize);
- down_write(&zram->init_lock);
- if (zram->init_done) {
- up_write(&zram->init_lock);
- zram_meta_free(meta);
- pr_info("Cannot change disksize for initialized device\n");
- return -EBUSY;
- }
-
- zram->disksize = disksize;
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_init_device(zram, meta);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t initstate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- unsigned short do_reset;
- struct zram *zram;
- struct block_device *bdev;
-
- zram = dev_to_zram(dev);
- bdev = bdget_disk(zram->disk, 0);
-
- /* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
-
- ret = kstrtou16(buf, 10, &do_reset);
- if (ret)
- return ret;
-
- if (!do_reset)
- return -EINVAL;
-
- /* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
-
- zram_reset_device(zram);
- return len;
-}
-
-static ssize_t num_reads_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
-}
-
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.compr_size));
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u64 val = 0;
- struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta = zram->meta;
-
- down_read(&zram->init_lock);
- if (zram->init_done)
- val = zs_get_total_size_bytes(meta->mem_pool);
- up_read(&zram->init_lock);
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-
-static struct attribute *zram_disk_attrs[] = {
- &dev_attr_disksize.attr,
- &dev_attr_initstate.attr,
- &dev_attr_reset.attr,
- &dev_attr_num_reads.attr,
- &dev_attr_num_writes.attr,
- &dev_attr_invalid_io.attr,
- &dev_attr_notify_free.attr,
- &dev_attr_zero_pages.attr,
- &dev_attr_orig_data_size.attr,
- &dev_attr_compr_data_size.attr,
- &dev_attr_mem_used_total.attr,
- NULL,
-};
-
-struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848a590..00000000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y := zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9559ea749d8..5a3ea20e9cb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1179,7 +1179,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* traditional iSCSI block I/O.
*/
if (iscsit_allocate_iovecs(cmd) < 0) {
- return iscsit_add_reject_cmd(cmd,
+ return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
immed_data = cmd->immediate_data;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 345b5ddcb1a..86281fa5dcc 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -98,7 +98,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift));
}
- dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ /*
+ * FIXME: this deadlocks if port->lock is already held
+ * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ */
}
}
@@ -128,7 +131,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
dw8250_force_idle(p);
writel(value, p->membase + (UART_LCR << p->regshift));
}
- dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ /*
+ * FIXME: this deadlocks if port->lock is already held
+ * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ */
}
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6463ca3bcfb..07133d0c971 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -244,7 +244,7 @@ static void wdm_int_callback(struct urb *urb)
case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
dev_dbg(&desc->intf->dev,
"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
- dr->wIndex, dr->wLength);
+ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
break;
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
@@ -257,7 +257,9 @@ static void wdm_int_callback(struct urb *urb)
clear_bit(WDM_POLL_RUNNING, &desc->flags);
dev_err(&desc->intf->dev,
"unknown notification %d received: index %d len %d\n",
- dr->bNotificationType, dr->wIndex, dr->wLength);
+ dr->bNotificationType,
+ le16_to_cpu(dr->wIndex),
+ le16_to_cpu(dr->wLength));
goto exit;
}
@@ -403,7 +405,7 @@ static ssize_t wdm_write
USB_RECIP_INTERFACE);
req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
req->wValue = 0;
- req->wIndex = desc->inum;
+ req->wIndex = desc->inum; /* already converted */
req->wLength = cpu_to_le16(count);
set_bit(WDM_IN_USE, &desc->flags);
desc->outbuf = buf;
@@ -417,7 +419,7 @@ static ssize_t wdm_write
rv = usb_translate_errors(rv);
} else {
dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
- req->wIndex);
+ le16_to_cpu(req->wIndex));
}
out:
usb_autopm_put_interface(desc->intf);
@@ -780,7 +782,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
desc->irq->wValue = 0;
- desc->irq->wIndex = desc->inum;
+ desc->irq->wIndex = desc->inum; /* already converted */
desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
usb_fill_control_urb(
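
The cdc-wdm hunks above are byte-order fixes: USB control fields such as wIndex
are little-endian on the wire (__le16), so they must pass through le16_to_cpu()
before being printed or compared on a big-endian CPU. A portable user-space
equivalent that assembles the value from raw bytes, shown for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Works regardless of host endianness, like le16_to_cpu() applied
     * to the raw descriptor bytes. */
    static uint16_t my_le16_to_cpu(const uint8_t raw[2])
    {
        return (uint16_t)(raw[0] | (raw[1] << 8));
    }

    int main(void)
    {
        uint8_t wire_wIndex[2] = { 0x02, 0x00 };  /* interface 2, LE */

        printf("wIndex = %u\n", my_le16_to_cpu(wire_wIndex));
        return 0;
    }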
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c9f56ffdba9..11a073cda1d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3282,10 +3282,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
port1, status);
} else {
- /* drive resume for at least 20 msec */
+ /* drive resume for USB_RESUME_TIMEOUT msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
- msleep(25);
+ msleep(USB_RESUME_TIMEOUT);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2dd57853de6..6a87860a32b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
/*
* The Superspeed USB Capability descriptor shall be implemented by all
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index b64e661618b..baf2807934c 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1488,7 +1488,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
spin_unlock_irq(&isp116x->lock);
hcd->state = HC_STATE_RESUMING;
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
/* Go operational */
spin_lock_irq(&isp116x->lock);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index a6fd8f5371d..6656dfda566 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
rh->port &= ~USB_PORT_STAT_SUSPEND;
rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
- msleep(50);
+ msleep(USB_RESUME_TIMEOUT);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
}
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index b2ec7fe758d..b4cad934603 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1251,7 +1251,7 @@ sl811h_hub_control(
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
mod_timer(&sl811->timer, jiffies
- + msecs_to_jiffies(20));
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case USB_PORT_FEAT_POWER:
port_power(sl811, 0);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 0e57bcb8e3f..2320e20d5be 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -94,6 +94,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
+ xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -109,7 +110,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* PPT chipsets.
*/
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
- xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9948890ef93..bc7a886e3c3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1697,7 +1697,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
- msecs_to_jiffies(20);
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index a9984c700d2..5f79d8e2caa 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
{
- return res == match_data;
+ struct usb_phy **phy = res;
+
+ return *phy == match_data;
}
/**
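
The phy.c fix hinges on a devres detail: the res argument handed to a devm
match callback points at the slot devres allocated (which holds a struct
usb_phy *), not at the phy itself, so it must be dereferenced before the
comparison. A stripped-down illustration with the devres machinery faked:

    #include <stdbool.h>
    #include <stdio.h>

    struct usb_phy { int id; };

    /* Compares the slot address against the phy: never matches. */
    static bool match_wrong(void *res, void *match_data)
    {
        return res == match_data;
    }

    /* Dereferences the slot to reach the stored phy pointer. */
    static bool match_right(void *res, void *match_data)
    {
        struct usb_phy **phy = res;

        return *phy == match_data;
    }

    int main(void)
    {
        struct usb_phy phy = { .id = 1 };
        struct usb_phy *slot = &phy;  /* what devres stored */

        printf("wrong: %d, right: %d\n",
               match_wrong(&slot, &phy), match_right(&slot, &phy));
        return 0;
    }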
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index cf127a08064..4d918d5f945 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -620,6 +620,7 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
/*
* ELV devices:
*/
@@ -1899,8 +1900,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
- if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+ if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
+ return ftdi_jtag_probe(serial);
+
+ if (udev->product &&
+ (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ !strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
return 0;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e8d35261529..e906b6aa242 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -561,6 +561,12 @@
*/
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+/*
+ * Synapse Wireless product ids (FTDI_VID)
+ * http://www.synapse-wireless.com
+ */
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+
/********************************/
/** third-party VID/PID combos **/
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index d5fa5f3fe6d..37e62c7b327 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -6,7 +6,10 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
- depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && !ARM64
+ depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
+ !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
+ (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
+ !ARM64
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index d9a43674cb9..9cca0ea4e47 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
{
struct dentry *dentry;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
if (entry_ino == (u32)(long)dentry->d_fsdata) {
dentry->d_fsdata = (void *)inode->i_ino;
break;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 13ddec92341..8ad277990ea 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
spin_lock(&root->d_lock);
if (prev)
- next = prev->d_u.d_child.next;
+ next = prev->d_child.next;
else {
prev = dget_dlock(root);
next = prev->d_subdirs.next;
@@ -105,13 +105,13 @@ cont:
return NULL;
}
- q = list_entry(next, struct dentry, d_u.d_child);
+ q = list_entry(next, struct dentry, d_child);
spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
/* Already gone or negative dentry (under construction) - try next */
if (q->d_count == 0 || !simple_positive(q)) {
spin_unlock(&q->d_lock);
- next = q->d_u.d_child.next;
+ next = q->d_child.next;
goto cont;
}
dget_dlock(q);
@@ -161,13 +161,13 @@ again:
goto relock;
}
spin_unlock(&p->d_lock);
- next = p->d_u.d_child.next;
+ next = p->d_child.next;
p = parent;
if (next != &parent->d_subdirs)
break;
}
}
- ret = list_entry(next, struct dentry, d_u.d_child);
+ ret = list_entry(next, struct dentry, d_child);
spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
/* Negative dentry - try next */
@@ -447,7 +447,7 @@ found:
spin_lock(&sbi->lookup_lock);
spin_lock(&expired->d_parent->d_lock);
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
+ list_move(&expired->d_parent->d_subdirs, &expired->d_child);
spin_unlock(&expired->d_lock);
spin_unlock(&expired->d_parent->d_lock);
spin_unlock(&sbi->lookup_lock);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 085da86e07c..79ab4cb3590 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
/* only consider parents below dentries in the root */
if (IS_ROOT(parent->d_parent))
return;
- d_child = &dentry->d_u.d_child;
+ d_child = &dentry->d_child;
/* Set parent managed if it's becoming empty */
if (d_child->next == &parent->d_subdirs &&
d_child->prev == &parent->d_subdirs)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3c4d8797ea9..53f620a4350 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -756,6 +756,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
+ unsigned long total_size = 0;
if (elf_ppnt->p_type != PT_LOAD)
continue;
@@ -820,10 +821,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
+ total_size = total_mapping_size(elf_phdata,
+ loc->elf_ex.e_phnum);
+ if (!total_size) {
+ error = -EINVAL;
+ goto out_free_dentry;
+ }
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
- elf_prot, elf_flags, 0);
+ elf_prot, elf_flags, total_size);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
retval = IS_ERR((void *)error) ?
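total_mapping_size() is called above but defined outside this hunk; for ET_DYN binaries it has to cover the span of every PT_LOAD header so the first elf_map() reserves enough address space for the whole image. A sketch of its logic, reconstructed from the mainline helper (not part of this diff):

static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first = -1, last = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last = i;
			if (first == -1)
				first = i;
		}
	}
	if (first == -1)
		return 0;	/* no PT_LOAD at all: caller treats as error */

	return cmds[last].p_vaddr + cmds[last].p_memsz -
	       ELF_PAGESTART(cmds[first].p_vaddr);
}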
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f99c71e40f8..07f167a1d27 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6363,12 +6363,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
return -ENOSPC;
}
- if (btrfs_test_opt(root, DISCARD))
- ret = btrfs_discard_extent(root, start, len, NULL);
-
if (pin)
pin_down_extent(root, cache, start, len, 1);
else {
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 783906c687b..dbefa6c609f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2572,6 +2572,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (off + len == src->i_size)
len = ALIGN(src->i_size, bs) - off;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
/* verify the end result is block aligned */
if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
!IS_ALIGNED(destoff, bs))
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index f02d82b7933..ccb43298e27 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -103,7 +103,7 @@ static unsigned fpos_off(loff_t p)
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
- * d_u.d_child when we initially get results back from the MDS, and
+ * d_child when we initially get results back from the MDS, and
* falling back to a "normal" sync readdir if any dentries in the dir
* are dropped.
*
@@ -139,11 +139,11 @@ static int __dcache_readdir(struct file *filp,
p = parent->d_subdirs.prev;
dout(" initial p %p/%p\n", p->prev, p->next);
} else {
- p = last->d_u.d_child.prev;
+ p = last->d_child.prev;
}
more:
- dentry = list_entry(p, struct dentry, d_u.d_child);
+ dentry = list_entry(p, struct dentry, d_child);
di = ceph_dentry(dentry);
while (1) {
dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
@@ -165,7 +165,7 @@ more:
!dentry->d_inode ? " null" : "");
spin_unlock(&dentry->d_lock);
p = p->prev;
- dentry = list_entry(p, struct dentry, d_u.d_child);
+ dentry = list_entry(p, struct dentry, d_child);
di = ceph_dentry(dentry);
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index be0f7e20d62..0cf23a7b88c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -867,9 +867,9 @@ static void ceph_set_dentry_offset(struct dentry *dn)
spin_lock(&dir->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&dn->d_u.d_child, &dir->d_subdirs);
+ list_move(&dn->d_child, &dir->d_subdirs);
dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
- dn->d_u.d_child.prev, dn->d_u.d_child.next);
+ dn->d_child.prev, dn->d_child.next);
spin_unlock(&dn->d_lock);
spin_unlock(&dir->d_lock);
}
@@ -1296,7 +1296,7 @@ retry_lookup:
/* reorder parent's d_subdirs */
spin_lock(&parent->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&dn->d_u.d_child, &parent->d_subdirs);
+ list_move(&dn->d_child, &parent->d_subdirs);
spin_unlock(&dn->d_lock);
spin_unlock(&parent->d_lock);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5fcc10fa62b..f4a8577c3e9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1789,6 +1789,7 @@ refind_writable:
cifsFileInfo_put(inv_file);
spin_lock(&cifs_file_list_lock);
++refind;
+ inv_file = NULL;
goto refind_writable;
}
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0dee93706c9..54304ccae7e 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -832,7 +832,7 @@ inode_has_hashed_dentries(struct inode *inode)
struct dentry *dentry;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
spin_unlock(&inode->i_lock);
return true;
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 1da168c61d3..9bc1147a6c5 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
struct dentry *de;
spin_lock(&parent->d_lock);
- list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
+ list_for_each_entry(de, &parent->d_subdirs, d_child) {
/* don't know what to do with negative dentries */
if (de->d_inode )
coda_flag_inode(de->d_inode, flag);
diff --git a/fs/dcache.c b/fs/dcache.c
index 25c0a1b5f6c..efa4602e064 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -43,7 +43,7 @@
/*
* Usage:
* dcache->d_inode->i_lock protects:
- * - i_dentry, d_alias, d_inode of aliases
+ * - i_dentry, d_u.d_alias, d_inode of aliases
* dcache_hash_bucket lock protects:
* - the dcache hash table
* s_anon bl list spinlock protects:
@@ -58,7 +58,7 @@
* - d_unhashed()
* - d_parent and d_subdirs
* - childrens' d_child and d_parent
- * - d_alias, d_inode
+ * - d_u.d_alias, d_inode
*
* Ordering:
* dentry->d_inode->i_lock
@@ -215,7 +215,6 @@ static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
- WARN_ON(!hlist_unhashed(&dentry->d_alias));
if (dname_external(dentry))
kfree(dentry->d_name.name);
kmem_cache_free(dentry_cache, dentry);
@@ -226,6 +225,7 @@ static void __d_free(struct rcu_head *head)
*/
static void d_free(struct dentry *dentry)
{
+ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
BUG_ON(dentry->d_count);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
@@ -264,7 +264,7 @@ static void dentry_iput(struct dentry * dentry)
struct inode *inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
- hlist_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_u.d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -288,7 +288,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
dentry->d_inode = NULL;
- hlist_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_u.d_alias);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
@@ -364,9 +364,9 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
__releases(parent->d_lock)
__releases(dentry->d_inode->i_lock)
{
- list_del(&dentry->d_u.d_child);
+ __list_del_entry(&dentry->d_child);
/*
- * Inform try_to_ascend() that we are no longer attached to the
+ * Inform ascending readers that we are no longer attached to the
* dentry tree
*/
dentry->d_flags |= DCACHE_DENTRY_KILLED;
@@ -660,7 +660,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
again:
discon_alias = NULL;
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
@@ -713,7 +713,7 @@ void d_prune_aliases(struct inode *inode)
struct dentry *dentry;
restart:
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_count) {
__dget_dlock(dentry);
@@ -893,7 +893,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
/* descend to the first leaf in the current subtree */
while (!list_empty(&dentry->d_subdirs))
dentry = list_entry(dentry->d_subdirs.next,
- struct dentry, d_u.d_child);
+ struct dentry, d_child);
/* consume the dentries from this leaf up through its parents
* until we find one with children or run out altogether */
@@ -927,17 +927,17 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
if (IS_ROOT(dentry)) {
parent = NULL;
- list_del(&dentry->d_u.d_child);
+ list_del(&dentry->d_child);
} else {
parent = dentry->d_parent;
parent->d_count--;
- list_del(&dentry->d_u.d_child);
+ list_del(&dentry->d_child);
}
inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
- hlist_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_u.d_alias);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
@@ -955,7 +955,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
} while (list_empty(&dentry->d_subdirs));
dentry = list_entry(dentry->d_subdirs.next,
- struct dentry, d_u.d_child);
+ struct dentry, d_child);
}
}
@@ -988,35 +988,6 @@ void shrink_dcache_for_umount(struct super_block *sb)
}
/*
- * This tries to ascend one level of parenthood, but
- * we can race with renaming, so we need to re-check
- * the parenthood after dropping the lock and check
- * that the sequence number still matches.
- */
-static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
-{
- struct dentry *new = old->d_parent;
-
- rcu_read_lock();
- spin_unlock(&old->d_lock);
- spin_lock(&new->d_lock);
-
- /*
- * might go back up the wrong parent if we have had a rename
- * or deletion
- */
- if (new != old->d_parent ||
- (old->d_flags & DCACHE_DENTRY_KILLED) ||
- (!locked && read_seqretry(&rename_lock, seq))) {
- spin_unlock(&new->d_lock);
- new = NULL;
- }
- rcu_read_unlock();
- return new;
-}
-
-
-/*
* Search for at least 1 mount point in the dentry's subdirs.
* We descend to the next level whenever the d_subdirs
* list is non-empty and continue searching.
@@ -1048,7 +1019,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1070,30 +1041,48 @@ resume:
/*
* All done at this level ... ascend and resume the search.
*/
+ rcu_read_lock();
+ascend:
if (this_parent != parent) {
struct dentry *child = this_parent;
- this_parent = try_to_ascend(this_parent, locked, seq);
- if (!this_parent)
+ this_parent = child->d_parent;
+
+ spin_unlock(&child->d_lock);
+ spin_lock(&this_parent->d_lock);
+
+ /* might go back up the wrong parent if we have had a rename. */
+ if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
- next = child->d_u.d_child.next;
+ next = child->d_child.next;
+ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+ if (next == &this_parent->d_subdirs)
+ goto ascend;
+ child = list_entry(next, struct dentry, d_child);
+ next = next->next;
+ }
+ rcu_read_unlock();
goto resume;
}
- spin_unlock(&this_parent->d_lock);
if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (locked)
write_sequnlock(&rename_lock);
return 0; /* No mount points found in tree */
positive:
if (!locked && read_seqretry(&rename_lock, seq))
- goto rename_retry;
+ goto rename_retry_unlocked;
if (locked)
write_sequnlock(&rename_lock);
return 1;
rename_retry:
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (locked)
goto again;
+rename_retry_unlocked:
locked = 1;
write_seqlock(&rename_lock);
goto again;
@@ -1131,7 +1120,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1158,6 +1147,7 @@ resume:
*/
if (found && need_resched()) {
spin_unlock(&dentry->d_lock);
+ rcu_read_lock();
goto out;
}
@@ -1177,23 +1167,40 @@ resume:
/*
* All done at this level ... ascend and resume the search.
*/
+ rcu_read_lock();
+ascend:
if (this_parent != parent) {
struct dentry *child = this_parent;
- this_parent = try_to_ascend(this_parent, locked, seq);
- if (!this_parent)
+ this_parent = child->d_parent;
+
+ spin_unlock(&child->d_lock);
+ spin_lock(&this_parent->d_lock);
+
+ /* might go back up the wrong parent if we have had a rename. */
+ if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
- next = child->d_u.d_child.next;
+ next = child->d_child.next;
+ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+ if (next == &this_parent->d_subdirs)
+ goto ascend;
+ child = list_entry(next, struct dentry, d_child);
+ next = next->next;
+ }
+ rcu_read_unlock();
goto resume;
}
out:
- spin_unlock(&this_parent->d_lock);
if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (locked)
write_sequnlock(&rename_lock);
return found;
rename_retry:
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (found)
return found;
if (locked)
@@ -1278,8 +1285,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
- INIT_HLIST_NODE(&dentry->d_alias);
- INIT_LIST_HEAD(&dentry->d_u.d_child);
+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
+ INIT_LIST_HEAD(&dentry->d_child);
d_set_d_op(dentry, dentry->d_sb->s_d_op);
this_cpu_inc(nr_dentry);
@@ -1309,7 +1316,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
*/
__dget_dlock(parent);
dentry->d_parent = parent;
- list_add(&dentry->d_u.d_child, &parent->d_subdirs);
+ list_add(&dentry->d_child, &parent->d_subdirs);
spin_unlock(&parent->d_lock);
return dentry;
@@ -1369,7 +1376,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
if (inode) {
if (unlikely(IS_AUTOMOUNT(inode)))
dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
- hlist_add_head(&dentry->d_alias, &inode->i_dentry);
+ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
}
dentry->d_inode = inode;
dentry_rcuwalk_barrier(dentry);
@@ -1394,7 +1401,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
void d_instantiate(struct dentry *entry, struct inode * inode)
{
- BUG_ON(!hlist_unhashed(&entry->d_alias));
+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
if (inode)
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
@@ -1433,7 +1440,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
return NULL;
}
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
/*
* Don't need alias->d_lock here, because aliases with
* d_parent == entry->d_parent are not subject to name or
@@ -1459,7 +1466,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
struct dentry *result;
- BUG_ON(!hlist_unhashed(&entry->d_alias));
+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
if (inode)
spin_lock(&inode->i_lock);
@@ -1502,7 +1509,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
if (hlist_empty(&inode->i_dentry))
return NULL;
- alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
__dget(alias);
return alias;
}
@@ -1576,7 +1583,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
spin_lock(&tmp->d_lock);
tmp->d_inode = inode;
tmp->d_flags |= DCACHE_DISCONNECTED;
- hlist_add_head(&tmp->d_alias, &inode->i_dentry);
+ hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
hlist_bl_unlock(&tmp->d_sb->s_anon);
@@ -2019,7 +2026,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
struct dentry *child;
spin_lock(&dparent->d_lock);
- list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &dparent->d_subdirs, d_child) {
if (dentry == child) {
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
__dget_dlock(dentry);
@@ -2266,8 +2273,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
- list_del(&dentry->d_u.d_child);
- list_del(&target->d_u.d_child);
+ list_del(&dentry->d_child);
+ list_del(&target->d_child);
/* Switch the names.. */
switch_names(dentry, target);
@@ -2277,15 +2284,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
if (IS_ROOT(dentry)) {
dentry->d_parent = target->d_parent;
target->d_parent = target;
- INIT_LIST_HEAD(&target->d_u.d_child);
+ INIT_LIST_HEAD(&target->d_child);
} else {
swap(dentry->d_parent, target->d_parent);
/* And add them back to the (new) parent lists */
- list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
+ list_add(&target->d_child, &target->d_parent->d_subdirs);
}
- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+ list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
write_seqcount_end(&target->d_seq);
write_seqcount_end(&dentry->d_seq);
@@ -2392,9 +2399,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
swap(dentry->d_name.hash, anon->d_name.hash);
dentry->d_parent = dentry;
- list_del_init(&dentry->d_u.d_child);
+ list_del_init(&dentry->d_child);
anon->d_parent = dparent;
- list_move(&anon->d_u.d_child, &dparent->d_subdirs);
+ list_move(&anon->d_child, &dparent->d_subdirs);
write_seqcount_end(&dentry->d_seq);
write_seqcount_end(&anon->d_seq);
@@ -2933,7 +2940,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
@@ -2954,26 +2961,43 @@ resume:
}
spin_unlock(&dentry->d_lock);
}
+ rcu_read_lock();
+ascend:
if (this_parent != root) {
struct dentry *child = this_parent;
if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
this_parent->d_flags |= DCACHE_GENOCIDE;
this_parent->d_count--;
}
- this_parent = try_to_ascend(this_parent, locked, seq);
- if (!this_parent)
+ this_parent = child->d_parent;
+
+ spin_unlock(&child->d_lock);
+ spin_lock(&this_parent->d_lock);
+
+ /* might go back up the wrong parent if we have had a rename. */
+ if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
- next = child->d_u.d_child.next;
+ next = child->d_child.next;
+ while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+ if (next == &this_parent->d_subdirs)
+ goto ascend;
+ child = list_entry(next, struct dentry, d_child);
+ next = next->next;
+ }
+ rcu_read_unlock();
goto resume;
}
- spin_unlock(&this_parent->d_lock);
if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (locked)
write_sequnlock(&rename_lock);
return;
rename_retry:
+ spin_unlock(&this_parent->d_lock);
+ rcu_read_unlock();
if (locked)
goto again;
locked = 1;
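The three tree walkers rewritten above share one new ascend pattern: take rcu_read_lock(), climb to d_parent, revalidate against rename_lock, and step over DCACHE_DENTRY_KILLED entries through d_child.next instead of bailing out the way try_to_ascend() did. That is only safe because a killed dentry is freed through an RCU grace period, so its d_child links stay readable inside the read-side section. A sketch of the free path this relies on (the top matches the d_free() hunk above; the DCACHE_RCUACCESS branch is reconstructed from the surrounding file):

static void d_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}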
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4660a419033..84378f0ffd6 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -546,7 +546,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
parent = dentry;
down:
mutex_lock(&parent->d_inode->i_mutex);
- list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_child) {
if (!debugfs_positive(child))
continue;
@@ -567,8 +567,8 @@ void debugfs_remove_recursive(struct dentry *dentry)
mutex_lock(&parent->d_inode->i_mutex);
if (child != dentry) {
- next = list_entry(child->d_u.d_child.next, struct dentry,
- d_u.d_child);
+ next = list_entry(child->d_child.next, struct dentry,
+ d_child);
goto up;
}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 43b448ddc3d..dc4784b52d9 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
inode = result->d_inode;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
dget(dentry);
spin_unlock(&inode->i_lock);
if (toput)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f1312173fa9..facf8590b71 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1880,7 +1880,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
struct super_block *sb;
@@ -1905,14 +1905,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return retval;
if (retval == 1) {
retval = 0;
- return retval;
+ goto out;
}
}
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
+ goto out;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
@@ -1924,14 +1924,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return PTR_ERR(bh);
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (retval != -ENOSPC) {
- brelse(bh);
- return retval;
- }
+ if (retval != -ENOSPC)
+ goto out;
if (blocks == 1 && !dx_fallback &&
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
- return make_indexed_dir(handle, dentry, inode, bh);
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ retval = make_indexed_dir(handle, dentry, inode, bh);
+ bh = NULL; /* make_indexed_dir releases bh */
+ goto out;
+ }
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
@@ -1947,6 +1948,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+out:
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index c450fdb3d78..5d876b1c9ea 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3103,7 +3103,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
* self "."
*/
filp->f_pos = 1;
- if (filldir(dirent, ".", 1, 0, ip->i_ino,
+ if (filldir(dirent, ".", 1, 1, ip->i_ino,
DT_DIR))
return 0;
}
@@ -3111,7 +3111,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
* parent ".."
*/
filp->f_pos = 2;
- if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
+ if (filldir(dirent, "..", 2, 2, PARENT(ip), DT_DIR))
return 0;
/*
diff --git a/fs/libfs.c b/fs/libfs.c
index 916da8c4158..1299bd5e07b 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -104,18 +104,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
spin_lock(&dentry->d_lock);
/* d_lock not required for cursor */
- list_del(&cursor->d_u.d_child);
+ list_del(&cursor->d_child);
p = dentry->d_subdirs.next;
while (n && p != &dentry->d_subdirs) {
struct dentry *next;
- next = list_entry(p, struct dentry, d_u.d_child);
+ next = list_entry(p, struct dentry, d_child);
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(next))
n--;
spin_unlock(&next->d_lock);
p = p->next;
}
- list_add_tail(&cursor->d_u.d_child, p);
+ list_add_tail(&cursor->d_child, p);
spin_unlock(&dentry->d_lock);
}
}
@@ -139,7 +139,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct dentry *dentry = filp->f_path.dentry;
struct dentry *cursor = filp->private_data;
- struct list_head *p, *q = &cursor->d_u.d_child;
+ struct list_head *p, *q = &cursor->d_child;
ino_t ino;
int i = filp->f_pos;
@@ -165,7 +165,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
struct dentry *next;
- next = list_entry(p, struct dentry, d_u.d_child);
+ next = list_entry(p, struct dentry, d_child);
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
if (!simple_positive(next)) {
spin_unlock(&next->d_lock);
@@ -289,7 +289,7 @@ int simple_empty(struct dentry *dentry)
int ret = 0;
spin_lock(&dentry->d_lock);
- list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &dentry->d_subdirs, d_child) {
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
if (simple_positive(child)) {
spin_unlock(&child->d_lock);
diff --git a/fs/namei.c b/fs/namei.c
index f7c4393f853..036c21246d6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1542,7 +1542,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
if (should_follow_link(inode, follow)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
err = -ECHILD;
goto out_err;
}
@@ -2824,7 +2825,8 @@ finish_lookup:
if (should_follow_link(inode, !symlink_ok)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
error = -ECHILD;
goto out;
}
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 6792ce11f2b..c578ba9949e 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -391,7 +391,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dent = list_entry(next, struct dentry, d_u.d_child);
+ dent = list_entry(next, struct dentry, d_child);
if ((unsigned long)dent->d_fsdata == fpos) {
if (dent->d_inode)
dget(dent);
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 32c06587351..6d5e7c56c79 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_u.d_child);
+ dentry = list_entry(next, struct dentry, d_child);
if (dentry->d_fsdata == NULL)
ncp_age_dentry(server, dentry);
@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_u.d_child);
+ dentry = list_entry(next, struct dentry, d_child);
dentry->d_fsdata = NULL;
ncp_age_dentry(server, dentry);
next = next->next;
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 44efaa8c5f7..0fe3ced6438 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
*/
spin_lock(&sb->s_root->d_inode->i_lock);
spin_lock(&sb->s_root->d_lock);
- hlist_del_init(&sb->s_root->d_alias);
+ hlist_del_init(&sb->s_root->d_u.d_alias);
spin_unlock(&sb->s_root->d_lock);
spin_unlock(&sb->s_root->d_inode->i_lock);
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 4bb21d67d9b..a3153e2d0f1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
spin_lock(&inode->i_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
struct dentry *child;
/* run all of the children of the original inode and fix their
* d_flags to indicate parental interest (their parent is the
* original inode) */
spin_lock(&alias->d_lock);
- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &alias->d_subdirs, d_child) {
if (!child->d_inode)
continue;
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index ef999729e27..ce37013b4a5 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -172,7 +172,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
struct dentry *dentry;
spin_lock(&inode->i_lock);
- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
trace_ocfs2_find_local_alias(dentry->d_name.len,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 46387e49aa4..d0e8c0b1767 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2372,10 +2372,14 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+ if (unlikely(written <= 0))
+ goto no_sync;
+
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
((file->f_flags & O_DIRECT) && !direct_io)) {
- ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
- *ppos + count - 1);
+ ret = filemap_fdatawrite_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
if (ret < 0)
written = ret;
@@ -2388,10 +2392,12 @@ out_dio:
}
if (!ret)
- ret = filemap_fdatawait_range(file->f_mapping, *ppos,
- *ppos + count - 1);
+ ret = filemap_fdatawait_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
}
+no_sync:
/*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
* function pointer which is called when o_direct io completes so that
@@ -2453,12 +2459,14 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
struct address_space *mapping = out->f_mapping;
struct inode *inode = mapping->host;
struct splice_desc sd = {
- .total_len = len,
.flags = flags,
- .pos = *ppos,
.u.file = out,
};
-
+ ret = generic_write_checks(out, ppos, &len, 0);
+ if (ret)
+ return ret;
+ sd.total_len = len;
+ sd.pos = *ppos;
trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 154ebf8d7c1..0e289949213 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1187,9 +1187,19 @@ out:
return ret;
}
+static int pagemap_open(struct inode *inode, struct file *file)
+{
+ /* do not disclose physical addresses to unprivileged
+ userspace (closes a rowhammer attack vector) */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 0;
+}
+
const struct file_operations proc_pagemap_operations = {
.llseek = mem_lseek, /* borrow this */
.read = pagemap_read,
+ .open = pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
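The practical effect is visible from userspace: opening /proc/<pid>/pagemap without CAP_SYS_ADMIN now fails at open() instead of handing out physical frame numbers. A small demonstration (run unprivileged):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0) {
		/* EPERM after this change, unless CAP_SYS_ADMIN is held */
		printf("open: %s\n", strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}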
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 157e474ab30..635a1425d37 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1954,8 +1954,6 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
#define MAX_US_INT 0xffff
// reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
-#define U32_MAX (~(__u32)0)
-
static inline loff_t max_reiserfs_offset(struct inode *inode)
{
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
diff --git a/fs/splice.c b/fs/splice.c
index 4b5a5fac338..f183f1342c0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
struct address_space *mapping = out->f_mapping;
struct inode *inode = mapping->host;
struct splice_desc sd = {
- .total_len = len,
.flags = flags,
- .pos = *ppos,
.u.file = out,
};
ssize_t ret;
+ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
+ if (ret)
+ return ret;
+ sd.total_len = len;
+ sd.pos = *ppos;
+
pipe_lock(pipe);
splice_from_pipe_begin(&sd);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a64adcc29ae..f819e813c8a 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -198,9 +198,29 @@ typedef int INT32;
typedef s32 acpi_native_int;
typedef u32 acpi_size;
+
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
+
+/*
+ * OSPMs can define this to shrink the size of the structures for 32-bit
+ * non-PAE environments. The ASL compiler may always define this to generate
+ * 32-bit OSPM-compliant tables.
+ */
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;
+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
+/*
+ * It has been reported that, after some calculations, physical addresses can
+ * wrap over the 32-bit boundary in a 32-bit PAE environment.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
+ */
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ef04b36ca6e..f7db107abb0 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -76,6 +76,7 @@
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
+#define ACPI_32BIT_PHYSICAL_ADDRESS
#endif
/* acpi_exec configuration. Multithreaded with full AML debugger */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 17bccd3a4b0..dd6d9b89d33 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -550,11 +550,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
barrier();
#endif
- if (pmd_none(pmdval))
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
return 1;
if (unlikely(pmd_bad(pmdval))) {
- if (!pmd_trans_huge(pmdval))
- pmd_clear_bad(pmd);
+ pmd_clear_bad(pmd);
return 1;
}
return 0;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index c1a1216e29c..87b27263f5e 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,6 +3,8 @@
/* References to section boundaries */
+#include <linux/compiler.h>
+
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
@@ -18,6 +20,8 @@ extern char __start_rodata[], __end_rodata[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
+extern __visible const void __nosave_begin, __nosave_end;
+
/* function descriptor handling (if any). Override
* in asm/sections.h */
#ifndef dereference_function_descriptor
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fa1abeb45b7..49c48dda162 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -170,7 +170,9 @@ enum rq_flag_bits {
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
- __REQ_PREEMPT, /* set for "ide_preempt" requests */
+ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
+ for requests for which the SCSI "quiesce"
+ state must be ignored. */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_FLUSH_SEQ, /* request for flush sequence */
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 0442c3d800f..a6ef9cc267e 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -8,23 +8,6 @@
#include <linux/ceph/types.h>
-/* This seemed to be the easiest place to define these */
-
-#define U8_MAX ((u8)(~0U))
-#define U16_MAX ((u16)(~0U))
-#define U32_MAX ((u32)(~0U))
-#define U64_MAX ((u64)(~0ULL))
-
-#define S8_MAX ((s8)(U8_MAX >> 1))
-#define S16_MAX ((s16)(U16_MAX >> 1))
-#define S32_MAX ((s32)(U32_MAX >> 1))
-#define S64_MAX ((s64)(U64_MAX >> 1LL))
-
-#define S8_MIN ((s8)(-S8_MAX - 1))
-#define S16_MIN ((s16)(-S16_MAX - 1))
-#define S32_MIN ((s32)(-S32_MAX - 1))
-#define S64_MIN ((s64)(-S64_MAX - 1LL))
-
/*
* in all cases,
* void **p pointer to position pointer
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3bf2bc41e41..a482811399a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -120,26 +120,46 @@ enum {
{ .notifier_call = fn, .priority = pri }; \
register_cpu_notifier(&fn##_nb); \
}
+
+#define __cpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = pri }; \
+ __register_cpu_notifier(&fn##_nb); \
+}
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+
#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
+extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else
#ifndef MODULE
extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
#else
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
+
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
#endif
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
+
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
#endif
int cpu_up(unsigned int cpu);
@@ -147,19 +167,32 @@ void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+#define cpu_notifier_register_begin cpu_maps_update_begin
+#define cpu_notifier_register_done cpu_maps_update_done
+
#else /* CONFIG_SMP */
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
static inline void cpu_maps_update_begin(void)
{
}
@@ -168,6 +201,14 @@ static inline void cpu_maps_update_done(void)
{
}
+static inline void cpu_notifier_register_begin(void)
+{
+}
+
+static inline void cpu_notifier_register_done(void)
+{
+}
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -179,8 +220,11 @@ extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -204,9 +248,12 @@ static inline void cpu_hotplug_driver_unlock(void)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 9be5ac960fd..c1999d1fe6f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -120,15 +120,15 @@ struct dentry {
void *d_fsdata; /* fs-specific data */
struct list_head d_lru; /* LRU list */
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
/*
- * d_child and d_rcu can share memory
+ * d_alias and d_rcu can share memory
*/
union {
- struct list_head d_child; /* child of parent list */
+ struct hlist_node d_alias; /* inode alias list */
struct rcu_head d_rcu;
} d_u;
- struct list_head d_subdirs; /* our children */
- struct hlist_node d_alias; /* inode alias list */
};
/*
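This union swap is what makes the d_u.d_child to d_child rename running through this merge more than cosmetic: d_rcu used to overlap d_child, so a dentry queued for RCU freeing lost its sibling linkage, which is exactly what the new lockless ascend in fs/dcache.c still needs to follow. Overlapping d_rcu with d_alias instead is safe because the alias hlist is unhashed before the dentry is freed (hence the WARN_ON moved into d_free() earlier in this diff). The resulting layout, for reference:

struct dentry {
	/* ... */
	struct list_head d_child;       /* stays valid for RCU walkers */
	struct list_head d_subdirs;     /* our children */
	union {
		struct hlist_node d_alias;  /* unhashed before freeing ... */
		struct rcu_head d_rcu;      /* ... so it can share this slot */
	} d_u;
};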
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index eabc56d0f82..5ff503bb94f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -29,6 +29,19 @@
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
#define STACK_MAGIC 0xdeadbeef
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
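These definitions consolidate the private copies deleted from include/linux/ceph/decode.h and fs/reiserfs/reiserfs.h above into one canonical set. A quick userspace check of the arithmetic (illustrative only; the typedefs stand in for the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;
typedef int32_t s32;

#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))

int main(void)
{
	assert(U32_MAX == 4294967295U);
	assert(S32_MAX == 2147483647);
	assert(S32_MIN == -S32_MAX - 1);	/* -2147483648 */
	return 0;
}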
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ab40149553d..d06441c10e3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -896,6 +896,7 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -903,8 +904,8 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
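With VM_FAULT_SIGSEGV split out and folded into VM_FAULT_ERROR, arch fault handlers can pick the right signal from the fault code. A sketch of the dispatch a typical do_page_fault() grows (the labels are the conventional ones and purely illustrative here):

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;          /* deliver SIGSEGV, not SIGBUS */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}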
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a0bee5a28d1..28bd3a898cb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
+/*
+ * USB Resume Timer: Every host controller driver should drive the resume
+ * signalling on the bus for the amount of time defined by this macro.
+ *
+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
+ *
+ * Note that the USB Specification states we should drive resume for *at least*
+ * 20 ms, but it doesn't give an upper bound. This creates two possible
+ * situations which we want to avoid:
+ *
+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
+ * us to fail USB Electrical Tests, thus failing Certification
+ *
+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
+ * and while we can argue that's against the USB Specification, we don't have
+ * control over which devices a certification laboratory will be using for
+ * certification. If CertLab uses a device which was tested against Windows and
+ * that happens to have relaxed resume signalling rules, we might fall into
+ * situations where we fail interoperability and electrical tests.
+ *
+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
+ * should cope with both LPJ calibration errors and devices not following every
+ * detail of the USB Specification.
+ */
+#define USB_RESUME_TIMEOUT 40 /* ms */
+
/**
* struct usb_interface_cache - long-term representation of a device interface
* @num_altsetting: number of altsettings defined.
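Together with the HCD conversions earlier in this merge, bus resume paths now share one shape; a sketch modeled on the isp116x change above (function name illustrative):

static int example_bus_resume(struct usb_hcd *hcd)
{
	/* start driving resume signalling on the bus */
	hcd->state = HC_STATE_RESUMING;

	/* hold it for the standardized window rather than a bare 20 ms */
	msleep(USB_RESUME_TIMEOUT);

	/* ... then go operational */
	return 0;
}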
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
new file mode 100644
index 00000000000..f14bd75f08b
--- /dev/null
+++ b/include/linux/zpool.h
@@ -0,0 +1,106 @@
+/*
+ * zpool memory storage api
+ *
+ * Copyright (C) 2014 Dan Streetman
+ *
+ * This is a common frontend for the zbud and zsmalloc memory
+ * storage pool implementations. Typically, this is used to
+ * store compressed memory.
+ */
+
+#ifndef _ZPOOL_H_
+#define _ZPOOL_H_
+
+struct zpool;
+
+struct zpool_ops {
+ int (*evict)(struct zpool *pool, unsigned long handle);
+};
+
+/*
+ * Control how a handle is mapped. It will be ignored if the
+ * implementation does not support it. Its use is optional.
+ * Note that this does not refer to memory protection, it
+ * refers to how the memory will be copied in/out if copying
+ * is necessary during mapping; read-write is the safest as
+ * it copies the existing memory in on map, and copies the
+ * changed memory back out on unmap. Write-only does not copy
+ * in the memory and should only be used for initialization.
+ * If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
+ */
+enum zpool_mapmode {
+ ZPOOL_MM_RW, /* normal read-write mapping */
+ ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
+ ZPOOL_MM_WO, /* write-only (no copy-in at map time) */
+
+ ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
+};
+
+struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops);
+
+char *zpool_get_type(struct zpool *pool);
+
+void zpool_destroy_pool(struct zpool *pool);
+
+int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
+ unsigned long *handle);
+
+void zpool_free(struct zpool *pool, unsigned long handle);
+
+int zpool_shrink(struct zpool *pool, unsigned int pages,
+ unsigned int *reclaimed);
+
+void *zpool_map_handle(struct zpool *pool, unsigned long handle,
+ enum zpool_mapmode mm);
+
+void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
+
+u64 zpool_get_total_size(struct zpool *pool);
+
+
+/**
+ * struct zpool_driver - driver implementation for zpool
+ * @type: name of the driver.
+ * @list: entry in the list of zpool drivers.
+ * @create: create a new pool.
+ * @destroy: destroy a pool.
+ * @malloc: allocate mem from a pool.
+ * @free: free mem from a pool.
+ * @shrink: shrink the pool.
+ * @map: map a handle.
+ * @unmap: unmap a handle.
+ * @total_size: get total size of a pool.
+ *
+ * This is created by a zpool implementation and registered
+ * with zpool.
+ */
+struct zpool_driver {
+ char *type;
+ struct module *owner;
+ atomic_t refcount;
+ struct list_head list;
+
+ void *(*create)(gfp_t gfp, struct zpool_ops *ops);
+ void (*destroy)(void *pool);
+
+ int (*malloc)(void *pool, size_t size, gfp_t gfp,
+ unsigned long *handle);
+ void (*free)(void *pool, unsigned long handle);
+
+ int (*shrink)(void *pool, unsigned int pages,
+ unsigned int *reclaimed);
+
+ void *(*map)(void *pool, unsigned long handle,
+ enum zpool_mapmode mm);
+ void (*unmap)(void *pool, unsigned long handle);
+
+ u64 (*total_size)(void *pool);
+};
+
+void zpool_register_driver(struct zpool_driver *driver);
+
+int zpool_unregister_driver(struct zpool_driver *driver);
+
+int zpool_evict(void *pool, unsigned long handle);
+
+#endif
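A hedged usage sketch for the API declared above, in kernel context with error handling trimmed; "zbud" is assumed to be a registered implementation:

static int zpool_smoke_test(void)
{
	struct zpool *pool;
	unsigned long handle;
	char *buf;

	pool = zpool_create_pool("zbud", GFP_KERNEL, NULL); /* no evict ops */
	if (!pool)
		return -ENOMEM;

	if (zpool_malloc(pool, 128, GFP_KERNEL, &handle)) {
		zpool_destroy_pool(pool);
		return -ENOMEM;
	}

	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);  /* no copy-in */
	memset(buf, 0, 128);
	zpool_unmap_handle(pool, handle);                   /* copies out */

	pr_info("%s pool holds %llu bytes\n", zpool_get_type(pool),
		(unsigned long long)zpool_get_total_size(pool));

	zpool_free(pool, handle);
	zpool_destroy_pool(pool);
	return 0;
}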
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/include/linux/zsmalloc.h
index 46dbd0558d8..ebb2841f752 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -2,6 +2,7 @@
* zsmalloc memory allocator
*
* Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the license that better fits your requirements.
@@ -38,6 +39,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-u64 zs_get_total_size_bytes(struct zs_pool *pool);
+unsigned long zs_get_total_pages(struct zs_pool *pool);
#endif
diff --git a/ipc/compat.c b/ipc/compat.c
index 892f6585dd6..d3b376025e9 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -381,7 +381,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
uptr = compat_ptr(ipck.msgp);
fifth = ipck.msgtyp;
}
- return do_msgrcv(first, uptr, second, fifth, third,
+ return do_msgrcv(first, uptr, second, (s32)fifth, third,
compat_do_msg_fill);
}
case MSGGET:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e646e870ec5..fcf0aeff45b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -984,7 +984,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- list_del_init(&dentry->d_u.d_child);
+ list_del_init(&dentry->d_child);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
remove_dir(dentry);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7d4755634d3..f1cfa7367a9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -27,18 +27,23 @@
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ * The following two APIs (cpu_maps_update_begin/done) must be used when
+ * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
+ * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
+ * hotplug callback (un)registration performed using __register_cpu_notifier()
+ * or __unregister_cpu_notifier().
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
+EXPORT_SYMBOL(cpu_notifier_register_begin);
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
+EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);
@@ -169,6 +174,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
return ret;
}
+int __ref __register_cpu_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&cpu_chain, nb);
+}
+
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
int *nr_calls)
{
@@ -192,6 +202,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
+EXPORT_SYMBOL(__register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
@@ -201,6 +212,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);
+void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&cpu_chain, nb);
+}
+EXPORT_SYMBOL(__unregister_cpu_notifier);
+
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
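The comment added above pairs cpu_notifier_register_begin/done() with the new double-underscore registration calls; the point is to let a driver initialize the already-online CPUs and register its callback without a hotplug event sneaking in between. A sketch of the intended pattern (the mydrv_* names are hypothetical):

static struct notifier_block mydrv_cpu_nb = {
	.notifier_call = mydrv_cpu_callback,    /* hypothetical */
};

static int __init mydrv_init(void)
{
	int cpu;

	cpu_notifier_register_begin();          /* blocks CPU hotplug */

	for_each_online_cpu(cpu)
		mydrv_setup_cpu(cpu);           /* hypothetical per-CPU init */

	/* cannot race with a CPU appearing after the loop above */
	__register_cpu_notifier(&mydrv_cpu_nb);

	cpu_notifier_register_done();
	return 0;
}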
diff --git a/kernel/printk.c b/kernel/printk.c
index f7aff4bd545..fd0154a57d6 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -107,7 +107,7 @@ static struct console *exclusive_console;
*/
struct console_cmdline
{
- char name[8]; /* Name of the driver */
+ char name[16]; /* Name of the driver */
int index; /* Minor dev. to use */
char *options; /* Options for the driver */
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
@@ -2290,6 +2290,8 @@ void register_console(struct console *newcon)
*/
for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
i++) {
+ BUILD_BUG_ON(sizeof(console_cmdline[i].name) !=
+ sizeof(newcon->name));
if (strcmp(console_cmdline[i].name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index afadcf7b4a2..118323bc852 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
static int ptrace_resume(struct task_struct *child, long request,
unsigned long data)
{
+ bool need_siglock;
+
if (!valid_signal(data))
return -EIO;
@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
user_disable_single_step(child);
}
+ /*
+ * Change ->exit_code and ->state under siglock to avoid the race
+ * with wait_task_stopped() in between; a non-zero ->exit_code will
+ * wrongly look like another report from the tracee.
+ *
+ * Note that we need siglock even if ->exit_code == data and/or this
+ * status was not reported yet, the new status must not be cleared by
+ * wait_task_stopped() after resume.
+ *
+ * If data == 0 we do not care if wait_task_stopped() reports the old
+ * status and clears the code too; this can't race with the tracee, it
+ * takes siglock after resume.
+ */
+ need_siglock = data && !thread_group_empty(current);
+ if (need_siglock)
+ spin_lock_irq(&child->sighand->siglock);
child->exit_code = data;
wake_up_state(child, __TASK_TRACED);
+ if (need_siglock)
+ spin_unlock_irq(&child->sighand->siglock);
return 0;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 787b3a03242..21956f00cb5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -774,9 +774,13 @@ static void run_ksoftirqd(unsigned int cpu)
local_irq_disable();
if (local_softirq_pending()) {
__do_softirq();
- rcu_note_context_switch(cpu);
local_irq_enable();
cond_resched();
+
+ preempt_disable();
+ rcu_note_context_switch(cpu);
+ preempt_enable();
+
return;
}
local_irq_enable();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3d9fee3a80b..ab21b8c6653 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2650,7 +2650,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
static __always_inline int trace_recursive_lock(void)
{
- unsigned int val = this_cpu_read(current_context);
+ unsigned int val = __this_cpu_read(current_context);
int bit;
if (in_interrupt()) {
@@ -2667,18 +2667,17 @@ static __always_inline int trace_recursive_lock(void)
return 1;
val |= (1 << bit);
- this_cpu_write(current_context, val);
+ __this_cpu_write(current_context, val);
return 0;
}
static __always_inline void trace_recursive_unlock(void)
{
- unsigned int val = this_cpu_read(current_context);
+ unsigned int val = __this_cpu_read(current_context);
- val--;
- val &= this_cpu_read(current_context);
- this_cpu_write(current_context, val);
+ val &= val & (val - 1);
+ __this_cpu_write(current_context, val);
}
#else
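
The rewritten unlock path leans on the clear-lowest-set-bit identity: for a nonzero value, val & (val - 1) drops exactly the least-significant 1 bit. The bit assignment (partly elided in this hunk) gives deeper-nesting contexts lower bit numbers, so the bit being released is always the lowest one set and no separate record of "which bit did we take" is needed. A worked example:

    unsigned int val = (1 << 3) | (1 << 1);  /* task context with IRQ nested in it */

    val &= val & (val - 1);  /* 0b1010 & 0b1001 = 0b1000: IRQ bit cleared */
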
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88e8f9057d1..249e32b5aa0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6154,7 +6154,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
@@ -6181,7 +6181,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 001b349af93..5a898f15bfc 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -425,7 +425,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
if (dir) {
spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &dir->d_subdirs, d_child) {
if (child->d_inode) /* probably unneeded */
child->d_inode->i_private = NULL;
}
diff --git a/lib/string.c b/lib/string.c
index 43d0781daf4..cb9ea218155 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -598,7 +598,7 @@ EXPORT_SYMBOL(memset);
void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
- OPTIMIZER_HIDE_VAR(s);
+ barrier();
}
EXPORT_SYMBOL(memzero_explicit);
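
memzero_explicit() exists because a memset() of a buffer that is dead afterwards is a store the optimizer may legally delete, leaving secrets in memory; the barrier() forces the compiler to assume memory is observed after the call. A hedged user-space analogue of the same idea, using the GCC-style empty asm with a "memory" clobber that barrier() expands to:

    #include <string.h>

    static void zero_sensitive(void *s, size_t n)
    {
            memset(s, 0, n);
            /* the compiler must assume this asm reads memory, so the
             * preceding stores cannot be treated as dead */
            __asm__ __volatile__("" : : : "memory");
    }
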
diff --git a/mm/Kconfig b/mm/Kconfig
index b2d1aed5643..7c13c134f64 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -504,3 +504,35 @@ config CMA_DEBUG
messages for every CMA call as well as various messages while
processing calls such as dma_alloc_from_contiguous().
This option does not affect warning and error messages.
+
+config ZPOOL
+ tristate "Common API for compressed memory storage"
+ default n
+ help
+ Compressed memory storage API. This allows using either zbud or
+ zsmalloc.
+
+config ZSMALLOC
+ bool "Memory allocator for compressed pages"
+ depends on MMU
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
+
+config PGTABLE_MAPPING
+ bool "Use page table mapping to access object in zsmalloc"
+ depends on ZSMALLOC
+ help
+ By default, zsmalloc uses a copy-based object mapping method to
+ access allocations that span two pages. However, if a particular
+	  architecture (e.g., ARM) performs VM mapping faster than copying,
+ then you should select this. This causes zsmalloc to use page table
+ mapping rather than copying for object mapping.
+
+ You can check speed with zsmalloc benchmark[1].
+ [1] https://github.com/spartacus06/zsmalloc
diff --git a/mm/Makefile b/mm/Makefile
index 89244cb9622..0fa6442b4d4 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -59,3 +59,5 @@ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
+obj-$(CONFIG_ZPOOL) += zpool.o
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/mm/ksm.c b/mm/ksm.c
index 784d1e4bc38..7bf748f30aa 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
else
ret = VM_FAULT_WRITE;
put_page(page);
- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
/*
* We must loop because handle_mm_fault() may back out if there's
* any difficulty e.g. if pte accessed bit gets updated concurrently.
diff --git a/mm/memory.c b/mm/memory.c
index 280930f51ef..7c86434d9cf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1846,7 +1846,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
else
return -EFAULT;
}
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS |
+ VM_FAULT_SIGSEGV))
return i ? i : -EFAULT;
BUG();
}
@@ -1956,7 +1957,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return -EHWPOISON;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
@@ -3233,7 +3234,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ad92b46753..2298237db14 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1039,6 +1039,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
return NULL;
arch_refresh_nodedata(nid, pgdat);
+ } else {
+ /* Reset the nr_zones and classzone_idx to 0 before reuse */
+ pgdat->nr_zones = 0;
+ pgdat->classzone_idx = 0;
}
/* we can use NODE_DATA(nid) from here */
@@ -1802,15 +1806,6 @@ void try_offline_node(int nid)
if (is_vmalloc_addr(zone->wait_table))
vfree(zone->wait_table);
}
-
- /*
- * Since there is no way to guarentee the address of pgdat/zone is not
- * on stack of any kernel threads or used by other kernel objects
- * without reference counting or other symchronizing method, do not
- * reset node_data and free pgdat here. Just reset it to 0 and reuse
- * the memory when the node is online again.
- */
- memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 73cbc5dc150..b034f79deb0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -793,8 +793,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
* bw * elapsed + write_bandwidth * (period - elapsed)
* write_bandwidth = ---------------------------------------------------
* period
+ *
+ * @written may have decreased due to account_page_redirty().
+ * Avoid underflowing @bw calculation.
*/
- bw = written - bdi->written_stamp;
+ bw = written - min(written, bdi->written_stamp);
bw *= HZ;
if (unlikely(elapsed > period)) {
do_div(bw, elapsed);
@@ -858,7 +861,7 @@ static void global_update_bandwidth(unsigned long thresh,
unsigned long now)
{
static DEFINE_SPINLOCK(dirty_lock);
- static unsigned long update_time;
+ static unsigned long update_time = INITIAL_JIFFIES;
/*
* check locklessly first to optimize away locking for the most time
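
Both counters are unsigned, so when account_page_redirty() has backed dirty accounting out, written can be smaller than bdi->written_stamp and the plain subtraction would wrap to a huge value, wildly inflating the estimated bandwidth. A worked example of the guard (min() is the kernel's type-checked macro):

    unsigned long written = 100, stamp = 120;   /* pages */

    unsigned long bad = written - stamp;                /* wraps: ULONG_MAX - 19 */
    unsigned long bw  = written - min(written, stamp);  /* clamps to 0 */
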
diff --git a/mm/zpool.c b/mm/zpool.c
new file mode 100644
index 00000000000..739cdf0d183
--- /dev/null
+++ b/mm/zpool.c
@@ -0,0 +1,364 @@
+/*
+ * zpool memory storage api
+ *
+ * Copyright (C) 2014 Dan Streetman
+ *
+ * This is a common frontend for memory storage pool implementations.
+ * Typically, this is used to store compressed memory.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/zpool.h>
+
+struct zpool {
+ char *type;
+
+ struct zpool_driver *driver;
+ void *pool;
+ struct zpool_ops *ops;
+
+ struct list_head list;
+};
+
+static LIST_HEAD(drivers_head);
+static DEFINE_SPINLOCK(drivers_lock);
+
+static LIST_HEAD(pools_head);
+static DEFINE_SPINLOCK(pools_lock);
+
+/**
+ * zpool_register_driver() - register a zpool implementation.
+ * @driver: driver to register
+ */
+void zpool_register_driver(struct zpool_driver *driver)
+{
+ spin_lock(&drivers_lock);
+ atomic_set(&driver->refcount, 0);
+ list_add(&driver->list, &drivers_head);
+ spin_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(zpool_register_driver);
+
+/**
+ * zpool_unregister_driver() - unregister a zpool implementation.
+ * @driver: driver to unregister.
+ *
+ * Module usage counting is used to prevent using a driver
+ * while/after unloading, so if this is called from the module
+ * exit function, it should never fail; if it is called from
+ * anywhere else and returns failure, the driver is in use and
+ * must remain available.
+ */
+int zpool_unregister_driver(struct zpool_driver *driver)
+{
+ int ret = 0, refcount;
+
+ spin_lock(&drivers_lock);
+ refcount = atomic_read(&driver->refcount);
+ WARN_ON(refcount < 0);
+ if (refcount > 0)
+ ret = -EBUSY;
+ else
+ list_del(&driver->list);
+ spin_unlock(&drivers_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(zpool_unregister_driver);
+
+/**
+ * zpool_evict() - evict callback from a zpool implementation.
+ * @pool: pool to evict from.
+ * @handle: handle to evict.
+ *
+ * This can be used by zpool implementations to call the
+ * user's evict zpool_ops struct evict callback.
+ */
+int zpool_evict(void *pool, unsigned long handle)
+{
+ struct zpool *zpool;
+
+ spin_lock(&pools_lock);
+ list_for_each_entry(zpool, &pools_head, list) {
+ if (zpool->pool == pool) {
+ spin_unlock(&pools_lock);
+ if (!zpool->ops || !zpool->ops->evict)
+ return -EINVAL;
+ return zpool->ops->evict(zpool, handle);
+ }
+ }
+ spin_unlock(&pools_lock);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(zpool_evict);
+
+static struct zpool_driver *zpool_get_driver(char *type)
+{
+ struct zpool_driver *driver;
+
+ spin_lock(&drivers_lock);
+ list_for_each_entry(driver, &drivers_head, list) {
+ if (!strcmp(driver->type, type)) {
+ bool got = try_module_get(driver->owner);
+
+ if (got)
+ atomic_inc(&driver->refcount);
+ spin_unlock(&drivers_lock);
+ return got ? driver : NULL;
+ }
+ }
+
+ spin_unlock(&drivers_lock);
+ return NULL;
+}
+
+static void zpool_put_driver(struct zpool_driver *driver)
+{
+ atomic_dec(&driver->refcount);
+ module_put(driver->owner);
+}
+
+/**
+ * zpool_create_pool() - Create a new zpool
+ * @type The type of the zpool to create (e.g. zbud, zsmalloc)
+ * @gfp The GFP flags to use when allocating the pool.
+ * @ops The optional ops callback.
+ *
+ * This creates a new zpool of the specified type. The gfp flags will be
+ * used when allocating memory, if the implementation supports it. If the
+ * ops param is NULL, then the created zpool will not be shrinkable.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: New zpool on success, NULL on failure.
+ */
+struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
+{
+ struct zpool_driver *driver;
+ struct zpool *zpool;
+
+ pr_info("creating pool type %s\n", type);
+
+ driver = zpool_get_driver(type);
+
+ if (!driver) {
+ request_module("zpool-%s", type);
+ driver = zpool_get_driver(type);
+ }
+
+ if (!driver) {
+ pr_err("no driver for type %s\n", type);
+ return NULL;
+ }
+
+ zpool = kmalloc(sizeof(*zpool), gfp);
+ if (!zpool) {
+ pr_err("couldn't create zpool - out of memory\n");
+ zpool_put_driver(driver);
+ return NULL;
+ }
+
+ zpool->type = driver->type;
+ zpool->driver = driver;
+ zpool->pool = driver->create(gfp, ops);
+ zpool->ops = ops;
+
+ if (!zpool->pool) {
+ pr_err("couldn't create %s pool\n", type);
+ zpool_put_driver(driver);
+ kfree(zpool);
+ return NULL;
+ }
+
+ pr_info("created %s pool\n", type);
+
+ spin_lock(&pools_lock);
+ list_add(&zpool->list, &pools_head);
+ spin_unlock(&pools_lock);
+
+ return zpool;
+}
+
+/**
+ * zpool_destroy_pool() - Destroy a zpool
+ * @pool The zpool to destroy.
+ *
+ * Implementations must guarantee this to be thread-safe,
+ * however only when destroying different pools. The same
+ * pool should only be destroyed once, and should not be used
+ * after it is destroyed.
+ *
+ * This destroys an existing zpool. The zpool should not be in use.
+ */
+void zpool_destroy_pool(struct zpool *zpool)
+{
+ pr_info("destroying pool type %s\n", zpool->type);
+
+ spin_lock(&pools_lock);
+ list_del(&zpool->list);
+ spin_unlock(&pools_lock);
+ zpool->driver->destroy(zpool->pool);
+ zpool_put_driver(zpool->driver);
+ kfree(zpool);
+}
+
+/**
+ * zpool_get_type() - Get the type of the zpool
+ * @pool The zpool to check
+ *
+ * This returns the type of the pool.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: The type of zpool.
+ */
+char *zpool_get_type(struct zpool *zpool)
+{
+ return zpool->type;
+}
+
+/**
+ * zpool_malloc() - Allocate memory
+ * @pool The zpool to allocate from.
+ * @size The amount of memory to allocate.
+ * @gfp The GFP flags to use when allocating memory.
+ * @handle Pointer to the handle to set
+ *
+ * This allocates the requested amount of memory from the pool.
+ * The gfp flags will be used when allocating memory, if the
+ * implementation supports it. The provided @handle will be
+ * set to the allocated object handle.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: 0 on success, negative value on error.
+ */
+int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
+ unsigned long *handle)
+{
+ return zpool->driver->malloc(zpool->pool, size, gfp, handle);
+}
+
+/**
+ * zpool_free() - Free previously allocated memory
+ * @pool The zpool that allocated the memory.
+ * @handle The handle to the memory to free.
+ *
+ * This frees previously allocated memory. This does not guarantee
+ * that the pool will actually free memory, only that the memory
+ * in the pool will become available for use by the pool.
+ *
+ * Implementations must guarantee this to be thread-safe,
+ * however only when freeing different handles. The same
+ * handle should only be freed once, and should not be used
+ * after freeing.
+ */
+void zpool_free(struct zpool *zpool, unsigned long handle)
+{
+ zpool->driver->free(zpool->pool, handle);
+}
+
+/**
+ * zpool_shrink() - Shrink the pool size
+ * @pool The zpool to shrink.
+ * @pages The number of pages to shrink the pool.
+ * @reclaimed The number of pages successfully evicted.
+ *
+ * This attempts to shrink the actual memory size of the pool
+ * by evicting currently used handle(s). If the pool was
+ * created with no zpool_ops, or the evict call fails for any
+ * of the handles, this will fail. If non-NULL, the @reclaimed
+ * parameter will be set to the number of pages reclaimed,
+ * which may be more than the number of pages requested.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: 0 on success, negative value on error/failure.
+ */
+int zpool_shrink(struct zpool *zpool, unsigned int pages,
+ unsigned int *reclaimed)
+{
+ return zpool->driver->shrink(zpool->pool, pages, reclaimed);
+}
+
+/**
+ * zpool_map_handle() - Map a previously allocated handle into memory
+ * @pool The zpool that the handle was allocated from
+ * @handle The handle to map
+ * @mm How the memory should be mapped
+ *
+ * This maps a previously allocated handle into memory. The @mm
+ * param indicates to the implementation how the memory will be
+ * used, i.e. read-only, write-only, read-write. If the
+ * implementation does not support it, the memory will be treated
+ * as read-write.
+ *
+ * This may hold locks, disable interrupts, and/or preemption,
+ * and the zpool_unmap_handle() must be called to undo those
+ * actions. The code that uses the mapped handle should complete
+ * its operations on the mapped handle memory quickly and unmap
+ * as soon as possible. As the implementation may use per-cpu
+ * data, multiple handles should not be mapped concurrently on
+ * any cpu.
+ *
+ * Returns: A pointer to the handle's mapped memory area.
+ */
+void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
+ enum zpool_mapmode mapmode)
+{
+ return zpool->driver->map(zpool->pool, handle, mapmode);
+}
+
+/**
+ * zpool_unmap_handle() - Unmap a previously mapped handle
+ * @pool The zpool that the handle was allocated from
+ * @handle The handle to unmap
+ *
+ * This unmaps a previously mapped handle. Any locks or other
+ * actions that the implementation took in zpool_map_handle()
+ * will be undone here. The memory area returned from
+ * zpool_map_handle() should no longer be used after this.
+ */
+void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
+{
+ zpool->driver->unmap(zpool->pool, handle);
+}
+
+/**
+ * zpool_get_total_size() - The total size of the pool
+ * @pool The zpool to check
+ *
+ * This returns the total size in bytes of the pool.
+ *
+ * Returns: Total size of the zpool in bytes.
+ */
+u64 zpool_get_total_size(struct zpool *zpool)
+{
+ return zpool->driver->total_size(zpool->pool);
+}
+
+static int __init init_zpool(void)
+{
+ pr_info("loaded\n");
+ return 0;
+}
+
+static void __exit exit_zpool(void)
+{
+ pr_info("unloaded\n");
+}
+
+module_init(init_zpool);
+module_exit(exit_zpool);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
+MODULE_DESCRIPTION("Common API for compressed memory storage");
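
Putting the new API together, a client's life cycle looks as follows; this is a minimal hedged sketch (error handling trimmed, and the "zsmalloc" type string assumes the zpool driver registered by the next file in this merge):

    #include <linux/string.h>
    #include <linux/zpool.h>

    static int zpool_demo(void)
    {
            struct zpool *pool;
            unsigned long handle;
            char *buf;

            /* NULL ops: the pool will not be shrinkable */
            pool = zpool_create_pool("zsmalloc", GFP_KERNEL, NULL);
            if (!pool)
                    return -ENOMEM;

            if (zpool_malloc(pool, 128, GFP_KERNEL, &handle)) {
                    zpool_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* map, fill, and unmap quickly: the mapping may pin per-cpu state */
            buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
            memset(buf, 0xaa, 128);
            zpool_unmap_handle(pool, handle);

            pr_info("pool size: %llu bytes\n", zpool_get_total_size(pool));

            zpool_free(pool, handle);
            zpool_destroy_pool(pool);
            return 0;
    }
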
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/mm/zsmalloc.c
index 288f58252a1..7031e12fcf2 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/mm/zsmalloc.c
@@ -2,6 +2,7 @@
* zsmalloc memory allocator
*
* Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the license that better fits your requirements.
@@ -78,8 +79,8 @@
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-
-#include "zsmalloc.h"
+#include <linux/zsmalloc.h>
+#include <linux/zpool.h>
/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
@@ -129,7 +130,7 @@
#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
/*
- * On systems with 4K page size, this gives 254 size classes! There is a
+ * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
* spread across these classes
@@ -162,7 +163,7 @@ enum fullness_group {
* n <= N / f, where
* n = number of allocated objects
* N = total number of objects zspage can store
- * f = 1/fullness_threshold_frac
+ * f = fullness_threshold_frac
*
* Similarly, we assign zspage to:
* ZS_ALMOST_FULL when n > N / f
@@ -186,9 +187,6 @@ struct size_class {
spinlock_t lock;
- /* stats */
- u64 pages_allocated;
-
struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};
@@ -204,9 +202,10 @@ struct link_free {
};
struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
+ struct size_class *size_class[ZS_SIZE_CLASSES];
gfp_t flags; /* allocation flags used when growing pool */
+ atomic_long_t pages_allocated;
};
/*
@@ -239,6 +238,82 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
};
+/* zpool driver */
+
+#ifdef CONFIG_ZPOOL
+
+static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+{
+ return zs_create_pool(gfp);
+}
+
+static void zs_zpool_destroy(void *pool)
+{
+ zs_destroy_pool(pool);
+}
+
+static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
+ unsigned long *handle)
+{
+ *handle = zs_malloc(pool, size);
+ return *handle ? 0 : -1;
+}
+static void zs_zpool_free(void *pool, unsigned long handle)
+{
+ zs_free(pool, handle);
+}
+
+static int zs_zpool_shrink(void *pool, unsigned int pages,
+ unsigned int *reclaimed)
+{
+ return -EINVAL;
+}
+
+static void *zs_zpool_map(void *pool, unsigned long handle,
+ enum zpool_mapmode mm)
+{
+ enum zs_mapmode zs_mm;
+
+ switch (mm) {
+ case ZPOOL_MM_RO:
+ zs_mm = ZS_MM_RO;
+ break;
+ case ZPOOL_MM_WO:
+ zs_mm = ZS_MM_WO;
+ break;
+ case ZPOOL_MM_RW: /* fallthru */
+ default:
+ zs_mm = ZS_MM_RW;
+ break;
+ }
+
+ return zs_map_object(pool, handle, zs_mm);
+}
+static void zs_zpool_unmap(void *pool, unsigned long handle)
+{
+ zs_unmap_object(pool, handle);
+}
+
+static u64 zs_zpool_total_size(void *pool)
+{
+ return zs_get_total_pages(pool) << PAGE_SHIFT;
+}
+
+static struct zpool_driver zs_zpool_driver = {
+ .type = "zsmalloc",
+ .owner = THIS_MODULE,
+ .create = zs_zpool_create,
+ .destroy = zs_zpool_destroy,
+ .malloc = zs_zpool_malloc,
+ .free = zs_zpool_free,
+ .shrink = zs_zpool_shrink,
+ .map = zs_zpool_map,
+ .unmap = zs_zpool_unmap,
+ .total_size = zs_zpool_total_size,
+};
+
+MODULE_ALIAS("zpool-zsmalloc");
+#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -359,7 +434,7 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
if (newfg == currfg)
goto out;
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
remove_zspage(page, class, currfg);
insert_zspage(page, class, newfg);
set_zspage_mapping(page, class_idx, newfg);
@@ -519,7 +594,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
while (page) {
struct page *next_page;
struct link_free *link;
- unsigned int i, objs_on_page;
+ unsigned int i = 1;
+ void *vaddr;
/*
* page->index stores offset of first object starting
@@ -530,16 +606,12 @@ static void init_zspage(struct page *first_page, struct size_class *class)
if (page != first_page)
page->index = off;
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
- objs_on_page = (PAGE_SIZE - off) / class->size;
+ vaddr = kmap_atomic(page);
+ link = (struct link_free *)vaddr + off / sizeof(*link);
- for (i = 1; i <= objs_on_page; i++) {
- off += class->size;
- if (off < PAGE_SIZE) {
- link->next = obj_location_to_handle(page, i);
- link += class->size / sizeof(*link);
- }
+ while ((off += class->size) < PAGE_SIZE) {
+ link->next = obj_location_to_handle(page, i++);
+ link += class->size / sizeof(*link);
}
/*
@@ -549,9 +621,9 @@ static void init_zspage(struct page *first_page, struct size_class *class)
*/
next_page = get_next_page(page);
link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
page = next_page;
- off = (off + class->size) % PAGE_SIZE;
+ off %= PAGE_SIZE;
}
}
@@ -776,31 +848,76 @@ static struct notifier_block zs_cpu_nb = {
.notifier_call = zs_cpu_notifier
};
-static void zs_exit(void)
+static void zs_unregister_cpu_notifier(void)
{
int cpu;
+ cpu_notifier_register_begin();
+
for_each_online_cpu(cpu)
zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
- unregister_cpu_notifier(&zs_cpu_nb);
+ __unregister_cpu_notifier(&zs_cpu_nb);
+
+ cpu_notifier_register_done();
}
-static int zs_init(void)
+static int zs_register_cpu_notifier(void)
{
- int cpu, ret;
+ int cpu, uninitialized_var(ret);
+
+ cpu_notifier_register_begin();
- register_cpu_notifier(&zs_cpu_nb);
+ __register_cpu_notifier(&zs_cpu_nb);
for_each_online_cpu(cpu) {
ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
if (notifier_to_errno(ret))
- goto fail;
+ break;
}
- return 0;
-fail:
- zs_exit();
+
+ cpu_notifier_register_done();
return notifier_to_errno(ret);
}
+static void __exit zs_exit(void)
+{
+#ifdef CONFIG_ZPOOL
+ zpool_unregister_driver(&zs_zpool_driver);
+#endif
+ zs_unregister_cpu_notifier();
+}
+
+static int __init zs_init(void)
+{
+ int ret = zs_register_cpu_notifier();
+
+ if (ret) {
+ zs_unregister_cpu_notifier();
+ return ret;
+ }
+
+#ifdef CONFIG_ZPOOL
+ zpool_register_driver(&zs_zpool_driver);
+#endif
+ return 0;
+}
+
+static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
+{
+ return pages_per_zspage * PAGE_SIZE / size;
+}
+
+static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
+{
+ if (prev->pages_per_zspage != pages_per_zspage)
+ return false;
+
+ if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
+ != get_maxobj_per_zspage(size, pages_per_zspage))
+ return false;
+
+ return true;
+}
+
/**
* zs_create_pool - Creates an allocation pool to work from.
* @flags: allocation flags used to allocate pool metadata
@@ -821,25 +938,56 @@ struct zs_pool *zs_create_pool(gfp_t flags)
if (!pool)
return NULL;
- for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+ /*
+	 * Iterate in reverse, because the size of the size_class that we
+	 * want to use for merging should be larger than or equal to the
+	 * current size.
+ */
+ for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
int size;
+ int pages_per_zspage;
struct size_class *class;
+ struct size_class *prev_class;
size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
if (size > ZS_MAX_ALLOC_SIZE)
size = ZS_MAX_ALLOC_SIZE;
+ pages_per_zspage = get_pages_per_zspage(size);
+
+ /*
+ * size_class is used for normal zsmalloc operation such
+ * as alloc/free for that size. Although it is natural that we
+ * have one size_class for each size, there is a chance that we
+	 * can get better memory utilization if we use one size_class for
+	 * many different sizes whose size_classes share the same
+	 * characteristics. So, we make each size_class point to the
+	 * previous size_class if possible.
+ */
+ if (i < ZS_SIZE_CLASSES - 1) {
+ prev_class = pool->size_class[i + 1];
+ if (can_merge(prev_class, size, pages_per_zspage)) {
+ pool->size_class[i] = prev_class;
+ continue;
+ }
+ }
+
+ class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
+ if (!class)
+ goto err;
- class = &pool->size_class[i];
class->size = size;
class->index = i;
+ class->pages_per_zspage = pages_per_zspage;
spin_lock_init(&class->lock);
- class->pages_per_zspage = get_pages_per_zspage(size);
-
+ pool->size_class[i] = class;
}
pool->flags = flags;
return pool;
+
+err:
+ zs_destroy_pool(pool);
+ return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
@@ -849,7 +997,13 @@ void zs_destroy_pool(struct zs_pool *pool)
for (i = 0; i < ZS_SIZE_CLASSES; i++) {
int fg;
- struct size_class *class = &pool->size_class[i];
+ struct size_class *class = pool->size_class[i];
+
+ if (!class)
+ continue;
+
+ if (class->index != i)
+ continue;
for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
if (class->fullness_list[fg]) {
@@ -858,6 +1012,7 @@ void zs_destroy_pool(struct zs_pool *pool)
class->size, fg);
}
}
+ kfree(class);
}
kfree(pool);
}
@@ -876,8 +1031,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
unsigned long obj;
struct link_free *link;
- int class_idx;
struct size_class *class;
+ void *vaddr;
struct page *first_page, *m_page;
unsigned long m_objidx, m_offset;
@@ -885,9 +1040,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
- class_idx = get_size_class_index(size);
- class = &pool->size_class[class_idx];
- BUG_ON(class_idx != class->index);
+ class = pool->size_class[get_size_class_index(size)];
spin_lock(&class->lock);
first_page = find_get_zspage(class);
@@ -899,19 +1052,20 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
return 0;
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
+ atomic_long_add(class->pages_per_zspage,
+ &pool->pages_allocated);
spin_lock(&class->lock);
- class->pages_allocated += class->pages_per_zspage;
}
obj = (unsigned long)first_page->freelist;
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
+ vaddr = kmap_atomic(m_page);
+ link = (struct link_free *)vaddr + m_offset / sizeof(*link);
first_page->freelist = link->next;
memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->inuse++;
/* Now move the zspage to another fullness group, if required */
@@ -927,6 +1081,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
+ void *vaddr;
int class_idx;
struct size_class *class;
@@ -939,28 +1094,27 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
first_page = get_first_page(f_page);
get_zspage_mapping(first_page, &class_idx, &fullness);
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
spin_lock(&class->lock);
/* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
+ vaddr = kmap_atomic(f_page);
+ link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
first_page->inuse--;
fullness = fix_fullness_group(pool, first_page);
-
- if (fullness == ZS_EMPTY)
- class->pages_allocated -= class->pages_per_zspage;
-
spin_unlock(&class->lock);
- if (fullness == ZS_EMPTY)
+ if (fullness == ZS_EMPTY) {
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
free_zspage(first_page);
+ }
}
EXPORT_SYMBOL_GPL(zs_free);
@@ -1001,7 +1155,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
obj_handle_to_location(handle, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &get_cpu_var(zs_map_area);
@@ -1035,7 +1189,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
obj_handle_to_location(handle, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
- class = &pool->size_class[class_idx];
+ class = pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &__get_cpu_var(zs_map_area);
@@ -1054,17 +1208,11 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
-u64 zs_get_total_size_bytes(struct zs_pool *pool)
+unsigned long zs_get_total_pages(struct zs_pool *pool)
{
- int i;
- u64 npages = 0;
-
- for (i = 0; i < ZS_SIZE_CLASSES; i++)
- npages += pool->size_class[i].pages_allocated;
-
- return npages << PAGE_SHIFT;
+ return atomic_long_read(&pool->pages_allocated);
}
-EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
+EXPORT_SYMBOL_GPL(zs_get_total_pages);
module_init(zs_init);
module_exit(zs_exit);
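
The merge rule treats two classes as interchangeable when a zspage of either would span the same number of pages and hold the same number of objects, because then serving the smaller size from the larger class wastes nothing that a separate class would save. A worked example with hypothetical values (PAGE_SIZE 4096, 16-byte class spacing, and assuming get_pages_per_zspage() chose 2 pages for both sizes):

    get_maxobj_per_zspage(2704, 2);  /* 8192 / 2704 = 3 objects */
    get_maxobj_per_zspage(2720, 2);  /* 8192 / 2720 = 3 objects */

    /* equal pages_per_zspage and equal object counts: can_merge() is true,
     * so pool->size_class[] for the 2704 class points at the 2720 class */
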
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 31ee5c6033d..479e8a63125 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -126,6 +126,9 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options *opt = &(IPCB(skb)->opt);
+ if (unlikely(skb->sk))
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 834857f3c87..86183c4e4fd 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -23,7 +23,6 @@
#define ALPHA_MIN ((3*ALPHA_SCALE)/10) /* ~0.3 */
#define ALPHA_MAX (10*ALPHA_SCALE) /* 10.0 */
#define ALPHA_BASE ALPHA_SCALE /* 1.0 */
-#define U32_MAX ((u32)~0U)
#define RTT_MAX (U32_MAX / ALPHA_MAX) /* 3.3 secs */
#define BETA_SHIFT 6
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7aa7faa7c3d..22fc869465e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3077,10 +3077,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (seq_rtt < 0) {
seq_rtt = ca_seq_rtt;
}
- if (!(sacked & TCPCB_SACKED_ACKED))
+ if (!(sacked & TCPCB_SACKED_ACKED)) {
reord = min(pkts_acked, reord);
- if (!after(scb->end_seq, tp->high_seq))
- flag |= FLAG_ORIG_SACK_ACKED;
+ if (!after(scb->end_seq, tp->high_seq))
+ flag |= FLAG_ORIG_SACK_ACKED;
+ }
}
if (sacked & TCPCB_SACKED_ACKED)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8e374e56693..d64453a8102 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1902,7 +1902,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
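
sk_rx_dst can be replaced from other contexts, so the demux path must read it exactly once: with a plain load the compiler is free to re-read sk->sk_rx_dst between the NULL test and the dst_check() call and hand dst_check() a different pointer than the one just tested. ACCESS_ONCE() forces a single load into the local; the IPv6 early-demux path below gets the same treatment. The pattern, in brief:

    struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);  /* one load */

    if (dst)                          /* test the snapshot ... */
            dst = dst_check(dst, 0);  /* ... and use that same snapshot */
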
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f755a8326e8..5653febcb2a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2570,39 +2570,65 @@ begin_fwd:
}
}
-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow FIN packets to exceed the memory limits to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+ int amt, status;
+
+ if (size <= sk->sk_forward_alloc)
+ return;
+ amt = sk_mem_pages(size);
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
*/
void tcp_send_fin(struct sock *sk)
{
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int mss_now;
- /* Optimization, tack on the FIN if we have a queue of
- * unsent frames. But be careful about outgoing SACKS
- * and IP options.
+ /* Optimization, tack on the FIN if we have one skb in write queue and
+ * this skb was not yet sent, or we are under memory pressure.
+ * Note: in the latter case, the FIN packet will be sent after a timeout,
+ * as the TCP stack thinks it has already been transmitted.
*/
- mss_now = tcp_current_mss(sk);
-
- if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
- TCP_SKB_CB(skb)->end_seq++;
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+ TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
+ if (!tcp_send_head(sk)) {
+ /* This means tskb was already sent.
+ * Pretend we included the FIN on previous transmit.
+ * We need to set tp->snd_nxt to the value it would have
+ * if FIN had been sent. This is because retransmit path
+ * does not change tp->snd_nxt.
+ */
+ tp->snd_nxt++;
+ return;
+ }
} else {
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
- if (skb)
- break;
- yield();
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ if (tskb)
+ goto coalesce;
+ return;
}
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
@@ -2771,6 +2797,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
}
#endif
+ /* Do not fool tcpdump (if any), clean our debris */
+ skb->tstamp.tv64 = 0;
return skb;
}
EXPORT_SYMBOL(tcp_make_synack);
@@ -2908,6 +2936,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto fallback;
syn_data->ip_summed = CHECKSUM_PARTIAL;
memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
+ skb_shinfo(syn_data)->gso_segs = 1;
if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
fo->data->msg_iov, 0, space))) {
kfree_skb(syn_data);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 060a0449aca..05f361338c2 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ /* Only set hop_limit on the interface if it is higher than
+ * the current hop_limit.
+ */
+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ } else {
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ }
if (rt)
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0acad490d9d..a4738211c60 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1619,7 +1619,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93..799bafc2af3 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_ack_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_busy_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_p_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_rej_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d25f2937764..957c1db6665 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,30 @@
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static bool nf_generic_should_process(u8 proto)
+{
+ switch (proto) {
+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
+ case IPPROTO_SCTP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
+ case IPPROTO_DCCP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
+ case IPPROTO_GRE:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
+ case IPPROTO_UDPLITE:
+ return false;
+#endif
+ default:
+ return true;
+ }
+}
+
static inline struct nf_generic_net *generic_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.generic;
@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
- return true;
+ return nf_generic_should_process(nf_ct_protonum(ct));
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 907214b4c4d..fc6cbe82785 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -71,14 +71,14 @@ static ctl_table rds_sysctl_rds_table[] = {
{
.procname = "max_unacked_packets",
.data = &rds_sysctl_max_unacked_packets,
- .maxlen = sizeof(unsigned long),
+ .maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "max_unacked_bytes",
.data = &rds_sysctl_max_unacked_bytes,
- .maxlen = sizeof(unsigned long),
+ .maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
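
The LLC and RDS hunks fix the same bug class: a ctl_table entry whose .data points at an int, whose handler is proc_dointvec(), but whose .maxlen says sizeof(long). On 64-bit systems proc_dointvec() then treats the variable as an array of two ints and can read or write past it. Taking sizeof() of the variable itself keeps the three fields consistent by construction; a hedged sketch with a hypothetical tunable:

    static int foo_limit;

    static struct ctl_table foo_table[] = {
            {
                    .procname     = "foo_limit",
                    .data         = &foo_limit,
                    .maxlen       = sizeof(foo_limit),  /* not sizeof(long) */
                    .mode         = 0644,
                    .proc_handler = proc_dointvec,
            },
            { }
    };
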
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ff427733c29..a96bed4db3e 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -150,7 +150,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
goto out;
/* No partial writes. */
- length = EINVAL;
+ length = -EINVAL;
if (*ppos != 0)
goto out;
@@ -1190,7 +1190,7 @@ static void sel_remove_entries(struct dentry *de)
spin_lock(&de->d_lock);
node = de->d_subdirs.next;
while (node != &de->d_subdirs) {
- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+ struct dentry *d = list_entry(node, struct dentry, d_child);
spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
list_del_init(node);
@@ -1664,12 +1664,12 @@ static void sel_remove_classes(void)
list_for_each(class_node, &class_dir->d_subdirs) {
struct dentry *class_subdir = list_entry(class_node,
- struct dentry, d_u.d_child);
+ struct dentry, d_child);
struct list_head *class_subdir_node;
list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
struct dentry *d = list_entry(class_subdir_node,
- struct dentry, d_u.d_child);
+ struct dentry, d_child);
if (d->d_inode)
if (d->d_inode->i_mode & S_IFDIR)
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index 2ca9f2e9313..53745f4c2bf 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
u32 value2;
- unsigned long flags;
u32 rate;
if (emu->card_capabilities->emu_model) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x38, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x1) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x2a, &value);
snd_emu1010_fpga_read(emu, 0x2b, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
} else {
snd_iprintf(buffer, "ADAT Unlocked\n");
}
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x20, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x4) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x28, &value);
snd_emu1010_fpga_read(emu, 0x29, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
} else {
@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
{
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
- unsigned long flags;
int i;
snd_iprintf(buffer, "EMU1010 Registers:\n\n");
for(i = 0; i < 0x40; i+=1) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, i, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
}
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4008034b6eb..a8eb7fe2766 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -266,7 +266,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
{
/* We currently only handle front, HP */
static hda_nid_t pins[] = {
- 0x0f, 0x10, 0x14, 0x15, 0
+ 0x0f, 0x10, 0x14, 0x15, 0x17, 0
};
hda_nid_t *p;
for (p = pins; *p; p++)
@@ -3363,6 +3363,7 @@ enum {
ALC269_FIXUP_QUANTA_MUTE,
ALC269_FIXUP_LIFEBOOK,
ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ ALC269_FIXUP_LIFEBOOK_HP_PIN,
ALC269_FIXUP_AMIC,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
@@ -3477,6 +3478,13 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x0221102f }, /* HP out */
+ { }
+ },
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -3727,6 +3735,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index c89a5bf5c00..c311681bd39 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -175,6 +175,7 @@ static const struct rc_config {
{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
};
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index d1b3a361e52..4039854560d 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -1,8 +1,12 @@
CC = $(CROSS_COMPILE)gcc
-BUILD_OUTPUT := $(PWD)
+BUILD_OUTPUT := $(CURDIR)
PREFIX := /usr
DESTDIR :=
+ifeq ("$(origin O)", "command line")
+ BUILD_OUTPUT := $(O)
+endif
+
turbostat : turbostat.c
CFLAGS += -Wall
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4eed4cd9b58..8c56eae63e7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1550,8 +1550,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc->generation = slots->generation;
ghc->len = len;
ghc->memslot = gfn_to_memslot(kvm, start_gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
ghc->hva += offset;
} else {
/*