-rw-r--r--  Documentation/feature-removal-schedule.txt | 20
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/avr32/include/asm/setup.h | 9
-rw-r--r--  arch/avr32/kernel/setup.c | 15
-rw-r--r--  arch/avr32/kernel/traps.c | 22
-rw-r--r--  arch/avr32/mach-at32ap/clock.c | 24
-rw-r--r--  arch/avr32/mach-at32ap/extint.c | 22
-rw-r--r--  arch/avr32/mach-at32ap/pio.c | 2
-rw-r--r--  arch/avr32/mach-at32ap/pm-at32ap700x.S | 2
-rw-r--r--  arch/m68k/include/asm/unistd.h | 6
-rw-r--r--  arch/m68k/kernel/entry_mm.S | 4
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 4
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 6
-rw-r--r--  arch/x86/xen/Kconfig | 1
-rw-r--r--  arch/x86/xen/enlighten.c | 21
-rw-r--r--  arch/x86/xen/mmu.c | 4
-rw-r--r--  drivers/amba/bus.c | 6
-rw-r--r--  drivers/base/platform.c | 7
-rw-r--r--  drivers/base/power/main.c | 8
-rw-r--r--  drivers/dma/fsldma.c | 2
-rw-r--r--  drivers/gpio/ml_ioh_gpio.c | 1
-rw-r--r--  drivers/gpio/pca953x.c | 5
-rw-r--r--  drivers/gpio/pch_gpio.c | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 26
-rw-r--r--  drivers/gpu/stub/Kconfig | 1
-rw-r--r--  drivers/mfd/mfd-core.c | 16
-rw-r--r--  drivers/net/benet/be.h | 4
-rw-r--r--  drivers/net/benet/be_main.c | 19
-rw-r--r--  drivers/net/bna/bfa_ioc.c | 10
-rw-r--r--  drivers/net/can/mcp251x.c | 3
-rw-r--r--  drivers/net/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/mlx4/main.c | 5
-rw-r--r--  drivers/net/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/mlx4/sense.c | 4
-rw-r--r--  drivers/net/pppoe.c | 2
-rw-r--r--  drivers/net/smsc911x.c | 8
-rw-r--r--  drivers/net/usb/smsc95xx.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r--  drivers/net/wireless/b43/dma.c | 2
-rw-r--r--  drivers/net/wireless/b43/dma.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 2
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/efuse.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/sdio.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/spi.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/testmode.c | 5
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 20
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 1
-rw-r--r--  drivers/pci/pci-driver.c | 6
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/platform/x86/Kconfig | 3
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 2
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 4
-rw-r--r--  drivers/platform/x86/eeepc-wmi.c | 2
-rw-r--r--  drivers/platform/x86/intel_pmic_gpio.c | 43
-rw-r--r--  drivers/platform/x86/samsung-laptop.c | 17
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 65
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 3
-rw-r--r--  drivers/spi/amba-pl022.c | 2
-rw-r--r--  drivers/spi/dw_spi.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 2
-rw-r--r--  drivers/spi/spi_bfin5xx.c | 2
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/samsung-laptop/Kconfig | 10
-rw-r--r--  drivers/staging/samsung-laptop/Makefile | 1
-rw-r--r--  drivers/staging/samsung-laptop/TODO | 5
-rw-r--r--  drivers/staging/samsung-laptop/samsung-laptop.c | 843
-rw-r--r--  drivers/xen/events.c | 6
-rw-r--r--  drivers/xen/manage.c | 6
-rw-r--r--  fs/cifs/README | 16
-rw-r--r--  fs/cifs/cache.c | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 43
-rw-r--r--  fs/cifs/cifs_spnego.c | 4
-rw-r--r--  fs/cifs/cifs_unicode.c | 35
-rw-r--r--  fs/cifs/cifs_unicode.h | 2
-rw-r--r--  fs/cifs/cifsencrypt.c | 21
-rw-r--r--  fs/cifs/cifsfs.c | 6
-rw-r--r--  fs/cifs/cifsglob.h | 13
-rw-r--r--  fs/cifs/cifssmb.c | 14
-rw-r--r--  fs/cifs/connect.c | 68
-rw-r--r--  fs/cifs/file.c | 70
-rw-r--r--  fs/cifs/link.c | 4
-rw-r--r--  fs/cifs/misc.c | 3
-rw-r--r--  fs/cifs/sess.c | 23
-rw-r--r--  fs/ext4/ext4_jbd2.h | 4
-rw-r--r--  fs/ext4/fsync.c | 17
-rw-r--r--  fs/ext4/inode.c | 35
-rw-r--r--  fs/ext4/super.c | 74
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/journal.c | 3
-rw-r--r--  fs/namespace.c | 16
-rw-r--r--  fs/nfs/write.c | 6
-rw-r--r--  fs/nfsd/lockd.c | 1
-rw-r--r--  fs/nfsd/nfs4state.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.c | 27
-rw-r--r--  fs/xfs/linux-2.6/xfs_message.h | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 129
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 228
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 2
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 7
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 5
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 2
-rw-r--r--  fs/xfs/xfs_alloc.c | 30
-rw-r--r--  fs/xfs/xfs_inode_item.c | 67
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  fs/xfs/xfs_log.c | 38
-rw-r--r--  fs/xfs/xfs_log_priv.h | 1
-rw-r--r--  fs/xfs/xfs_mount.h | 9
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 421
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 22
-rw-r--r--  include/linux/can/platform/mcp251x.h | 2
-rw-r--r--  include/linux/mfd/core.h | 13
-rw-r--r--  include/linux/netfilter.h | 3
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h | 3
-rw-r--r--  include/linux/platform_device.h | 5
-rw-r--r--  include/linux/suspend.h | 11
-rw-r--r--  include/net/ip_vs.h | 2
-rw-r--r--  include/net/mac80211.h | 15
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  kernel/power/Kconfig | 6
-rw-r--r--  kernel/sched.c | 20
-rw-r--r--  mm/memory.c | 26
-rw-r--r--  mm/mlock.c | 13
-rw-r--r--  mm/mmap.c | 13
-rw-r--r--  net/dsa/mv88e6131.c | 23
-rw-r--r--  net/dsa/mv88e6xxx.h | 2
-rw-r--r--  net/ipv4/netfilter.c | 5
-rw-r--r--  net/ipv4/route.c | 8
-rw-r--r--  net/ipv4/xfrm4_policy.c | 1
-rw-r--r--  net/ipv6/netfilter.c | 13
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/netfilter/Kconfig | 1
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c | 3
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 3
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_port.c | 3
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 109
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 53
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_h323_asn1.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 16
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 2
-rw-r--r--  net/netfilter/xt_addrtype.c | 42
-rw-r--r--  net/netfilter/xt_conntrack.c | 2
178 files changed, 1587 insertions(+), 2083 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 274b32d1253..492e81df296 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org>
----------------------------
-What: Support for lcd_switch and display_get in asus-laptop driver
-When: March 2010
-Why: These two features use non-standard interfaces. There are the
- only features that really need multiple path to guess what's
- the right method name on a specific laptop.
-
- Removing them will allow to remove a lot of code an significantly
- clean the drivers.
-
- This will affect the backlight code which won't be able to know
- if the backlight is on or off. The platform display file will also be
- write only (like the one in eeepc-laptop).
-
- This should'nt affect a lot of user because they usually know
- when their display is on or off.
-
-Who: Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
What: sysfs-class-rfkill state file
When: Feb 2014
Files: net/rfkill/core.c
diff --git a/MAINTAINERS b/MAINTAINERS
index 6b4b9cdec37..649600cb8ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6916,6 +6916,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
+XEN NETWORK BACKEND DRIVER
+M: Ian Campbell <ian.campbell@citrix.com>
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/xen-netback/*
+
XEN PCI SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
index 8392b64079d..322e7334ccb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h
index ff5b7cf6be4..160543dbec7 100644
--- a/arch/avr32/include/asm/setup.h
+++ b/arch/avr32/include/asm/setup.h
@@ -94,6 +94,13 @@ struct tag_ethernet {
#define ETH_INVALID_PHY 0xff
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+ u32 board_number;
+};
+
struct tag {
struct tag_header hdr;
union {
@@ -102,6 +109,7 @@ struct tag {
struct tag_cmdline cmdline;
struct tag_clock clock;
struct tag_ethernet ethernet;
+ struct tag_boardinfo boardinfo;
} u;
};
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
extern resource_size_t fbmem_start;
extern resource_size_t fbmem_size;
+extern u32 board_number;
void setup_processor(void);
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 5c7083916c3..bb0974cce4a 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag)
__tagtable(ATAG_CLOCK, parse_tag_clock);
/*
+ * The board_number correspond to the bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used in some
+ * kind of board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+ board_number = tag->u.boardinfo.board_number;
+
+ return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
+/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
* declarations.
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index b91b2044af9..7aa25756412 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
info.si_code = code;
info.si_addr = (void __user *)addr;
force_sig_info(signr, &info, current);
-
- /*
- * Init gets no signals that it doesn't have a handler for.
- * That's all very well, but if it has caused a synchronous
- * exception and we ignore the resulting signal, it will just
- * generate the same exception over and over again and we get
- * nowhere. Better to kill it and let the kernel panic.
- */
- if (is_global_init(current)) {
- __sighandler_t handler;
-
- spin_lock_irq(&current->sighand->siglock);
- handler = current->sighand->action[signr-1].sa.sa_handler;
- spin_unlock_irq(&current->sighand->siglock);
- if (handler == SIG_DFL) {
- /* init has generated a synchronous exception
- and it doesn't have a handler for the signal */
- printk(KERN_CRIT "init has generated signal %ld "
- "but has no handler for it\n", signr);
- do_exit(signr);
- }
- }
}
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 442f08c5e64..86925fd6ea5 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
spin_unlock(&clk_list_lock);
}
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
{
struct clk *clk;
- spin_lock(&clk_list_lock);
-
list_for_each_entry(clk, &at32_clock_list, list) {
if (clk->dev == dev && strcmp(id, clk->name) == 0) {
- spin_unlock(&clk_list_lock);
return clk;
}
}
- spin_unlock(&clk_list_lock);
return ERR_PTR(-ENOENT);
}
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ struct clk *clk;
+
+ spin_lock(&clk_list_lock);
+ clk = __clk_get(dev, id);
+ spin_unlock(&clk_list_lock);
+
+ return clk;
+}
+
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
spin_lock(&clk_list_lock);
/* show clock tree as derived from the three oscillators */
- clk = clk_get(NULL, "osc32k");
+ clk = __clk_get(NULL, "osc32k");
dump_clock(clk, &r);
clk_put(clk);
- clk = clk_get(NULL, "osc0");
+ clk = __clk_get(NULL, "osc0");
dump_clock(clk, &r);
clk_put(clk);
- clk = clk_get(NULL, "osc1");
+ clk = __clk_get(NULL, "osc1");
dump_clock(clk, &r);
clk_put(clk);
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 47ba4b9b6db..fbc2aeaebdd 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -61,34 +61,34 @@ struct eic {
static struct eic *nmi_eic;
static bool nmi_enabled;
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
}
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
}
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq;
unsigned int i = irq - eic->first_irq;
u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int_irq = platform_get_irq(pdev, 0);
- if (!regs || !int_irq) {
+ if (!regs || (int)int_irq <= 0) {
dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
return -ENXIO;
}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index f308e1ddc62..2e0aa853a4b 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
pio_writel(pio, IDR, 1 << (gpio & 0x1f));
}
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
{
unsigned gpio = irq_to_gpio(d->irq);
struct pio_device *pio = &pio_dev[gpio >> 5];
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 17503b0ed6c..f868f4ce761 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -53,7 +53,7 @@ cpu_enter_idle:
st.w r8[TI_flags], r9
unmask_interrupts
sleep CPU_SLEEP_IDLE
- .size cpu_idle_sleep, . - cpu_idle_sleep
+ .size cpu_enter_idle, . - cpu_enter_idle
/*
* Common return path for PM functions that don't run from
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 26d851d385b..29e17907d9f 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -343,10 +343,14 @@
#define __NR_fanotify_init 337
#define __NR_fanotify_mark 338
#define __NR_prlimit64 339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime 342
+#define __NR_syncfs 343
#ifdef __KERNEL__
-#define NR_syscalls 340
+#define NR_syscalls 344
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index 1559dea36e5..1359ee65957 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -750,4 +750,8 @@ sys_call_table:
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
+ .long sys_name_to_handle_at /* 340 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 79b1ed198c0..9b8393d8adb 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
+ .long sys_name_to_handle_at /* 340 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee1..28581f1ad2c 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int ibmebus_bus_pm_freeze(struct device *dev)
{
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define ibmebus_bus_pm_freeze NULL
#define ibmebus_bus_pm_thaw NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
#define ibmebus_bus_pm_poweroff_noirq NULL
#define ibmebus_bus_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
.prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1c7121ba18f..5cc821cb2e0 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
config XEN_SAVE_RESTORE
bool
depends on XEN
+ select HIBERNATE_CALLBACKS
default y
config XEN_DEBUG_FS
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 49dbd78ec3c..e3c6a06cf72 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
static __init void xen_init_cpuid_mask(void)
{
unsigned int ax, bx, cx, dx;
+ unsigned int xsave_mask;
cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask &=
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
-
ax = 1;
- cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
- /* cpuid claims we support xsave; try enabling it to see what happens */
- if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
- unsigned long cr4;
-
- set_in_cr4(X86_CR4_OSXSAVE);
-
- cr4 = read_cr4();
+ xsave_mask =
+ (1 << (X86_FEATURE_XSAVE % 32)) |
+ (1 << (X86_FEATURE_OSXSAVE % 32));
- if ((cr4 & X86_CR4_OSXSAVE) == 0)
- cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
- clear_in_cr4(X86_CR4_OSXSAVE);
- }
+ /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+ if ((cx & xsave_mask) != xsave_mask)
+ cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
}
static void xen_set_debugreg(int reg, unsigned long val)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c82df6c9c0f..a991b57f91f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
if (io_page &&
(xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN(addr != other_addr,
+ WARN_ONCE(addr != other_addr,
"0x%lx is using VM_IO, but it is 0x%lx!\n",
(unsigned long)addr, (unsigned long)other_addr);
} else {
pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+ WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
"0x%lx is missing VM_IO (and wasn't fixed)!\n",
(unsigned long)addr);
}
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 82104050315..7025593a58c 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int amba_pm_freeze(struct device *dev)
{
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
#define amba_pm_poweroff_noirq NULL
#define amba_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f051cfff18a..9e0e4fc24c4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
+ kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
kfree(pa);
}
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int platform_pm_freeze(struct device *dev)
{
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 052dc53eef3..fbc5b6e7c59 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
suspend_report_result(ops->restore, error);
}
break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
suspend_report_result(ops->restore_noirq, error);
}
break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6b396759e7f..8a781540590 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
{}
};
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
.driver = {
.name = "fsl-elo-dma",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index 7f6f01a4b14..0a775f7987c 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
+ iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
mutex_unlock(&chip->lock);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 583e9259207..7630ab7b9be 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
- goto out_failed;
+ goto out_failed_irq;
if (pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
-out_failed:
+out_failed_irq:
pca953x_irq_teardown(chip);
+out_failed:
kfree(chip->dyn_pdata);
kfree(chip);
return ret;
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 2c6af870510..f970a5f3585 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
+ iowrite32(reg_val, &chip->reg->po);
mutex_unlock(&chip->lock);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a6feb78c404..c58f691ec3c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -96,6 +96,7 @@ config DRM_I915
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8314a49b6b9..90aef64b76f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -269,7 +269,7 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
#define MACRO_INDEX_SIZE 2
#define MACRO_SIZE 8
@@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
}
static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_JUMP opcode: 0x5C ('\')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): offset (in bios)
+ *
+ * Continue execution of init table from 'offset'
+ */
+
+ uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+ return jmp_offset - offset;
+}
+
+static int
init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_JUMP" , 0x5C, init_jump },
{ "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
#define MAX_TABLE_OPS 1000
static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
- struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* Parses all commands in an init table.
@@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* XFX GT-240X-YA
+ *
+ * So many things wrong here, replace the entire encoder table..
+ */
+ if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+ if (idx == 0) {
+ *conn = 0x02001300; /* VGA, connector 1 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 1) {
+ *conn = 0x01010312; /* DVI, connector 0 */
+ *conf = 0x00020030;
+ } else
+ if (idx == 2) {
+ *conn = 0x01010310; /* VGA, connector 0 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 3) {
+ *conn = 0x02022362; /* HDMI, connector 2 */
+ *conf = 0x00020010;
+ } else {
+ *conn = 0x0000000e; /* EOL */
+ *conf = 0x00000000;
+ }
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 57e5302503d..856d56a98d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2683377f413..78f467fe30b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
u8 tRC; /* Byte 9 */
u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 magic_number = 0; /* Yeah... sorry*/
u8 *mem = NULL, *entry;
int i, recordlen, entries;
@@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
if (!memtimings->timing)
return;
+ /* Get "some number" from the timing reg for NV_40
+ * Used in calculations later */
+ if(dev_priv->card_type == NV_40) {
+ magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+ }
+
entry = mem + mem[1];
for (i = 0; i < entries; i++, entry += recordlen) {
struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
/* XXX: I don't trust the -1's and +1's... they must come
* from somewhere! */
- timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
tUNK_18 << 16 |
- (tUNK_1 + tUNK_19 + 1) << 8 |
- (tUNK_2 - 1));
+ (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+ if(dev_priv->chipset == 0xa8) {
+ timing->reg_100224 |= (tUNK_2 - 1);
+ } else {
+ timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+ }
timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
- if(recordlen > 19) {
- timing->reg_100228 += (tUNK_19 - 1) << 24;
- }/* I cannot back-up this else-statement right now
- else {
- timing->reg_100228 += tUNK_12 << 24;
- }*/
-
- /* XXX: reg_10022c */
- timing->reg_10022c = tUNK_2 - 1;
-
- timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
- tUNK_13 << 8 | tUNK_13);
-
- /* XXX: +6? */
- timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
- timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
- /* XXX; reg_100238, reg_10023c
- * reg: 0x00??????
- * reg_10023c:
- * 0 for pre-NV50 cards
- * 0x????0202 for NV50+ cards (empirical evidence) */
- if(dev_priv->card_type >= NV_50) {
+ if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+ timing->reg_100228 |= (tUNK_19 - 1) << 24;
+ }
+
+ if(dev_priv->card_type == NV_40) {
+ /* NV40: don't know what the rest of the regs are..
+ * And don't need to know either */
+ timing->reg_100228 |= 0x20200000 | magic_number << 24;
+ } else if(dev_priv->card_type >= NV_50) {
+ /* XXX: reg_10022c */
+ timing->reg_10022c = tUNK_2 - 1;
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ timing->reg_100234 = (tRAS << 24 | tRC);
+ timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+ if(dev_priv->chipset < 0xa3) {
+ timing->reg_100234 |= (tUNK_2 + 2) << 8;
+ } else {
+ /* XXX: +6? */
+ timing->reg_100234 |= (tUNK_19 + 6) << 8;
+ }
+
+ /* XXX; reg_100238, reg_10023c
+ * reg_100238: 0x00??????
+ * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
timing->reg_10023c = 0x202;
+ if(dev_priv->chipset < 0xa3) {
+ timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+ } else {
+ /* currently unknown
+ * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+ }
}
NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
timing->reg_100238, timing->reg_10023c);
}
- memtimings->nr_timing = entries;
+ memtimings->nr_timing = entries;
memtimings->supported = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ac62a1b8c4f..670e3cb697e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
case 0x13:
case 0x15:
perflvl->fanspeed = entry[55];
- perflvl->voltage = entry[56];
+ perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
perflvl->core = ROM32(entry[1]) * 10;
perflvl->memory = ROM32(entry[5]) * 20;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 5bb2859001e..6e2b1a6caa2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
- if (dev_priv->chipset != 0x86)
+ if (dev_priv->chipset == 0x50 ||
+ dev_priv->chipset == 0xac)
engine->graph.tlb_flush = nv50_graph_tlb_flush;
- else {
- /* from what i can see nvidia do this on every
- * pre-NVA3 board except NVAC, but, we've only
- * ever seen problems on NV86
- */
- engine->graph.tlb_flush = nv86_graph_tlb_flush;
- }
+ else
+ engine->graph.tlb_flush = nv84_graph_tlb_flush;
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c82db37d9f4..12098bf839c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
int head = nv_encoder->restore.head;
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
- if (native_mode)
- call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
- native_mode->clock);
- else
- NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+ struct nouveau_connector *connector =
+ nouveau_encoder_connector_get(nv_encoder);
+
+ if (connector && connector->native_mode)
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_ON,
+ connector->native_mode->clock);
} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
int clock = nouveau_hw_pllvals_to_clk
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 2b9984027f4..a19ccaa025b 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
start = ptimer->read(dev);
do {
- nv_wr32(dev, 0x61002c, 0x370);
- nv_wr32(dev, 0x000140, 1);
-
if (nv_ro32(disp->ntfy, 0x000))
return 0;
} while (ptimer->read(dev) - start < 2000000000ULL);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index a2cfaa691e9..c8e83c1a4de 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
evo->dma.max = (4096/4) - 2;
+ evo->dma.max &= ~7;
evo->dma.put = 0;
evo->dma.cur = evo->dma.put;
evo->dma.free = evo->dma.max - evo->dma.cur;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8675b00caf1..b02a5b1e7d3 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
}
void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 69af0ba7edd..a0a2a0277f7 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
- u32 r100c80, engine;
+ u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
- if (vm == dev_priv->chan_vm)
- engine = 1;
- else
- engine = 5;
-
+ spin_lock(&dev_priv->ramin_lock);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- r100c80 = nv_rd32(dev, 0x100c80);
+ /* looks like maybe a "free flush slots" counter, the
+ * faster you write to 0x100cbc to more it decreases
+ */
+ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+ NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
- if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
- NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ /* wait for flush to be queued? */
+ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+ NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
}
+ spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 258fa5e7a2d..d71d375149f 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -32,6 +32,7 @@
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
+#include "radeon.h"
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
+
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
- (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ if (rdev->family == CHIP_RV515)
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b41ec59c710..9d516a8c4df 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ if ((rdev->family == CHIP_R600) ||
+ (rdev->family == CHIP_RV610) ||
+ (rdev->family == CHIP_RV630) ||
+ (rdev->family == CHIP_RV670))
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b0cc74c08c..3453910ee0f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
- if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
- if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ if (voltage->type == VOLTAGE_SW) {
+ if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+ }
+ if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+ radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ rdev->pm.current_vddci = voltage->vddci;
+ DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
}
}
}
@@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index be271c42de4..15d58292677 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 93f536594c7..ba643b57605 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
@@ -767,7 +767,9 @@ struct radeon_voltage {
u8 vddci_id; /* index into vddci voltage table */
bool vddci_enabled;
/* r6xx+ sw */
- u32 voltage;
+ u16 voltage;
+ /* evergreen+ vddci */
+ u16 vddci;
};
/* clock mode flags */
@@ -835,10 +837,12 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
- u32 current_vddc;
+ u16 current_vddc;
+ u16 current_vddci;
u32 default_sclk;
u32 default_mclk;
- u32 default_vddc;
+ u16 default_vddc;
+ u16 default_vddci;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eb888ee5f67..ca576191d05 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+ if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 99768d9d91d..f5d12fb103f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
}
}
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
u8 frev, crev;
u16 data_offset;
union firmware_info *firmware_info;
- u16 vddc = 0;
+
+ *vddc = 0;
+ *vddci = 0;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
- vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2))
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
}
-
- return vddc;
}
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
- u16 vddc = radeon_atombios_get_default_vddc(rdev);
+ u16 vddc, vddci;
+
+ radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
/* patch the table values with the default slck/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
@@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->evergreen.usVDDC);
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+ le16_to_cpu(clock_info->evergreen.usVDDCI);
} else {
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2577,25 +2585,25 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = level;
+ u8 frev, crev, volt_index = voltage_level;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
switch (crev) {
case 1:
- args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
- args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(level);
+ args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9e59868d354..bbcd1dd7bac 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = rdev->wb.wb[scratch_index/4];
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0534ef2f33..8a955bbdb60 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
rdev->gart.ttm_alloced = NULL;
+
+ radeon_dummy_page_fini(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ded2a45bc95..ccbabf734a6 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
- DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5b54268ed6b..2f46e0c8df5 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
#define MAX_RADEON_LEVEL 0xFF
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 08de669e025..86eda1ea94d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#include "atom.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
@@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
@@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bbc9cd82333..c6776e48fdd 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
void radeon_ring_free_size(struct radeon_device *rdev)
{
if (rdev->wb.enabled)
- rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+ rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 876cebc4b8b..6e3b11e5abb 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
- radeon_atom_set_voltage(rdev, voltage->vddc_id);
+ radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b974ac7df8d..ef8a5babe9f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
}
@@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 737a2a2e46a..9d9d92945f8 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
- if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
- void *addr;
- addr = dma_alloc_coherent(NULL, PAGE_SIZE,
- &dma_address[r],
- gfp_flags);
- if (addr == NULL)
- return -ENOMEM;
- p = virt_to_page(addr);
- } else
- p = alloc_page(gfp_flags);
+ p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
+
list_add(&p->lru, pages);
}
return 0;
@@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
- unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
- r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
- if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
- void *addr = page_address(p);
- WARN_ON(!addr || !dma_address[r]);
- if (addr)
- dma_free_coherent(NULL, PAGE_SIZE,
- addr,
- dma_address[r]);
- dma_address[r] = 0;
- } else
- __free_page(p);
- r--;
+ __free_page(p);
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 70e60a4bb67..419917955bf 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -5,6 +5,7 @@ config STUB_POULSBO
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select THERMAL if ACPI
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d01574d9887..f4c8c844b91 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
}
EXPORT_SYMBOL(mfd_cell_disable);
+static int mfd_platform_add_cell(struct platform_device *pdev,
+ const struct mfd_cell *cell)
+{
+ if (!cell)
+ return 0;
+
+ pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+ if (!pdev->mfd_cell)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
- ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+ ret = mfd_platform_add_cell(pdev, cell);
if (ret)
goto fail_res;
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
-/* platform_device_del(pdev); */
fail_res:
kfree(res);
fail_device:
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f803c58b941..66823eded7a 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -154,7 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
- u8 msix_vec_idx;
+ u8 eq_idx;
struct napi_struct napi;
};
@@ -291,7 +291,7 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- u8 msix_vec_next_idx;
+ u8 eq_next_idx;
struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9a54c8b24ff..7cb5a114c73 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
- adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
/* Alloc TX eth compl queue */
@@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
- rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
/* CQ */
cq = &rxo->cq;
@@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
if (!isr)
return IRQ_NONE;
- if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+ if ((1 << adapter->tx_eq.eq_idx & isr))
event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+ if ((1 << rxo->rx_eq.eq_idx & isr))
event_handle(adapter, &rxo->rx_eq);
}
}
@@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{
- return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+ return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
if (be_physfn(adapter) && adapter->sriov_enabled)
for (vf = 0; vf < num_vfs; vf++)
@@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
static void be_shutdown(struct pci_dev *pdev)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- if (netif_running(netdev))
+ if (!adapter)
+ return;
+
+ if (netif_running(adapter->netdev))
cancel_delayed_work_sync(&adapter->work);
- netif_device_detach(netdev);
+ netif_device_detach(adapter->netdev);
be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 34933cb9569..e3de0b8625c 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
- u16 bdf;
-
- bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
- ioc->pcidev.device_id);
-
- pr_crit("Firmware heartbeat failure at %d", bdf);
- BUG_ON(1);
+ pr_crit("Heart Beat of IOC has failed\n");
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7513c4523ac..330140ee266 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index cfd50bc4916..62dd21b06df 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf -= TXBB_SIZE;
ring_ind--;
goto err_allocator;
}
@@ -369,6 +371,8 @@ err_buffers:
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
+ if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 62fa7eec5f0..3814fc9b114 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
}
for (port = 1; port <= dev->caps.num_ports; port++) {
+ enum mlx4_port_type port_type = 0;
+ mlx4_SENSE_PORT(dev, port, &port_type);
+ if (port_type)
+ dev->caps.port_type[port] = port_type;
ib_port_default_caps = 0;
err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
if (err)
@@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_mcg_table_free;
}
}
+ mlx4_set_port_mask(dev);
return 0;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c1e0e5f1bcd..dd7d745fbab 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 015fbe785c1..e2337a7411d 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -38,8 +38,8 @@
#include "mlx4.h"
-static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
- enum mlx4_port_type *type)
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 693aaef4e3c..718879b35b7 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
lock_sock(sk);
if (po->pppoe_dev == dev &&
- sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c498b720b53..4b42ecc63dc 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
SMSC_TRACE(PROBE, "PHY will be autodetected.");
spin_lock_init(&pdata->dev_lock);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->ioaddr == 0) {
SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
@@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
/* workaround for platforms without an eeprom, where the mac address
* is stored elsewhere and set by the bootloader. This saves the
* mac address before resetting the device */
- if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata))
@@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
}
- spin_lock_init(&pdata->mac_lock);
-
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARNING(PROBE,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 727874d9deb..47a6c870b51 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9909),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC LAN9530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9530),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN9730 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9730),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN89530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E08),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 338b07502f1..1ec9bcd6b28 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2546,6 +2546,7 @@ static struct {
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
+ { AR_SREV_VERSION_9485, "9485" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 3d5566e7af0..ff0f5ba14b2 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
- if (unlikely(len > ring->rx_buffersize)) {
+ if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index a01c2100f16..e8a80a1251b 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
-#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
+#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 98aa8af0119..20b66469d68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
/* 6x00 Specific */
#define EEPROM_6000_TX_POWER_VERSION (4)
-#define EEPROM_6000_EEPROM_VERSION (0x434)
+#define EEPROM_6000_EEPROM_VERSION (0x423)
/* 6x50 Specific */
#define EEPROM_6050_TX_POWER_VERSION (4)
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b344a921e7..e18358725b6 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
+ {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
{USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */
+ {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */
{USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */
{USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9de9dbe9439..84eb6ad3637 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
- cancel_work_sync(&rt2x00dev->rxdone_work);
- cancel_work_sync(&rt2x00dev->txdone_work);
+ if (rt2x00_is_usb(rt2x00dev)) {
+ cancel_work_sync(&rt2x00dev->rxdone_work);
+ cancel_work_sync(&rt2x00dev->txdone_work);
+ }
destroy_workqueue(rt2x00dev->workqueue);
/*
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index f74a8701c67..590f14f45a8 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
u8 efuse_data, word_cnts = 0;
u16 efuse_addr = 0;
- u8 hworden;
+ u8 hworden = 0;
u8 tmpdata[8];
if (data == NULL)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 5ef91374b23..28a6ce3bc23 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg, box_extreg;
u8 u1b_tmp;
bool isfw_read = false;
- u8 buf_index;
+ u8 buf_index = 0;
bool bwrite_sucess = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a4b2613d6a8..f5d85735d64 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
mutex_destroy(&rtlpriv->io.bb_mutex);
}
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5b9dbeafec0..b1c7d031c39 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -340,7 +340,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 18cf01719ae..ffc745b17f4 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -487,7 +487,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index e64403b6896..6ec06a4a4c6 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
kfree(wl->nvs);
- wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ if (len != sizeof(struct wl1271_nvs_file))
+ return -EINVAL;
+
+ wl->nvs = kzalloc(len, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 58236e6d092..ab607bbd629 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb)
usb = urb->context;
rx = &usb->rx;
- zd_usb_reset_rx_idle_timer(usb);
+ tasklet_schedule(&rx->reset_timer_tasklet);
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
@@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb)
__zd_usb_disable_rx(usb);
mutex_unlock(&rx->setup_mutex);
+ tasklet_kill(&rx->reset_timer_tasklet);
cancel_delayed_work_sync(&rx->idle_work);
}
@@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
zd_usb_reset_rx(usb);
}
+static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+{
+ struct zd_usb *usb = (struct zd_usb *)param;
+
+ zd_usb_reset_rx_idle_timer(usb);
+}
+
void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
@@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
+
spin_lock_init(&rx->lock);
mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
@@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb)
}
ZD_ASSERT(rx->fragment_length == 0);
INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
+ rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
+ rx->reset_timer_tasklet.data = (unsigned long)usb;
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
+
spin_lock_init(&tx->lock);
atomic_set(&tx->enabled, 0);
tx->stopped = 0;
@@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb)
if (urb->status && !usb->cmd_error)
usb->cmd_error = urb->status;
+
+ if (!usb->cmd_error &&
+ urb->actual_length != urb->transfer_buffer_length)
+ usb->cmd_error = -EIO;
}
static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
@@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb,
ep->desc.bInterval);
- urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+ urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
r = zd_submit_waiting_urb(usb, false);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index b3df2c8116c..325d0f98925 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -183,6 +183,7 @@ struct zd_usb_rx {
spinlock_t lock;
struct mutex setup_mutex;
struct delayed_work idle_work;
+ struct tasklet_struct reset_timer_tasklet;
u8 fragment[2 * USB_MAX_RX_SIZE];
unsigned int fragment_length;
unsigned int usb_packet_size;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d86ea8b0113..135df164a4c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int pci_pm_freeze(struct device *dev)
{
@@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev)
return error;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
#define pci_pm_freeze_noirq NULL
@@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev)
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 89d0a6a88df..ebf51ad1b71 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = align1 >> 1;
align += aligns[order];
}
- size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align);
+ size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
size1 = !add_size ? size :
calculate_memsize(size, min_size+add_size, 0,
- resource_size(b_res), align);
+ resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2ee442c2a5d..0485e394712 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -187,7 +187,8 @@ config MSI_LAPTOP
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL
- depends on SERIO_I8042
+ depends on INPUT && SERIO_I8042
+ select INPUT_SPARSEKMAP
---help---
This is a driver for laptops built by MSI (MICRO-STAR
INTERNATIONAL):
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 5ea6c3477d1..ac4e7f83ce6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -89,7 +89,7 @@ MODULE_LICENSE("GPL");
#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
-MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index efc776cb0c6..832a3fd7c1c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
if (!asus->inputdev)
return -ENOMEM;
- asus->inputdev->name = asus->driver->input_phys;
- asus->inputdev->phys = asus->driver->input_name;
+ asus->inputdev->name = asus->driver->input_name;
+ asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = &asus->platform_device->dev;
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 0ddc434fb93..649dcadd8ea 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0x82, { KEY_CAMERA } },
{ KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
{ KE_KEY, 0x88, { KEY_WLAN } },
+ { KE_KEY, 0xbd, { KEY_CAMERA } },
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
{ KE_KEY, 0xec, { KEY_CAMERA_UP } },
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index d653104b59c..464bb3fc4d8 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -74,6 +74,19 @@ struct pmic_gpio {
u32 trigger_type;
};
+static void pmic_program_irqtype(int gpio, int type)
+{
+ if (type & IRQ_TYPE_EDGE_RISING)
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
+ else
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
+ else
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
+};
+
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
@@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return pg->irq_base + offset;
}
+static void pmic_bus_lock(struct irq_data *data)
+{
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&pg->buslock);
+}
+
+static void pmic_bus_sync_unlock(struct irq_data *data)
+{
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+ if (pg->update_type) {
+ unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
+
+ pmic_program_irqtype(gpio, pg->trigger_type);
+ pg->update_type = 0;
+ }
+ mutex_unlock(&pg->buslock);
+}
+
/* the gpiointr register is read-clear, so just do nothing. */
static void pmic_irq_unmask(struct irq_data *data) { }
static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
- .name = "PMIC-GPIO",
- .irq_mask = pmic_irq_mask,
- .irq_unmask = pmic_irq_unmask,
- .irq_set_type = pmic_irq_type,
+ .name = "PMIC-GPIO",
+ .irq_mask = pmic_irq_mask,
+ .irq_unmask = pmic_irq_unmask,
+ .irq_set_type = pmic_irq_type,
+ .irq_bus_lock = pmic_bus_lock,
+ .irq_bus_sync_unlock = pmic_bus_sync_unlock,
};
static irqreturn_t pmic_irq_handler(int irq, void *data)
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index de434c6dc2d..d347116d150 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -571,6 +571,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "R410 Plus",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+ DMI_MATCH(DMI_BOARD_NAME, "R460"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "R518",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
- .ident = "N150/N210/N220",
+ .ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
},
.callback = dmi_check_cb,
},
@@ -771,6 +781,7 @@ static int __init samsung_init(void)
/* create a backlight device to talk to this one */
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = sabi_config->max_brightness;
backlight_device = backlight_device_register("samsung", &sdev->dev,
NULL, &backlight_ops,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e642f5f2950..8f709aec4da 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
"(default: 0)");
+static void sony_nc_kbd_backlight_resume(void);
+
enum sony_nc_rfkill {
SONY_WIFI,
SONY_BLUETOOTH,
@@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd)
if (!handles)
return -ENOMEM;
- sysfs_attr_init(&handles->devattr.attr);
- handles->devattr.attr.name = "handles";
- handles->devattr.attr.mode = S_IRUGO;
- handles->devattr.show = sony_nc_handles_show;
-
for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
if (!acpi_callsetfunc(sony_nc_acpi_handle,
"SN00", i + 0x20, &result)) {
@@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd)
}
}
- /* allow reading capabilities via sysfs */
- if (device_create_file(&pd->dev, &handles->devattr)) {
- kfree(handles);
- handles = NULL;
- return -1;
+ if (debug) {
+ sysfs_attr_init(&handles->devattr.attr);
+ handles->devattr.attr.name = "handles";
+ handles->devattr.attr.mode = S_IRUGO;
+ handles->devattr.show = sony_nc_handles_show;
+
+ /* allow reading capabilities via sysfs */
+ if (device_create_file(&pd->dev, &handles->devattr)) {
+ kfree(handles);
+ handles = NULL;
+ return -1;
+ }
}
return 0;
@@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd)
static int sony_nc_handles_cleanup(struct platform_device *pd)
{
if (handles) {
- device_remove_file(&pd->dev, &handles->devattr);
+ if (debug)
+ device_remove_file(&pd->dev, &handles->devattr);
kfree(handles);
handles = NULL;
}
@@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd)
static int sony_find_snc_handle(int handle)
{
int i;
+
+ /* not initialized yet, return early */
+ if (!handles)
+ return -1;
+
for (i = 0; i < 0x10; i++) {
if (handles->cap[i] == handle) {
dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
@@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device)
/* re-read rfkill state */
sony_nc_rfkill_update();
+ /* restore kbd backlight states */
+ sony_nc_kbd_backlight_resume();
+
return 0;
}
@@ -1355,6 +1368,7 @@ out_no_enum:
#define KBDBL_HANDLER 0x137
#define KBDBL_PRESENT 0xB00
#define SET_MODE 0xC00
+#define SET_STATE 0xD00
#define SET_TIMEOUT 0xE00
struct kbd_backlight {
@@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
(value << 0x10) | SET_MODE, &result))
return -EIO;
+ /* Try to turn the light on/off immediately */
+ sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
+ &result);
+
kbdbl_handle->mode = value;
return 0;
@@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
{
int result;
- if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result))
+ if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
return 0;
if (!(result & 0x02))
return 0;
@@ -1501,13 +1519,36 @@ outkzalloc:
static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
if (kbdbl_handle) {
+ int result;
+
device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+
+ /* restore the default hw behaviour */
+ sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
+ sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+
kfree(kbdbl_handle);
}
return 0;
}
+static void sony_nc_kbd_backlight_resume(void)
+{
+ int ignore = 0;
+
+ if (!kbdbl_handle)
+ return;
+
+ if (kbdbl_handle->mode == 0)
+ sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
+
+ if (kbdbl_handle->timeout != 0)
+ sony_call_snc_handle(KBDBL_HANDLER,
+ (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+ &ignore);
+}
+
static void sony_nc_backlight_setup(void)
{
acpi_handle unused;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a08561f5349..efb3b6b9bcd 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
tpacpi_is_fw_digit(s[1]) &&
s[2] == t && s[3] == 'T' &&
tpacpi_is_fw_digit(s[4]) &&
- tpacpi_is_fw_digit(s[5]) &&
- s[6] == 'W' && s[7] == 'W';
+ tpacpi_is_fw_digit(s[5]);
}
/* returns 0 - probe ok, or < 0 - probe error.
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 5825370bad2..08de58e7f59 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022)
* A wait_queue on the pl022->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
- while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+ while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
msleep(10);
spin_lock_irqsave(&pl022->queue_lock, flags);
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index b1a4b9f503a..871e337c917 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws)
spin_lock_irqsave(&dws->lock, flags);
dws->run = QUEUE_STOPPED;
- while (!list_empty(&dws->queue) && dws->busy && limit--) {
+ while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
spin_unlock_irqrestore(&dws->lock, flags);
msleep(10);
spin_lock_irqsave(&dws->lock, flags);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9c74aad6be9..dc25bee8d33 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data)
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
drv_data->run = QUEUE_STOPPED;
- while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
spin_lock_irqsave(&drv_data->lock, flags);
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index bdb7289a1d2..f706dba165c 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
* friends on every SPI message. Do this instead
*/
drv_data->running = false;
- while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
spin_lock_irqsave(&drv_data->lock, flags);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dca4a0bb6ca..e3786f161bc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
-source "drivers/staging/samsung-laptop/Kconfig"
-
source "drivers/staging/sm7xx/Kconfig"
source "drivers/staging/dt3155v4l/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index eb93012b6f5..f0d5c531561 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig
deleted file mode 100644
index f27c60864c2..00000000000
--- a/drivers/staging/samsung-laptop/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config SAMSUNG_LAPTOP
- tristate "Samsung Laptop driver"
- default n
- depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
- help
- This module implements a driver for the N128 Samsung Laptop
- providing control over the Wireless LED and the LCD backlight
-
- To compile this driver as a module, choose
- M here: the module will be called samsung-laptop.
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile
deleted file mode 100644
index 3c6f4204521..00000000000
--- a/drivers/staging/samsung-laptop/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO
deleted file mode 100644
index f7a6d589916..00000000000
--- a/drivers/staging/samsung-laptop/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - review from other developers
- - figure out ACPI video issues
-
-Please send patches to Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
deleted file mode 100644
index 25294462b8b..00000000000
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Samsung Laptop driver
- *
- * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
- * Copyright (C) 2009,2011 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/dmi.h>
-#include <linux/platform_device.h>
-#include <linux/rfkill.h>
-
-/*
- * This driver is needed because a number of Samsung laptops do not hook
- * their control settings through ACPI. So we have to poke around in the
- * BIOS to do things like brightness values, and "special" key controls.
- */
-
-/*
- * We have 0 - 8 as valid brightness levels. The specs say that level 0 should
- * be reserved by the BIOS (which really doesn't make much sense), we tell
- * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8
- */
-#define MAX_BRIGHT 0x07
-
-
-#define SABI_IFACE_MAIN 0x00
-#define SABI_IFACE_SUB 0x02
-#define SABI_IFACE_COMPLETE 0x04
-#define SABI_IFACE_DATA 0x05
-
-/* Structure to get data back to the calling function */
-struct sabi_retval {
- u8 retval[20];
-};
-
-struct sabi_header_offsets {
- u8 port;
- u8 re_mem;
- u8 iface_func;
- u8 en_mem;
- u8 data_offset;
- u8 data_segment;
-};
-
-struct sabi_commands {
- /*
- * Brightness is 0 - 8, as described above.
- * Value 0 is for the BIOS to use
- */
- u8 get_brightness;
- u8 set_brightness;
-
- /*
- * first byte:
- * 0x00 - wireless is off
- * 0x01 - wireless is on
- * second byte:
- * 0x02 - 3G is off
- * 0x03 - 3G is on
- * TODO, verify 3G is correct, that doesn't seem right...
- */
- u8 get_wireless_button;
- u8 set_wireless_button;
-
- /* 0 is off, 1 is on */
- u8 get_backlight;
- u8 set_backlight;
-
- /*
- * 0x80 or 0x00 - no action
- * 0x81 - recovery key pressed
- */
- u8 get_recovery_mode;
- u8 set_recovery_mode;
-
- /*
- * on seclinux: 0 is low, 1 is high,
- * on swsmi: 0 is normal, 1 is silent, 2 is turbo
- */
- u8 get_performance_level;
- u8 set_performance_level;
-
- /*
- * Tell the BIOS that Linux is running on this machine.
- * 81 is on, 80 is off
- */
- u8 set_linux;
-};
-
-struct sabi_performance_level {
- const char *name;
- u8 value;
-};
-
-struct sabi_config {
- const char *test_string;
- u16 main_function;
- const struct sabi_header_offsets header_offsets;
- const struct sabi_commands commands;
- const struct sabi_performance_level performance_levels[4];
- u8 min_brightness;
- u8 max_brightness;
-};
-
-static const struct sabi_config sabi_configs[] = {
- {
- .test_string = "SECLINUX",
-
- .main_function = 0x4c49,
-
- .header_offsets = {
- .port = 0x00,
- .re_mem = 0x02,
- .iface_func = 0x03,
- .en_mem = 0x04,
- .data_offset = 0x05,
- .data_segment = 0x07,
- },
-
- .commands = {
- .get_brightness = 0x00,
- .set_brightness = 0x01,
-
- .get_wireless_button = 0x02,
- .set_wireless_button = 0x03,
-
- .get_backlight = 0x04,
- .set_backlight = 0x05,
-
- .get_recovery_mode = 0x06,
- .set_recovery_mode = 0x07,
-
- .get_performance_level = 0x08,
- .set_performance_level = 0x09,
-
- .set_linux = 0x0a,
- },
-
- .performance_levels = {
- {
- .name = "silent",
- .value = 0,
- },
- {
- .name = "normal",
- .value = 1,
- },
- { },
- },
- .min_brightness = 1,
- .max_brightness = 8,
- },
- {
- .test_string = "SwSmi@",
-
- .main_function = 0x5843,
-
- .header_offsets = {
- .port = 0x00,
- .re_mem = 0x04,
- .iface_func = 0x02,
- .en_mem = 0x03,
- .data_offset = 0x05,
- .data_segment = 0x07,
- },
-
- .commands = {
- .get_brightness = 0x10,
- .set_brightness = 0x11,
-
- .get_wireless_button = 0x12,
- .set_wireless_button = 0x13,
-
- .get_backlight = 0x2d,
- .set_backlight = 0x2e,
-
- .get_recovery_mode = 0xff,
- .set_recovery_mode = 0xff,
-
- .get_performance_level = 0x31,
- .set_performance_level = 0x32,
-
- .set_linux = 0xff,
- },
-
- .performance_levels = {
- {
- .name = "normal",
- .value = 0,
- },
- {
- .name = "silent",
- .value = 1,
- },
- {
- .name = "overclock",
- .value = 2,
- },
- { },
- },
- .min_brightness = 0,
- .max_brightness = 8,
- },
- { },
-};
-
-static const struct sabi_config *sabi_config;
-
-static void __iomem *sabi;
-static void __iomem *sabi_iface;
-static void __iomem *f0000_segment;
-static struct backlight_device *backlight_device;
-static struct mutex sabi_mutex;
-static struct platform_device *sdev;
-static struct rfkill *rfk;
-
-static int force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force,
- "Disable the DMI check and forces the driver to be loaded");
-
-static int debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-static int sabi_get_command(u8 command, struct sabi_retval *sretval)
-{
- int retval = 0;
- u16 port = readw(sabi + sabi_config->header_offsets.port);
- u8 complete, iface_data;
-
- mutex_lock(&sabi_mutex);
-
- /* enable memory to be able to write to it */
- outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
- /* write out the command */
- writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
- writew(command, sabi_iface + SABI_IFACE_SUB);
- writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
- outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
- /* write protect memory to make it safe */
- outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
- /* see if the command actually succeeded */
- complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
- iface_data = readb(sabi_iface + SABI_IFACE_DATA);
- if (complete != 0xaa || iface_data == 0xff) {
- pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
- command, complete, iface_data);
- retval = -EINVAL;
- goto exit;
- }
- /*
- * Save off the data into a structure so the caller use it.
- * Right now we only want the first 4 bytes,
- * There are commands that need more, but not for the ones we
- * currently care about.
- */
- sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
- sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
- sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
- sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
-
-exit:
- mutex_unlock(&sabi_mutex);
- return retval;
-
-}
-
-static int sabi_set_command(u8 command, u8 data)
-{
- int retval = 0;
- u16 port = readw(sabi + sabi_config->header_offsets.port);
- u8 complete, iface_data;
-
- mutex_lock(&sabi_mutex);
-
- /* enable memory to be able to write to it */
- outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
- /* write out the command */
- writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
- writew(command, sabi_iface + SABI_IFACE_SUB);
- writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
- writeb(data, sabi_iface + SABI_IFACE_DATA);
- outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
- /* write protect memory to make it safe */
- outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
- /* see if the command actually succeeded */
- complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
- iface_data = readb(sabi_iface + SABI_IFACE_DATA);
- if (complete != 0xaa || iface_data == 0xff) {
- pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
- command, complete, iface_data);
- retval = -EINVAL;
- }
-
- mutex_unlock(&sabi_mutex);
- return retval;
-}
-
-static void test_backlight(void)
-{
- struct sabi_retval sretval;
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
- sabi_set_command(sabi_config->commands.set_backlight, 0);
- printk(KERN_DEBUG "backlight should be off\n");
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
- msleep(1000);
-
- sabi_set_command(sabi_config->commands.set_backlight, 1);
- printk(KERN_DEBUG "backlight should be on\n");
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-}
-
-static void test_wireless(void)
-{
- struct sabi_retval sretval;
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
- sabi_set_command(sabi_config->commands.set_wireless_button, 0);
- printk(KERN_DEBUG "wireless led should be off\n");
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
- msleep(1000);
-
- sabi_set_command(sabi_config->commands.set_wireless_button, 1);
- printk(KERN_DEBUG "wireless led should be on\n");
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-}
-
-static u8 read_brightness(void)
-{
- struct sabi_retval sretval;
- int user_brightness = 0;
- int retval;
-
- retval = sabi_get_command(sabi_config->commands.get_brightness,
- &sretval);
- if (!retval) {
- user_brightness = sretval.retval[0];
- if (user_brightness != 0)
- user_brightness -= sabi_config->min_brightness;
- }
- return user_brightness;
-}
-
-static void set_brightness(u8 user_brightness)
-{
- u8 user_level = user_brightness - sabi_config->min_brightness;
-
- sabi_set_command(sabi_config->commands.set_brightness, user_level);
-}
-
-static int get_brightness(struct backlight_device *bd)
-{
- return (int)read_brightness();
-}
-
-static int update_status(struct backlight_device *bd)
-{
- set_brightness(bd->props.brightness);
-
- if (bd->props.power == FB_BLANK_UNBLANK)
- sabi_set_command(sabi_config->commands.set_backlight, 1);
- else
- sabi_set_command(sabi_config->commands.set_backlight, 0);
- return 0;
-}
-
-static const struct backlight_ops backlight_ops = {
- .get_brightness = get_brightness,
- .update_status = update_status,
-};
-
-static int rfkill_set(void *data, bool blocked)
-{
- /* Do something with blocked...*/
- /*
- * blocked == false is on
- * blocked == true is off
- */
- if (blocked)
- sabi_set_command(sabi_config->commands.set_wireless_button, 0);
- else
- sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-
- return 0;
-}
-
-static struct rfkill_ops rfkill_ops = {
- .set_block = rfkill_set,
-};
-
-static int init_wireless(struct platform_device *sdev)
-{
- int retval;
-
- rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
- &rfkill_ops, NULL);
- if (!rfk)
- return -ENOMEM;
-
- retval = rfkill_register(rfk);
- if (retval) {
- rfkill_destroy(rfk);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void destroy_wireless(void)
-{
- rfkill_unregister(rfk);
- rfkill_destroy(rfk);
-}
-
-static ssize_t get_performance_level(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct sabi_retval sretval;
- int retval;
- int i;
-
- /* Read the state */
- retval = sabi_get_command(sabi_config->commands.get_performance_level,
- &sretval);
- if (retval)
- return retval;
-
- /* The logic is backwards, yeah, lots of fun... */
- for (i = 0; sabi_config->performance_levels[i].name; ++i) {
- if (sretval.retval[0] == sabi_config->performance_levels[i].value)
- return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
- }
- return sprintf(buf, "%s\n", "unknown");
-}
-
-static ssize_t set_performance_level(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- if (count >= 1) {
- int i;
- for (i = 0; sabi_config->performance_levels[i].name; ++i) {
- const struct sabi_performance_level *level =
- &sabi_config->performance_levels[i];
- if (!strncasecmp(level->name, buf, strlen(level->name))) {
- sabi_set_command(sabi_config->commands.set_performance_level,
- level->value);
- break;
- }
- }
- if (!sabi_config->performance_levels[i].name)
- return -EINVAL;
- }
- return count;
-}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
- get_performance_level, set_performance_level);
-
-
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
- pr_info("found laptop model '%s'\n",
- id->ident);
- return 0;
-}
-
-static struct dmi_system_id __initdata samsung_dmi_table[] = {
- {
- .ident = "N128",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
- DMI_MATCH(DMI_BOARD_NAME, "N128"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N130",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
- DMI_MATCH(DMI_BOARD_NAME, "N130"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X125",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
- DMI_MATCH(DMI_BOARD_NAME, "X125"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X120/X170",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NC10",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NP-Q45",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R410 Plus",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
- DMI_MATCH(DMI_BOARD_NAME, "R460"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R518",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
- DMI_MATCH(DMI_BOARD_NAME, "R518"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R519/R719",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N150/N210/N220/N230",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N150P/N210P/N220P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R530/R730",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NF110/NF210/NF310",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N145P/N250P/N260P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R70/R71",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "P460",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
- DMI_MATCH(DMI_BOARD_NAME, "P460"),
- },
- .callback = dmi_check_cb,
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
-
-static int find_signature(void __iomem *memcheck, const char *testStr)
-{
- int i = 0;
- int loca;
-
- for (loca = 0; loca < 0xffff; loca++) {
- char temp = readb(memcheck + loca);
-
- if (temp == testStr[i]) {
- if (i == strlen(testStr)-1)
- break;
- ++i;
- } else {
- i = 0;
- }
- }
- return loca;
-}
-
-static int __init samsung_init(void)
-{
- struct backlight_properties props;
- struct sabi_retval sretval;
- unsigned int ifaceP;
- int i;
- int loca;
- int retval;
-
- mutex_init(&sabi_mutex);
-
- if (!force && !dmi_check_system(samsung_dmi_table))
- return -ENODEV;
-
- f0000_segment = ioremap_nocache(0xf0000, 0xffff);
- if (!f0000_segment) {
- pr_err("Can't map the segment at 0xf0000\n");
- return -EINVAL;
- }
-
- /* Try to find one of the signatures in memory to find the header */
- for (i = 0; sabi_configs[i].test_string != 0; ++i) {
- sabi_config = &sabi_configs[i];
- loca = find_signature(f0000_segment, sabi_config->test_string);
- if (loca != 0xffff)
- break;
- }
-
- if (loca == 0xffff) {
- pr_err("This computer does not support SABI\n");
- goto error_no_signature;
- }
-
- /* point to the SMI port Number */
- loca += 1;
- sabi = (f0000_segment + loca);
-
- if (debug) {
- printk(KERN_DEBUG "This computer supports SABI==%x\n",
- loca + 0xf0000 - 6);
- printk(KERN_DEBUG "SABI header:\n");
- printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.port));
- printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.iface_func));
- printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.en_mem));
- printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.re_mem));
- printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.data_offset));
- printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.data_segment));
- }
-
- /* Get a pointer to the SABI Interface */
- ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
- ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
- sabi_iface = ioremap_nocache(ifaceP, 16);
- if (!sabi_iface) {
- pr_err("Can't remap %x\n", ifaceP);
- goto exit;
- }
- if (debug) {
- printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
- printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
-
- test_backlight();
- test_wireless();
-
- retval = sabi_get_command(sabi_config->commands.get_brightness,
- &sretval);
- printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
- }
-
- /* Turn on "Linux" mode in the BIOS */
- if (sabi_config->commands.set_linux != 0xff) {
- retval = sabi_set_command(sabi_config->commands.set_linux,
- 0x81);
- if (retval) {
- pr_warn("Linux mode was not set!\n");
- goto error_no_platform;
- }
- }
-
- /* knock up a platform device to hang stuff off of */
- sdev = platform_device_register_simple("samsung", -1, NULL, 0);
- if (IS_ERR(sdev))
- goto error_no_platform;
-
- /* create a backlight device to talk to this one */
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = sabi_config->max_brightness;
- backlight_device = backlight_device_register("samsung", &sdev->dev,
- NULL, &backlight_ops,
- &props);
- if (IS_ERR(backlight_device))
- goto error_no_backlight;
-
- backlight_device->props.brightness = read_brightness();
- backlight_device->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(backlight_device);
-
- retval = init_wireless(sdev);
- if (retval)
- goto error_no_rfk;
-
- retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
- if (retval)
- goto error_file_create;
-
-exit:
- return 0;
-
-error_file_create:
- destroy_wireless();
-
-error_no_rfk:
- backlight_device_unregister(backlight_device);
-
-error_no_backlight:
- platform_device_unregister(sdev);
-
-error_no_platform:
- iounmap(sabi_iface);
-
-error_no_signature:
- iounmap(f0000_segment);
- return -EINVAL;
-}
-
-static void __exit samsung_exit(void)
-{
- /* Turn off "Linux" mode in the BIOS */
- if (sabi_config->commands.set_linux != 0xff)
- sabi_set_command(sabi_config->commands.set_linux, 0x80);
-
- device_remove_file(&sdev->dev, &dev_attr_performance_level);
- backlight_device_unregister(backlight_device);
- destroy_wireless();
- iounmap(sabi_iface);
- iounmap(f0000_segment);
- platform_device_unregister(sdev);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-MODULE_DESCRIPTION("Samsung Backlight driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 42d6c930cc8..33167b43ac7 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
unsigned long irqflags,
const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_evtchn_to_irq(evtchn);
if (irq < 0)
@@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_virq_to_irq(virq, cpu);
if (irq < 0)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 95143dd6904..1ac94125bf9 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
xen_mm_unpin_all();
}
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -173,7 +173,7 @@ out:
#endif
shutting_down = SHUTDOWN_INVALID;
}
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
{ "poweroff", do_poweroff },
{ "halt", do_poweroff },
{ "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
{ "suspend", do_suspend },
#endif
{NULL, NULL},
diff --git a/fs/cifs/README b/fs/cifs/README
index fe168359082..74ab165fc64 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to
support and want to map the uid and gid fields
to values supplied at mount (rather than the
actual values, then set this to zero. (default 1)
-Experimental When set to 1 used to enable certain experimental
- features (currently enables multipage writes
- when signing is enabled, the multipage write
- performance enhancement was disabled when
- signing turned on in case buffer was modified
- just before it was sent, also this flag will
- be used to use the new experimental directory change
- notification code). When set to 2 enables
- an additional experimental feature, "raw ntlmssp"
- session establishment support (which allows
- specifying "sec=ntlmssp" on mount). The Linux cifs
- module will use ntlmv2 authentication encapsulated
- in "raw ntlmssp" (not using SPNEGO) when
- "sec=ntlmssp" is specified on mount.
- This support also requires building cifs with
- the CONFIG_CIFS_EXPERIMENTAL configuration flag.
These experimental features and tracing can be enabled by changing flags in
/proc/fs/cifs (after the cifs module has been installed or built into the
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e654dfd092c..53d57a3fe42 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
*/
struct cifs_server_key {
uint16_t family; /* address family */
- uint16_t port; /* IP port */
+ __be16 port; /* IP port */
union {
struct in_addr ipv4_addr;
struct in6_addr ipv6_addr;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 65829d32128..30d01bc9085 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
- proc_create("Experimental", 0, proc_fs_cifs,
- &cifs_experimental_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
- remove_proc_entry("Experimental", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
remove_proc_entry("fs/cifs", NULL);
}
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
.write = cifs_oplock_proc_write,
};
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", experimEnabled);
- return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- experimEnabled = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- experimEnabled = 1;
- else if (c == '2')
- experimEnabled = 2;
-
- return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_experimental_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_experimental_proc_write,
-};
-
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4dfba828316..33d221394ac 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
- USER_KEY_LEN + strlen(sesInfo->userName) +
+ USER_KEY_LEN + strlen(sesInfo->user_name) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
dp = description + strlen(description);
- sprintf(dp, ";user=%s", sesInfo->userName);
+ sprintf(dp, ";user=%s", sesInfo->user_name);
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index fc0fd4fde30..23d43cde430 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
case UNI_COLON:
*target = ':';
break;
- case UNI_ASTERIK:
+ case UNI_ASTERISK:
*target = '*';
break;
case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
* names are little endian 16 bit Unicode on the wire
*/
int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
const struct nls_table *cp, int mapChars)
{
int i, j, charlen;
- int len_remaining = maxlen;
char src_char;
- __u16 temp;
+ __le16 dst_char;
+ wchar_t tmp;
if (!mapChars)
return cifs_strtoUCS(target, source, PATH_MAX, cp);
- for (i = 0, j = 0; i < maxlen; j++) {
+ for (i = 0, j = 0; i < srclen; j++) {
src_char = source[i];
switch (src_char) {
case 0:
- put_unaligned_le16(0, &target[j]);
+ put_unaligned(0, &target[j]);
goto ctoUCS_out;
case ':':
- temp = UNI_COLON;
+ dst_char = cpu_to_le16(UNI_COLON);
break;
case '*':
- temp = UNI_ASTERIK;
+ dst_char = cpu_to_le16(UNI_ASTERISK);
break;
case '?':
- temp = UNI_QUESTION;
+ dst_char = cpu_to_le16(UNI_QUESTION);
break;
case '<':
- temp = UNI_LESSTHAN;
+ dst_char = cpu_to_le16(UNI_LESSTHAN);
break;
case '>':
- temp = UNI_GRTRTHAN;
+ dst_char = cpu_to_le16(UNI_GRTRTHAN);
break;
case '|':
- temp = UNI_PIPE;
+ dst_char = cpu_to_le16(UNI_PIPE);
break;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
* as they use backslash as separator.
*/
default:
- charlen = cp->char2uni(source+i, len_remaining,
- &temp);
+ charlen = cp->char2uni(source + i, srclen - i, &tmp);
+ dst_char = cpu_to_le16(tmp);
+
/*
* if no match, use question mark, which at least in
* some cases serves as wild card
*/
if (charlen < 1) {
- temp = 0x003f;
+ dst_char = cpu_to_le16(0x003f);
charlen = 1;
}
- len_remaining -= charlen;
/*
* character may take more than one byte in the source
* string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
i += charlen;
continue;
}
- put_unaligned_le16(temp, &target[j]);
+ put_unaligned(dst_char, &target[j]);
i++; /* move to next char in source string */
- len_remaining--;
}
ctoUCS_out:
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 7fe6b52df50..644dd882a56 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -44,7 +44,7 @@
* reserved symbols (along with \ and /), otherwise illegal to store
* in filenames in NTFS
*/
-#define UNI_ASTERIK (__u16) ('*' + 0xF000)
+#define UNI_ASTERISK (__u16) ('*' + 0xF000)
#define UNI_QUESTION (__u16) ('?' + 0xF000)
#define UNI_COLON (__u16) (':' + 0xF000)
#define UNI_GRTRTHAN (__u16) ('>' + 0xF000)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a51585f9852..d1a016be73b 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -30,12 +30,13 @@
#include <linux/ctype.h>
#include <linux/random.h>
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
- sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
struct TCP_Server_Info *server, char *signature)
{
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
+ mutex_lock(&server->srv_mutex);
rc = cifs_calculate_signature(cifs_pdu, server,
what_we_think_sig_should_be);
+ mutex_unlock(&server->srv_mutex);
if (rc)
return rc;
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
return rc;
}
- /* convert ses->userName to unicode and uppercase */
- len = strlen(ses->userName);
+ /* convert ses->user_name to unicode and uppercase */
+ len = strlen(ses->user_name);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
rc = -ENOMEM;
goto calc_exit_2;
}
- len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+ len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
UniStrupr(user);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f2970136d17..5c412b33cd7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,6 @@ int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
kfree(cifs_sb);
return rc;
}
+ cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
- else if (tcon->ses->userName)
- seq_printf(s, ",username=%s", tcon->ses->userName);
+ else if (tcon->ses->user_name)
+ seq_printf(s, ",username=%s", tcon->ses->user_name);
if (tcon->ses->domainName)
seq_printf(s, ",domain=%s", tcon->ses->domainName);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 17afb0fbcae..a5d1106fcbd 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -37,10 +37,9 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
- termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
#define CIFS_MIN_RCV_POOL 4
@@ -92,7 +91,8 @@ enum statusEnum {
CifsNew = 0,
CifsGood,
CifsExiting,
- CifsNeedReconnect
+ CifsNeedReconnect,
+ CifsNeedNegotiate
};
enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
- char userName[MAX_USERNAME_SIZE + 1];
+ char *user_name;
char *domainName;
char *password;
struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2644a5d6cc6..df959bae672 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
*/
while (server->tcpStatus == CifsNeedReconnect) {
wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus == CifsGood), 10 * HZ);
+ (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
- /* is TCP session is reestablished now ?*/
+ /* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
return rc;
/* set up echo request */
- smb->hdr.Tid = cpu_to_le16(0xffff);
+ smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
put_unaligned_le16(1, &smb->EchoCount);
put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
__constant_cpu_to_le16(CIFS_WRLCK))
pLockData->fl_type = F_WRLCK;
- pLockData->fl_start = parm_data->start;
- pLockData->fl_end = parm_data->start +
- parm_data->length - 1;
- pLockData->fl_pid = parm_data->pid;
+ pLockData->fl_start = le64_to_cpu(parm_data->start);
+ pLockData->fl_end = pLockData->fl_start +
+ le64_to_cpu(parm_data->length) - 1;
+ pLockData->fl_pid = le32_to_cpu(parm_data->pid);
}
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6e2b2addfc7..db9d55b507d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
spin_unlock(&GlobalMid_Lock);
- while ((server->tcpStatus != CifsExiting) &&
- (server->tcpStatus != CifsGood)) {
+ while (server->tcpStatus == CifsNeedReconnect) {
try_to_freeze();
/* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsGood;
+ server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
}
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
- remaining = total_data_size - data_in_this_rsp;
-
- if (remaining == 0)
+ if (total_data_size == data_in_this_rsp)
return 0;
- else if (remaining < 0) {
+ else if (total_data_size < data_in_this_rsp) {
cFYI(1, "total data %d smaller than data in frame %d",
total_data_size, data_in_this_rsp);
return -EINVAL;
- } else {
- cFYI(1, "missing %d bytes from transact2, check next response",
- remaining);
- if (total_data_size > maxBufSize) {
- cERROR(1, "TotalDataSize %d is over maximum buffer %d",
- total_data_size, maxBufSize);
- return -EINVAL;
- }
- return remaining;
}
+
+ remaining = total_data_size - data_in_this_rsp;
+
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
+ if (total_data_size > maxBufSize) {
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize);
+ return -EINVAL;
+ }
+ return remaining;
}
static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
pdu_length = 4; /* enough to get RFC1001 header */
incomplete_rcv:
- if (echo_retries > 0 &&
+ if (echo_retries > 0 && server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
/* null user, ie anonymous, authentication */
vol->nullauth = 1;
}
- if (strnlen(value, 200) < 200) {
+ if (strnlen(value, MAX_USERNAME_SIZE) <
+ MAX_USERNAME_SIZE) {
vol->username = value;
} else {
printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
- unsigned short int port, *sport;
+ __be16 port, *sport;
switch (addr->sa_family) {
case AF_INET:
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
+ tcp_ses->tcpStatus = CifsNeedNegotiate;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
break;
default:
/* anything else takes username/password */
- if (strncmp(ses->userName, vol->username,
+ if (ses->user_name == NULL)
+ continue;
+ if (strncmp(ses->user_name, vol->username,
MAX_USERNAME_SIZE))
continue;
if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
+static bool warned_on_ntlm; /* globals init to false automatically */
+
static struct cifsSesInfo *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
- if (volume_info->username)
- strncpy(ses->userName, volume_info->username,
- MAX_USERNAME_SIZE);
+ if (volume_info->username) {
+ ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+ if (!ses->user_name)
+ goto get_ses_fail;
+ }
/* volume_info->password freed at unmount */
if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
}
ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
+
+	/* ntlmv2 is much stronger than ntlm security, and has been broadly
+	   supported for many years; it is time to update the default security mechanism */
+ if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+ warned_on_ntlm = true;
+ cERROR(1, "default security mechanism requested. The default "
+ "security mechanism will be upgraded from ntlm to "
+ "ntlmv2 in kernel release 2.6.41");
+ }
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
generic_ip_connect(struct TCP_Server_Info *server)
{
int rc = 0;
- unsigned short int sport;
+ __be16 sport;
int slen, sfamily;
struct socket *socket = server->ssocket;
struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
static int
ip_connect(struct TCP_Server_Info *server)
{
- unsigned short int *sport;
+ __be16 *sport;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
@@ -2826,7 +2842,7 @@ try_mount_again:
remote_path_check:
/* check if a whole path (including prepath) is not remote */
- if (!rc && cifs_sb->prepathlen && tcon) {
+ if (!rc && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(cifs_sb, tcon);
if (full_path == NULL) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c27d236738f..faf59529e84 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -575,8 +575,10 @@ reopen_error_exit:
int cifs_close(struct inode *inode, struct file *file)
{
- cifsFileInfo_put(file->private_data);
- file->private_data = NULL;
+ if (file->private_data != NULL) {
+ cifsFileInfo_put(file->private_data);
+ file->private_data = NULL;
+ }
/* return code from the ->release op is always ignored */
return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
total_written += bytes_written) {
rc = -EAGAIN;
while (rc == -EAGAIN) {
+ struct kvec iov[2];
+ unsigned int len;
+
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
if (rc != 0)
break;
}
- if (experimEnabled || (pTcon->ses->server &&
- ((pTcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- == 0))) {
- struct kvec iov[2];
- unsigned int len;
-
- len = min((size_t)cifs_sb->wsize,
- write_size - total_written);
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = (char *)write_data +
- total_written;
- iov[1].iov_len = len;
- rc = CIFSSMBWrite2(xid, pTcon,
- open_file->netfid, len,
- *poffset, &bytes_written,
- iov, 1, 0);
- } else
- rc = CIFSSMBWrite(xid, pTcon,
- open_file->netfid,
- min_t(const int, cifs_sb->wsize,
- write_size - total_written),
- *poffset, &bytes_written,
- write_data + total_written,
- NULL, 0);
+
+ len = min((size_t)cifs_sb->wsize,
+ write_size - total_written);
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = (char *)write_data + total_written;
+ iov[1].iov_len = len;
+ rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+ *poffset, &bytes_written, iov, 1, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
}
tcon = tlink_tcon(open_file->tlink);
- if (!experimEnabled && tcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- cifsFileInfo_put(open_file);
- kfree(iov);
- return generic_writepages(mapping, wbc);
- }
cifsFileInfo_put(open_file);
xid = GetXid();
@@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return total_read;
}
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+
+ lock_page(page);
+ return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = cifs_page_mkwrite,
+};
+
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
@@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
cifs_invalidate_mapping(inode);
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
@@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e8804d37340..ce417a9764a 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
if (rc != 0)
return rc;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, tcon, netfid);
/* it's not a symlink */
return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
if (rc != 0)
goto out;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, pTcon, netfid);
/* it's not a symlink */
goto out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2a930a752a7..0c684ae4c07 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
memset(buf_to_free->password, 0, strlen(buf_to_free->password));
kfree(buf_to_free->password);
}
+ kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
- if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+ if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
pnotify = (struct file_notify_information *)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 16765703131..f6728eb6f4b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
bcc_ptr++;
} */
/* copy user */
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
} else {
- bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
- if (ses->userName == NULL) {
- /* BB what about null user mounts - check that we do this BB */
- } else {
- strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
- }
- bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+ if (ses->user_name != NULL)
+ strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+ /* else null user mount */
+
+ bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
- tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
- tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+ tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+ tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tilen) {
ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
tmp += len;
}
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
- len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+ len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index e25e99bf7ee..d0f53538a57 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -86,8 +86,8 @@
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only the data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
* but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 4673bc05274..e9473cbe80d 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
* the parent directory's parent as well, and so on recursively, if
* they are also freshly created.
*/
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
{
+ struct writeback_control wbc;
struct dentry *dentry = NULL;
+ int ret = 0;
while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
break;
inode = dentry->d_parent->d_inode;
- sync_mapping_buffers(inode->i_mapping);
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (ret)
+ break;
+ memset(&wbc, 0, sizeof(wbc));
+ wbc.sync_mode = WB_SYNC_ALL;
+ wbc.nr_to_write = 0; /* only write out the inode */
+ ret = sync_inode(inode, &wbc);
+ if (ret)
+ break;
}
+ return ret;
}
/*
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
if (!journal) {
ret = generic_file_fsync(file, datasync);
if (!ret && !list_empty(&inode->i_dentry))
- ext4_sync_parent(inode);
+ ret = ext4_sync_parent(inode);
goto out;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ad8e303c0d2..f2fa5e8a582 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* for partial write.
*/
set_buffer_new(bh);
+ set_buffer_mapped(bh);
}
return 0;
}
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
Indirect chain[4];
Indirect *partial;
__le32 nr = 0;
- int n;
- ext4_lblk_t last_block;
+ int n = 0;
+ ext4_lblk_t last_block, max_block;
unsigned blocksize = inode->i_sb->s_blocksize;
trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
last_block = (inode->i_size + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
if (inode->i_size & (blocksize - 1))
if (ext4_block_truncate_page(handle, mapping, inode->i_size))
goto out_stop;
- n = ext4_block_to_path(inode, last_block, offsets, NULL);
- if (n == 0)
- goto out_stop; /* error */
+ if (last_block != max_block) {
+ n = ext4_block_to_path(inode, last_block, offsets, NULL);
+ if (n == 0)
+ goto out_stop; /* error */
+ }
/*
* OK. This truncate is going to happen. We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
*/
ei->i_disksize = inode->i_size;
- if (n == 1) { /* direct blocks */
+ if (last_block == max_block) {
+ /*
+ * It is unnecessary to free any data blocks if last_block is
+ * equal to the indirect block limit.
+ */
+ goto out_unlock;
+ } else if (n == 1) { /* direct blocks */
ext4_free_data(handle, inode, NULL, i_data+offsets[0],
i_data + EXT4_NDIR_BLOCKS);
goto do_indirects;
@@ -4553,6 +4564,7 @@ do_indirects:
;
}
+out_unlock:
up_write(&ei->i_data_sem);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
/* if nrblocks are contiguous */
if (chunk) {
/*
- * With N contiguous data blocks, it need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
- * 2 dindirect blocks
- * 1 tindirect block
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
*/
- indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
- return indirects + 3;
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
/*
* if nrblocks are not contiguous, worse case, each block touch
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 056474b7b8e..8553dfb310a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
+ *
+ * To avoid j_barrier hold in userspace when a user calls freeze(),
+ * ext4 prevents a new handle from being started by s_frozen, which
+ * is in an upper layer.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
journal_t *journal;
+ handle_t *handle;
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
- vfs_check_frozen(sb, SB_FREEZE_TRANS);
- /* Special case here: if the journal has aborted behind our
- * backs (eg. EIO in the commit thread), then we still need to
- * take the FS itself readonly cleanly. */
journal = EXT4_SB(sb)->s_journal;
- if (journal) {
- if (is_journal_aborted(journal)) {
- ext4_abort(sb, "Detected aborted journal");
- return ERR_PTR(-EROFS);
- }
- return jbd2_journal_start(journal, nblocks);
+ handle = ext4_journal_current_handle();
+
+ /*
+ * If a handle has been started, it should be allowed to
+ * finish; otherwise deadlock could happen between freeze
+ * and others (e.g. truncate) due to the restart of the
+ * journal handle if the filesystem is frozen and active
+ * handles are not stopped.
+ */
+ if (!handle)
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+ if (!journal)
+ return ext4_get_nojournal();
+ /*
+ * Special case here: if the journal has aborted behind our
+ * backs (eg. EIO in the commit thread), then we still need to
+ * take the FS itself readonly cleanly.
+ */
+ if (is_journal_aborted(journal)) {
+ ext4_abort(sb, "Detected aborted journal");
+ return ERR_PTR(-EROFS);
}
- return ext4_get_nojournal();
+ return jbd2_journal_start(journal, nblocks);
}
/*
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
mutex_unlock(&ext4_li_info->li_list_mtx);
sbi->s_li_request = elr;
+ /*
+ * set elr to NULL here since it has been inserted into
+ * the request_list, and its removal and freeing are
+ * handled by ext4_clear_request_list from now on.
+ */
+ elr = NULL;
if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
ret = ext4_run_lazyinit_thread();
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+ init_timer(&sbi->s_err_report);
+ sbi->s_err_report.function = print_daily_error_info;
+ sbi->s_err_report.data = (unsigned long) sb;
+
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
- init_timer(&sbi->s_err_report);
- sbi->s_err_report.function = print_daily_error_info;
- sbi->s_err_report.data = (unsigned long) sb;
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -3672,6 +3696,7 @@ failed_mount_wq:
sbi->s_journal = NULL;
}
failed_mount3:
+ del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
/*
* LVM calls this function before a (read-only) snapshot is created. This
* gives us a chance to flush the journal completely and mark the fs clean.
+ *
+ * Note that this function alone cannot bring the filesystem into a clean
+ * state, because ext4 prevents a new handle from being started by
+ * @sb->s_frozen, which stays in an upper layer. It thus needs help from
+ * the upper layer.
*/
static int ext4_freeze(struct super_block *sb)
{
@@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
static int ext4_quota_off(struct super_block *sb, int type)
{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ handle_t *handle;
+
/* Force all delayed allocation blocks to be allocated.
* Caller already holds s_umount sem */
if (test_opt(sb, DELALLOC))
sync_filesystem(sb);
+ /* Update modification times of quota files when userspace can
+ * start looking at them */
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle))
+ goto out;
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+out:
return dquot_quota_off(sb, type);
}
@@ -4714,9 +4757,8 @@ out:
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
+ ext4_mark_inode_dirty(handle, inode);
}
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- ext4_mark_inode_dirty(handle, inode);
mutex_unlock(&inode->i_mutex);
return len;
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 20af62f4304..6e28000a4b2 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
int ret;
struct timespec now = current_kernel_time();
+ *cbh = NULL;
+
if (is_journal_aborted(journal))
return 0;
@@ -806,7 +808,7 @@ wait_for_iobuf:
if (err)
__jbd2_journal_abort_hard(journal);
}
- if (!err && !is_journal_aborted(journal))
+ if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index aba8ebaec25..e0ec3db1c39 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
if (!new_dev)
return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+ bd = bdget(device);
spin_lock(&devname_cache_lock);
if (devcache[i]) {
if (devcache[i]->device == device) {
kfree(new_dev);
+ bdput(bd);
ret = devcache[i]->devname;
spin_unlock(&devname_cache_lock);
return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
}
devcache[i] = new_dev;
devcache[i]->device = device;
- bd = bdget(device);
if (bd) {
bdevname(bd, devcache[i]->devname);
bdput(bd);
diff --git a/fs/namespace.c b/fs/namespace.c
index 7dba2ed0342..d99bcf59e4c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
.show = show_vfsmnt
};
-static int uuid_is_nil(u8 *uuid)
-{
- int i;
- u8 *cp = (u8 *)uuid;
-
- for (i = 0; i < 16; i++) {
- if (*cp++)
- return 0;
- }
- return 1;
-}
-
static int show_mountinfo(struct seq_file *m, void *v)
{
struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
if (IS_MNT_UNBINDABLE(mnt))
seq_puts(m, " unbindable");
- if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
- /* print the uuid */
- seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
/* Filesystem specific data */
seq_puts(m, " - ");
show_type(m, sb);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af0c6279a4a..e4cbc11a74a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
if (!nfs_need_commit(nfsi))
return 0;
+ spin_lock(&inode->i_lock);
ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
if (ret > 0)
nfsi->ncommit -= ret;
+ spin_unlock(&inode->i_lock);
+
if (nfs_need_commit(NFS_I(inode)))
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
return ret;
}
#else
@@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how)
res = nfs_commit_set_lock(NFS_I(inode), may_wait);
if (res <= 0)
goto out_mark_dirty;
- spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
- spin_unlock(&inode->i_lock);
if (res) {
int error;
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 0c6d8167013..7c831a2731f 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
exp_readlock();
nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
fh_put(&fh);
- rqstp->rq_client = NULL;
exp_readunlock();
/* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b36ec3eb8e..aa309aa93fe 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
static void free_generic_stateid(struct nfs4_stateid *stp)
{
- int oflag = nfs4_access_bmap_to_omode(stp);
+ int oflag;
- nfs4_file_put_access(stp->st_file, oflag);
- put_nfs4_file(stp->st_file);
+ if (stp->st_access_bmap) {
+ oflag = nfs4_access_bmap_to_omode(stp);
+ nfs4_file_put_access(stp->st_file, oflag);
+ put_nfs4_file(stp->st_file);
+ }
kmem_cache_free(stateid_slab, stp);
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5ea402023eb..9ef9ed2cfe2 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
- pgoff_t first;
xfs_off_t end;
int error;
@@ -333,7 +332,6 @@ use_alloc_page:
return error;
offset = bp->b_offset;
- first = bp->b_file_offset >> PAGE_SHIFT;
bp->b_flags |= _XBF_PAGES;
for (i = 0; i < bp->b_page_count; i++) {
@@ -657,8 +655,6 @@ xfs_buf_readahead(
xfs_off_t ioff,
size_t isize)
{
- struct backing_dev_info *bdi;
-
if (bdi_read_congested(target->bt_bdi))
return;
@@ -919,8 +915,6 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
- if (atomic_read(&bp->b_io_remaining))
- blk_flush_plug(current);
down(&bp->b_sema);
XB_SET_OWNER(bp);
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
{
trace_xfs_buf_iowait(bp, _RET_IP_);
- if (atomic_read(&bp->b_io_remaining))
- blk_flush_plug(current);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
do {
long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
- int count = 0;
struct list_head tmp;
+ struct blk_plug plug;
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
xfs_buf_delwri_split(target, &tmp, age);
list_sort(NULL, &tmp, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp)) {
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_bdstrat_cb(bp);
- count++;
}
- if (count)
- blk_flush_plug(current);
-
+ blk_finish_plug(&plug);
} while (!kthread_should_stop());
return 0;
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
int pincount = 0;
LIST_HEAD(tmp_list);
LIST_HEAD(wait_list);
+ struct blk_plug plug;
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
* we do that after issuing all the IO.
*/
list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp_list)) {
bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@ xfs_flush_buftarg(
}
xfs_bdstrat_cb(bp);
}
+ blk_finish_plug(&plug);
if (wait) {
- /* Expedite and wait for IO to complete. */
- blk_flush_plug(current);
+ /* Wait for IO to complete. */
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 508e06fd7d1..3ca79560911 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -28,53 +28,47 @@
/*
* XFS logging functions
*/
-static int
+static void
__xfs_printk(
const char *level,
const struct xfs_mount *mp,
struct va_format *vaf)
{
if (mp && mp->m_fsname)
- return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
- return printk("%sXFS: %pV\n", level, vaf);
+ printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ else
+ printk("%sXFS: %pV\n", level, vaf);
}
-int xfs_printk(
+void xfs_printk(
const char *level,
const struct xfs_mount *mp,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- r = __xfs_printk(level, mp, &vaf);
+ __xfs_printk(level, mp, &vaf);
va_end(args);
-
- return r;
}
#define define_xfs_printk_level(func, kern_level) \
-int func(const struct xfs_mount *mp, const char *fmt, ...) \
+void func(const struct xfs_mount *mp, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
- int r; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
- r = __xfs_printk(kern_level, mp, &vaf); \
+ __xfs_printk(kern_level, mp, &vaf); \
va_end(args); \
- \
- return r; \
} \
define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
define_xfs_printk_level(xfs_debug, KERN_DEBUG);
#endif
-int
+void
xfs_alert_tag(
const struct xfs_mount *mp,
int panic_tag,
@@ -97,7 +91,6 @@ xfs_alert_tag(
struct va_format vaf;
va_list args;
int do_panic = 0;
- int r;
if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
xfs_printk(KERN_ALERT, mp,
@@ -110,12 +103,10 @@ xfs_alert_tag(
vaf.fmt = fmt;
vaf.va = &args;
- r = __xfs_printk(KERN_ALERT, mp, &vaf);
+ __xfs_printk(KERN_ALERT, mp, &vaf);
va_end(args);
BUG_ON(do_panic);
-
- return r;
}
void
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h
index e77ffa16745..f1b3fc1b6c4 100644
--- a/fs/xfs/linux-2.6/xfs_message.h
+++ b/fs/xfs/linux-2.6/xfs_message.h
@@ -3,32 +3,34 @@
struct xfs_mount;
-extern int xfs_printk(const char *level, const struct xfs_mount *mp,
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
-extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert_tag(const struct xfs_mount *mp, int tag,
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
-extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#ifdef DEBUG
-extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#else
-#define xfs_debug(mp, fmt, ...) (0)
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
#endif
extern void assfail(char *expr, char *f, int l);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1ba5c451da3..b38e58d0229 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -816,75 +816,6 @@ xfs_setup_devices(
return 0;
}
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
-{
- /* only ever move the target forwards */
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
- ailp->xa_target = threshold_lsn;
- wake_up_process(ailp->xa_task);
- }
-}
-
-STATIC int
-xfsaild(
- void *data)
-{
- struct xfs_ail *ailp = data;
- xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0; /* milliseconds */
-
- while (!kthread_should_stop()) {
- /*
- * for short sleeps indicating congestion, don't allow us to
- * get woken early. Otherwise all we do is bang on the AIL lock
- * without making progress.
- */
- if (tout && tout <= 20)
- __set_current_state(TASK_KILLABLE);
- else
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(tout ?
- msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
- /* swsusp */
- try_to_freeze();
-
- ASSERT(ailp->xa_mount->m_log);
- if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
- continue;
-
- tout = xfsaild_push(ailp, &last_pushed_lsn);
- }
-
- return 0;
-} /* xfsaild */
-
-int
-xfsaild_start(
- struct xfs_ail *ailp)
-{
- ailp->xa_target = 0;
- ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
- ailp->xa_mount->m_fsname);
- if (IS_ERR(ailp->xa_task))
- return -PTR_ERR(ailp->xa_task);
- return 0;
-}
-
-void
-xfsaild_stop(
- struct xfs_ail *ailp)
-{
- kthread_stop(ailp->xa_task);
-}
-
-
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
return -error;
if (laptop_mode) {
- int prev_sync_seq = mp->m_sync_seq;
-
/*
* The disk must be active because we're syncing.
* We schedule xfssyncd now (now that the disk is
* active) instead of later (when it might not be).
*/
- wake_up_process(mp->m_sync_task);
- /*
- * We have to wait for the sync iteration to complete.
- * If we don't, the disk activity caused by the sync
- * will come after the sync is completed, and that
- * triggers another sync from laptop mode.
- */
- wait_event(mp->m_wait_single_sync_task,
- mp->m_sync_seq != prev_sync_seq);
+ flush_delayed_work_sync(&mp->m_sync_work);
}
return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
spin_lock_init(&mp->m_sb_lock);
mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0);
- INIT_LIST_HEAD(&mp->m_sync_list);
- spin_lock_init(&mp->m_sync_lock);
- init_waitqueue_head(&mp->m_wait_single_sync_task);
mp->m_super = sb;
sb->s_fs_info = mp;
@@ -1799,6 +1717,38 @@ xfs_destroy_zones(void)
}
STATIC int __init
+xfs_init_workqueues(void)
+{
+ /*
+ * max_active is set to 8 to give enough concurrency to allow
+ * multiple work operations on each CPU to run. This allows multiple
+ * filesystems to be running sync work concurrently, and scales with
+ * the number of CPUs in the system.
+ */
+ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_syncd_wq)
+ goto out;
+
+ xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_ail_wq)
+ goto out_destroy_syncd;
+
+ return 0;
+
+out_destroy_syncd:
+ destroy_workqueue(xfs_syncd_wq);
+out:
+ return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+ destroy_workqueue(xfs_ail_wq);
+ destroy_workqueue(xfs_syncd_wq);
+}
+
+STATIC int __init
init_xfs_fs(void)
{
int error;
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
if (error)
goto out;
- error = xfs_mru_cache_init();
+ error = xfs_init_workqueues();
if (error)
goto out_destroy_zones;
+ error = xfs_mru_cache_init();
+ if (error)
+ goto out_destroy_wq;
+
error = xfs_filestream_init();
if (error)
goto out_mru_cache_uninit;
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
if (error)
goto out_cleanup_procfs;
+ error = xfs_init_workqueues();
+ if (error)
+ goto out_sysctl_unregister;
+
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
+ out_destroy_wq:
+ xfs_destroy_workqueues();
out_destroy_zones:
xfs_destroy_zones();
out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
+ xfs_destroy_workqueues();
xfs_destroy_zones();
}
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 9cf35a688f5..e4f9c1b0836 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -22,6 +22,7 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
@@ -39,6 +40,8 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
/*
* The inode lookup is done in batches to keep the amount of lock traffic and
* radix tree lookups to a minimum. The batch size is a trade off between
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
xfs_unmountfs_writesb(mp);
}
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
- struct xfs_mount *mp,
- void *data,
- void (*syncer)(struct xfs_mount *, void *),
- struct completion *completion)
-{
- struct xfs_sync_work *work;
-
- work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
- INIT_LIST_HEAD(&work->w_list);
- work->w_syncer = syncer;
- work->w_data = data;
- work->w_mount = mp;
- work->w_completion = completion;
- spin_lock(&mp->m_sync_lock);
- list_add_tail(&work->w_list, &mp->m_sync_list);
- spin_unlock(&mp->m_sync_lock);
- wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations. At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- xfs_sync_data(mp, SYNC_TRYLOCK);
- xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
- iput(inode);
-}
-
-void
-xfs_flush_inodes(
- xfs_inode_t *ip)
+static void
+xfs_syncd_queue_sync(
+ struct xfs_mount *mp)
{
- struct inode *inode = VFS_I(ip);
- DECLARE_COMPLETION_ONSTACK(completion);
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
- wait_for_completion(&completion);
- xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+ queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+ msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
/*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
*/
STATIC void
xfs_sync_worker(
- struct xfs_mount *mp,
- void *unused)
+ struct work_struct *work)
{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_sync_work);
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
error = xfs_fs_log_dummy(mp);
else
xfs_log_force(mp, 0);
- xfs_reclaim_inodes(mp, 0);
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+ /* start pushing all the metadata that is currently dirty */
+ xfs_ail_push_all(mp->m_ail);
}
- mp->m_sync_seq++;
- wake_up(&mp->m_wait_single_sync_task);
+
+ /* queue us up again */
+ xfs_syncd_queue_sync(mp);
}
-STATIC int
-xfssyncd(
- void *arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+ struct xfs_mount *mp)
{
- struct xfs_mount *mp = arg;
- long timeleft;
- xfs_sync_work_t *work, *n;
- LIST_HEAD (tmp);
-
- set_freezable();
- timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
- for (;;) {
- if (list_empty(&mp->m_sync_list))
- timeleft = schedule_timeout_interruptible(timeleft);
- /* swsusp */
- try_to_freeze();
- if (kthread_should_stop() && list_empty(&mp->m_sync_list))
- break;
- spin_lock(&mp->m_sync_lock);
- /*
- * We can get woken by laptop mode, to do a sync -
- * that's the (only!) case where the list would be
- * empty with time remaining.
- */
- if (!timeleft || list_empty(&mp->m_sync_list)) {
- if (!timeleft)
- timeleft = xfs_syncd_centisecs *
- msecs_to_jiffies(10);
- INIT_LIST_HEAD(&mp->m_sync_work.w_list);
- list_add_tail(&mp->m_sync_work.w_list,
- &mp->m_sync_list);
- }
- list_splice_init(&mp->m_sync_list, &tmp);
- spin_unlock(&mp->m_sync_lock);
+ /*
+ * We can have inodes enter reclaim after we've shut down the syncd
+ * workqueue during unmount, so don't allow reclaim work to be queued
+ * during unmount.
+ */
+ if (!(mp->m_super->s_flags & MS_ACTIVE))
+ return;
- list_for_each_entry_safe(work, n, &tmp, w_list) {
- (*work->w_syncer)(mp, work->w_data);
- list_del(&work->w_list);
- if (work == &mp->m_sync_work)
- continue;
- if (work->w_completion)
- complete(work->w_completion);
- kmem_free(work);
- }
+ rcu_read_lock();
+ if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+ msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
+ rcu_read_unlock();
+}
- return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_reclaim_work);
+
+ xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+ xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations. At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ queue_work(xfs_syncd_wq, &mp->m_flush_work);
+ flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(work,
+ struct xfs_mount, m_flush_work);
+
+ xfs_sync_data(mp, SYNC_TRYLOCK);
+ xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
int
xfs_syncd_init(
struct xfs_mount *mp)
{
- mp->m_sync_work.w_syncer = xfs_sync_worker;
- mp->m_sync_work.w_mount = mp;
- mp->m_sync_work.w_completion = NULL;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
- if (IS_ERR(mp->m_sync_task))
- return -PTR_ERR(mp->m_sync_task);
+ INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+ INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+ INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+ xfs_syncd_queue_sync(mp);
+ xfs_syncd_queue_reclaim(mp);
+
return 0;
}
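
The init/stop pair above follows the standard delayed-work lifecycle: embed a struct delayed_work, initialise it with a handler, queue it with a delay, and let the handler requeue itself. The sketch below shows that pattern in isolation; the context structure and function names (example_ctx, example_worker) are hypothetical, and it queues on the system workqueue rather than a dedicated queue like xfs_syncd_wq.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct delayed_work	work;
};

static void example_worker(struct work_struct *work)
{
	/* recover the owning context from the embedded delayed_work */
	struct example_ctx *ctx = container_of(to_delayed_work(work),
					       struct example_ctx, work);

	/* ... perform the periodic job for ctx here ... */

	/* requeue ourselves so the work keeps running periodically */
	schedule_delayed_work(&ctx->work, msecs_to_jiffies(30 * 1000));
}

static void example_start(struct example_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->work, example_worker);
	schedule_delayed_work(&ctx->work, 0);
}

static void example_stop(struct example_ctx *ctx)
{
	/* waits for a running instance; safe even if the work requeues itself */
	cancel_delayed_work_sync(&ctx->work);
}
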
@@ -582,7 +569,9 @@ void
xfs_syncd_stop(
struct xfs_mount *mp)
{
- kthread_stop(mp->m_sync_task);
+ cancel_delayed_work_sync(&mp->m_sync_work);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ cancel_work_sync(&mp->m_flush_work);
}
void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
spin_unlock(&ip->i_mount->m_perag_lock);
+
+ /* schedule periodic background inode reclaim */
+ xfs_syncd_queue_reclaim(ip->i_mount);
+
trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
-1, _RET_IP_);
}
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
}
/*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we will throttle the speed of reclaim via doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
*/
static int
xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
if (nr_to_scan) {
+ /* kick background reclaimer and push the AIL */
+ xfs_syncd_queue_reclaim(mp);
+ xfs_ail_push_all(mp->m_ail);
+
if (!(gfp_mask & __GFP_FS))
return -1;
- xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+ xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+ &nr_to_scan);
/* terminate if we don't exhaust the scan */
if (nr_to_scan > 0)
return -1;
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 32ba6628290..e3a6ad27415 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
+extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
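
The xfs_syncd_wq pointer made extern here is defined and created elsewhere in the patch; that hunk is not shown in this excerpt. For orientation only, a workqueue used on the writeback/reclaim path is normally allocated with WQ_MEM_RECLAIM so it keeps a rescuer thread under memory pressure. The snippet below is an illustrative guess at such setup and teardown, not the patch's actual code.

#include <linux/workqueue.h>
#include <linux/errno.h>

struct workqueue_struct *xfs_syncd_wq;	/* definition matching the extern above */

static int example_syncd_wq_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread so the queue can make
	 * progress under memory pressure; the max_active value of 8 is
	 * illustrative only.
	 */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_MEM_RECLAIM, 8);
	if (!xfs_syncd_wq)
		return -ENOMEM;
	return 0;
}

static void example_syncd_wq_destroy(void)
{
	destroy_workqueue(xfs_syncd_wq);
}
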
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 254ee062bd7..69228aa8605 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl;
struct xfs_dquot *dqp;
- int niters;
int error;
if (!q)
return 0;
- niters = 0;
again:
mutex_lock(&q->qi_dqlist_lock);
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
{
xfs_buf_t *bp;
int error;
- int notcommitted;
- int incr;
int type;
ASSERT(blkcnt > 0);
- notcommitted = 0;
- incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
- XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
error = 0;
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index c9446f1c726..567b29b9f1b 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone;
* block in the dquot/xqm code.
*/
#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
typedef xfs_dqhash_t xfs_dqlist_t;
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 0d62a07b7fd..2dadb15d5ca 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
{
int error;
uint qf;
- uint accflags;
__int64_t sbflags;
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
* Switching on quota accounting must be done at mount time.
*/
- accflags = flags & XFS_ALL_QUOTA_ACCT;
flags &= ~(XFS_ALL_QUOTA_ACCT);
sbflags = 0;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4bc3c649aee..27d64d752ea 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2395,17 +2395,33 @@ xfs_free_extent(
memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
+
+ /*
+ * validate that the block number is legal - this enables us to detect
+ * and handle a silent filesystem corruption rather than crashing.
+ */
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
- ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+ if (args.agno >= args.mp->m_sb.sb_agcount)
+ return EFSCORRUPTED;
+
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+ if (args.agbno >= args.mp->m_sb.sb_agblocks)
+ return EFSCORRUPTED;
+
args.pag = xfs_perag_get(args.mp, args.agno);
- if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+ ASSERT(args.pag);
+
+ error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+ if (error)
goto error0;
-#ifdef DEBUG
- ASSERT(args.agbp != NULL);
- ASSERT((args.agbno + len) <=
- be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+ /* validate the extent size is legal now that we have the agf locked */
+ if (args.agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+ error = EFSCORRUPTED;
+ goto error0;
+ }
+
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
xfs_perag_put(args.pag);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 46cc40131d4..576fdfe81d6 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -198,6 +198,41 @@ xfs_inode_item_size(
}
/*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extents as we place them into the on-disk inode. In this case, we
+ * need to do this conversion before we write the extents into the log. Because
+ * we don't have the disk inode to write into here, we allocate a buffer and
+ * format the extents into it via xfs_iextents_copy(). We free the buffer in
+ * the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+ struct xfs_inode *ip,
+ struct xfs_log_iovec *vecp,
+ int whichfork,
+ int type)
+{
+ xfs_bmbt_rec_t *ext_buffer;
+
+ ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+ if (whichfork == XFS_DATA_FORK)
+ ip->i_itemp->ili_extents_buf = ext_buffer;
+ else
+ ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+ vecp->i_addr = ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+ vecp->i_type = type;
+}
+
+/*
* This is called to fill in the vector of log iovecs for the
* given inode log item. It fills the first item with an inode
* log format structure, the second with the on-disk inode structure,
@@ -213,7 +248,6 @@ xfs_inode_item_format(
struct xfs_inode *ip = iip->ili_inode;
uint nvecs;
size_t data_bytes;
- xfs_bmbt_rec_t *ext_buffer;
xfs_mount_t *mp;
vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
} else
#endif
{
- /*
- * There are delayed allocation extents
- * in the inode, or we need to convert
- * the extents to on disk format.
- * Use xfs_iextents_copy()
- * to copy only the real extents into
- * a separate buffer. We'll free the
- * buffer in the unlock routine.
- */
- ext_buffer = kmem_alloc(ip->i_df.if_bytes,
- KM_SLEEP);
- iip->ili_extents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_DATA_FORK);
- vecp->i_type = XLOG_REG_TYPE_IEXT;
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
*/
vecp->i_addr = ip->i_afp->if_u1.if_extents;
vecp->i_len = ip->i_afp->if_bytes;
+ vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
ASSERT(iip->ili_aextents_buf == NULL);
- /*
- * Need to endian flip before logging
- */
- ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
- KM_SLEEP);
- iip->ili_aextents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_ATTR_FORK);
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
- vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dc1882adaf5..751e94fe1f7 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -204,7 +204,6 @@ xfs_bulkstat(
xfs_agi_t *agi; /* agi header data */
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
- xfs_daddr_t bno; /* inode cluster start daddr */
int chunkidx; /* current index into inode chunk */
int clustidx; /* current index into inode cluster */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
mp->m_sb.sb_inopblog);
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
/*
* Skip if this inode is free.
*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 25efa9b8a60..b612ce4520a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
break;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
- if (!xfs_trans_ail_tail(log->l_ailp) &&
+ if (!xfs_ail_min_lsn(log->l_ailp) &&
xlog_iclogs_empty(log)) {
if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
xfs_lsn_t tail_lsn;
struct log *log = mp->m_log;
- tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+ tail_lsn = xfs_ail_min_lsn(mp->m_ail);
if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
* the filesystem is shutting down.
*/
if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+ xfs_ail_push(log->l_ailp, threshold_lsn);
}
/*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
}
+/*
+ * Check to make sure the grant write head didn't just overlap the tail. If
+ * the cycles are the same, we can't be overlapping. Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so it can give false positives. Rather than assert
+ * on failures, use a warn-once flag and a panic tag to allow the admin to
+ * determine if they want to panic the machine when such an error occurs. For
+ * debug kernels this will have the same effect as using an assert but, unlike
+ * an assert, it can be turned off at runtime.
+ */
STATIC void
xlog_verify_grant_tail(
struct log *log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
int tail_cycle, tail_blocks;
int cycle, space;
- /*
- * Check to make sure the grant write head didn't just over lap the
- * tail. If the cycles are the same, we can't be overlapping.
- * Otherwise, make sure that the cycles differ by exactly one and
- * check the byte count.
- */
xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
if (tail_cycle != cycle) {
- ASSERT(cycle - 1 == tail_cycle);
- ASSERT(space <= BBTOB(tail_blocks));
+ if (cycle - 1 != tail_cycle &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: cycle - 1 != tail_cycle", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
+
+ if (space > BBTOB(tail_blocks) &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: space > BBTOB(tail_blocks)", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
}
}
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ffae692c983..5864850e9e3 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */
#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being
shutdown */
+#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */
#ifdef __KERNEL__
/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a62e8971539..19af0ab0d0c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
struct mutex m_icsb_mutex; /* balancer sync lock */
#endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
- struct task_struct *m_sync_task; /* generalised sync thread */
- xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
- struct list_head m_sync_list; /* sync thread work item list */
- spinlock_t m_sync_lock; /* work item list lock */
- int m_sync_seq; /* sync thread generation no. */
- wait_queue_head_t m_wait_single_sync_task;
+ struct delayed_work m_sync_work; /* background sync work */
+ struct delayed_work m_reclaim_work; /* background inode reclaim */
+ struct work_struct m_flush_work; /* background inode flush */
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
struct shrinker m_inode_shrink; /* inode reclaim shrinker */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e2..acdb92f14d5 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,74 +28,138 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
+struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
#ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_log_item_t *prev_lip;
+
+ if (list_empty(&ailp->xa_ail))
+ return;
+
+ /*
+ * Check the next and previous entries are valid.
+ */
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+ prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+
+#ifdef XFS_TRANS_DEBUG
+ /*
+ * Walk the list checking lsn ordering, and that every entry has the
+ * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+ * when specifically debugging the transaction subsystem.
+ */
+ prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+ list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = lip;
+ }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
+/*
+ * Return a pointer to the first item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the last item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL. If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ if (lip->li_ail.next == &ailp->xa_ail)
+ return NULL;
+
+ return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
/*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log. This is exactly the LSN of the first
- * item in the AIL. If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log. This is exactly the LSN of the first item in the AIL. If the AIL
+ * is empty, then this function returns 0.
*
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
*/
xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
xfs_log_item_t *lip;
spin_lock(&ailp->xa_lock);
lip = xfs_ail_min(ailp);
- if (lip == NULL) {
- lsn = (xfs_lsn_t)0;
- } else {
+ if (lip)
lsn = lip->li_lsn;
- }
spin_unlock(&ailp->xa_lock);
return lsn;
}
/*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward. It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
*/
-void
-xfs_trans_ail_push(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+ struct xfs_ail *ailp)
{
- xfs_log_item_t *lip;
+ xfs_lsn_t lsn = 0;
+ xfs_log_item_t *lip;
- lip = xfs_ail_min(ailp);
- if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
- xfsaild_wakeup(ailp, threshold_lsn);
- }
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_ail_max(ailp);
+ if (lip)
+ lsn = lip->li_lsn;
+ spin_unlock(&ailp->xa_lock);
+
+ return lsn;
}
/*
@@ -236,16 +300,57 @@ out:
}
/*
- * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * splice the log item list into the AIL at the given LSN.
*/
-long
-xfsaild_push(
- struct xfs_ail *ailp,
- xfs_lsn_t *last_lsn)
+static void
+xfs_ail_splice(
+ struct xfs_ail *ailp,
+ struct list_head *list,
+ xfs_lsn_t lsn)
{
- long tout = 0;
- xfs_lsn_t last_pushed_lsn = *last_lsn;
+ xfs_log_item_t *next_lip;
+
+ /* If the list is empty, just insert the item. */
+ if (list_empty(&ailp->xa_ail)) {
+ list_splice(list, &ailp->xa_ail);
+ return;
+ }
+
+ list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+ if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+ break;
+ }
+
+ ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+ XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+ list_splice_init(list, &next_lip->li_ail);
+}
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_ail_check(ailp, lip);
+ list_del(&lip->li_ail);
+ xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+ struct work_struct *work)
+{
+ struct xfs_ail *ailp = container_of(to_delayed_work(work),
+ struct xfs_ail, xa_work);
+ long tout;
xfs_lsn_t target = ailp->xa_target;
xfs_lsn_t lsn;
xfs_log_item_t *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
spin_lock(&ailp->xa_lock);
xfs_trans_ail_cursor_init(ailp, cur);
- lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+ lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
/*
* AIL is empty or our push has reached the end.
*/
xfs_trans_ail_cursor_done(ailp, cur);
spin_unlock(&ailp->xa_lock);
- *last_lsn = 0;
- return tout;
+ ailp->xa_last_pushed_lsn = 0;
+ return;
}
XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(xs_push_ail_success);
IOP_PUSH(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
push_xfsbufd = 1;
break;
@@ -319,7 +424,7 @@ xfsaild_push(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@@ -374,9 +479,23 @@ xfsaild_push(
wake_up_process(mp->m_ddev_targp->bt_task);
}
+ /* assume we have more work to do in a short while */
+ tout = 10;
if (!count) {
/* We're past our target or empty, so idle */
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
+
+ /*
+ * Check for an updated push target before clearing the
+ * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+ * work to do. Wait a bit longer before starting that work.
+ */
+ smp_rmb();
+ if (ailp->xa_target == target) {
+ clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+ return;
+ }
+ tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
* We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
* start the next scan from the start of the AIL.
*/
tout = 50;
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
} else if ((stuck * 100) / count > 90) {
/*
* Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
* continuing from where we were.
*/
tout = 20;
- } else {
- /* more to do, but wait a short while before continuing */
- tout = 10;
}
- *last_lsn = last_pushed_lsn;
- return tout;
+
+ /* There is more to do, requeue us. */
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+ msecs_to_jiffies(tout));
+}
+
+/*
+ * This routine is called to move the tail of the AIL forward. It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only queue
+ * work if we set the pushing bit appropriately.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+ struct xfs_ail *ailp,
+ xfs_lsn_t threshold_lsn)
+{
+ xfs_log_item_t *lip;
+
+ lip = xfs_ail_min(ailp);
+ if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+ XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+ return;
+
+ /*
+ * Ensure that the new target is noticed in push code before it clears
+ * the XFS_AIL_PUSHING_BIT.
+ */
+ smp_wmb();
+ ailp->xa_target = threshold_lsn;
+ if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}
+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+ struct xfs_ail *ailp)
+{
+ xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
+
+ if (threshold_lsn)
+ xfs_ail_push(ailp, threshold_lsn);
+}
/*
* This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
xfs_mount_t *mp)
{
struct xfs_ail *ailp;
- int error;
ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
ailp->xa_mount = mp;
INIT_LIST_HEAD(&ailp->xa_ail);
spin_lock_init(&ailp->xa_lock);
- error = xfsaild_start(ailp);
- if (error)
- goto out_free_ailp;
+ INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
mp->m_ail = ailp;
return 0;
-
-out_free_ailp:
- kmem_free(ailp);
- return error;
}
void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
- xfsaild_stop(ailp);
+ cancel_delayed_work_sync(&ailp->xa_work);
kmem_free(ailp);
}
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
- struct xfs_ail *ailp,
- struct list_head *list,
- xfs_lsn_t lsn)
-{
- xfs_log_item_t *next_lip;
-
- /*
- * If the list is empty, just insert the item.
- */
- if (list_empty(&ailp->xa_ail)) {
- list_splice(list, &ailp->xa_ail);
- return;
- }
-
- list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
- if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
- break;
- }
-
- ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
- (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
- list_splice_init(list, &next_lip->li_ail);
- return;
-}
-
-/*
- * Delete the given item from the AIL. Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_ail_check(ailp, lip);
- list_del(&lip->li_ail);
- xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
- struct xfs_ail *ailp)
-{
- if (list_empty(&ailp->xa_ail))
- return NULL;
-
- return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL. If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- if (lip->li_ail.next == &ailp->xa_ail)
- return NULL;
-
- return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_log_item_t *prev_lip;
-
- if (list_empty(&ailp->xa_ail))
- return;
-
- /*
- * Check the next and previous entries are valid.
- */
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
- prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
- /*
- * Walk the list checking lsn ordering, and that every entry has the
- * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
- * when specifically debugging the transaction subsystem.
- */
- prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
- list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = lip;
- }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c238fa..6b164e9e9a1 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
struct xfs_ail {
struct xfs_mount *xa_mount;
struct list_head xa_ail;
- uint xa_gen;
- struct task_struct *xa_task;
xfs_lsn_t xa_target;
struct xfs_ail_cursor xa_cursors;
spinlock_t xa_lock;
+ struct delayed_work xa_work;
+ xfs_lsn_t xa_last_pushed_lsn;
+ unsigned long xa_flags;
};
+#define XFS_AIL_PUSHING_BIT 0
+
/*
* From xfs_trans_ail.c
*/
+
+extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
+
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_log_item **log_items, int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
xfs_trans_ail_delete_bulk(ailp, &lip, 1);
}
-void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
+
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
-xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp);
-
struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
-long xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int xfsaild_start(struct xfs_ail *);
-void xfsaild_stop(struct xfs_ail *);
-
#if BITS_PER_LONG != 64
static inline void
xfs_trans_ail_copy_lsn(
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 8e20540043f..089fe43211a 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -12,6 +12,7 @@
/**
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
+ * @irq_flags: - IRQF configuration flags
* @board_specific_setup: - called before probing the chip (power,reset)
* @transceiver_enable: - called to power on/off the transceiver
* @power_enable: - called to power on/off the mcp *and* the
@@ -24,6 +25,7 @@
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
+ unsigned long irq_flags;
int (*board_specific_setup)(struct spi_device *spi);
int (*transceiver_enable)(int enable);
int (*power_enable) (int enable);
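
A board file passes the new irq_flags field alongside the existing platform data when registering the SPI device. The following is a hypothetical board-support fragment (the names, bus number, frequencies and trigger type are illustrative only), showing where irq_flags would typically be set:

#include <linux/can/platform/mcp251x.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/init.h>

static struct mcp251x_platform_data example_mcp251x_pdata = {
	.oscillator_frequency	= 16000000,		/* 16 MHz crystal */
	.irq_flags		= IRQF_TRIGGER_FALLING,	/* match the board's INT wiring */
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "mcp2515",
		.platform_data	= &example_mcp251x_pdata,
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 1,
		.chip_select	= 0,
	},
};

/* registered from the board's init code, e.g.:
 *	spi_register_board_info(example_spi_board_info,
 *				ARRAY_SIZE(example_spi_board_info));
 */
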
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index ad1b19aa650..aef23309a74 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones,
*/
static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
{
- return pdev->dev.platform_data;
+ return pdev->mfd_cell;
}
/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the .mfd_data entry from the mfd_cell that created it.
+ * Otherwise just return the platform_data pointer.
+ * This maintains compatibility with platform drivers whose devices aren't
+ * created by the mfd layer and which expect platform_data to contain what
+ * would've otherwise been in mfd_data.
*/
static inline void *mfd_get_data(struct platform_device *pdev)
{
- return mfd_get_cell(pdev)->mfd_data;
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+
+ if (cell)
+ return cell->mfd_data;
+ else
+ return pdev->dev.platform_data;
}
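
A driver that can be instantiated both as an MFD cell and directly from board code can then use mfd_get_data() as its single lookup, relying on the fallback described in the comment above. A minimal, hypothetical consumer (struct example_cell_data and the probe function are illustrative, not part of any real driver) might look like:

#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/errno.h>

struct example_cell_data {
	unsigned long	base;
};

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Returns the mfd_cell's mfd_data for devices created by
	 * mfd_add_devices(), or pdev->dev.platform_data for devices
	 * registered directly by board code.
	 */
	struct example_cell_data *data = mfd_get_data(pdev);

	if (!data)
		return -EINVAL;

	/* ... use data->base to set up the device ... */
	return 0;
}
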
extern int mfd_add_devices(struct device *parent, int id,
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eeec00abb66..7fa95df6014 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -270,7 +270,8 @@ struct nf_afinfo {
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
- int (*route)(struct dst_entry **dst, struct flowi *fl);
+ int (*route)(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
int (*reroute)(struct sk_buff *skb,
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ec333d83f3b..5a262e3ae71 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -293,7 +293,7 @@ struct ip_set {
/* Lock protecting the set data */
rwlock_t lock;
/* References to the set */
- atomic_t ref;
+ u32 ref;
/* The core set type */
struct ip_set_type *type;
/* The type variant doing the real job */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ec9d9bea1e3..a0196ac7905 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
if (h->netmask != HOST_MASK)
NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
#endif
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
if (with_timeout(h->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d96db982570..744942c95fe 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -14,6 +14,8 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+struct mfd_cell;
+
struct platform_device {
const char * name;
int id;
@@ -23,6 +25,9 @@ struct platform_device {
const struct platform_device_id *id_entry;
+ /* MFD cell pointer */
+ struct mfd_cell *mfd_cell;
+
/* arch specific additions */
struct pdev_archdata archdata;
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a89e361287..083ffea7ba1 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; }
extern struct mutex pm_mutex;
-#ifndef CONFIG_HIBERNATION
-static inline void register_nosave_region(unsigned long b, unsigned long e)
-{
-}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e)
-{
-}
-
+#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 814b434db74..d516f00c8e0 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
*/
if (likely(skb->dev && skb->dev->nd_net))
return dev_net(skb->dev);
- if (skb_dst(skb)->dev)
+ if (skb_dst(skb) && skb_dst(skb)->dev)
return dev_net(skb_dst(skb)->dev);
WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
__func__, __LINE__);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cb13239fe8e..025d4cc7bbf 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action {
* that TX/RX_STOP can pass NULL for this parameter.
* The @buf_size parameter is only valid when the action is set to
* %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- * buffer size (number of subframes) for this session -- aggregates
- * containing more subframes than this may not be transmitted to the peer.
+ * buffer size (number of subframes) for this session -- the driver
+ * may neither send aggregates containing more subframes than this
+ * nor send aggregates in a way that lost frames would exceed the
+ * buffer size. If just limiting the aggregate size, this would be
+ * possible with a buf_size of 8:
+ * - TX: 1.....7
+ * - RX: 2....7 (lost frame #1)
+ * - TX: 8..1...
+ * which is invalid since #1 was now re-transmitted well past the
+ * buffer size of 8. Correct ways to retransmit #1 would be:
+ * - TX: 1 or 18 or 81
+ * Even "189" would be wrong since 1 could be lost again.
+ *
* Returns a negative error code on failure.
* The callback can sleep.
*
diff --git a/include/net/route.h b/include/net/route.h
index f88429cad52..8fce0621cad 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -64,6 +64,7 @@ struct rtable {
__be32 rt_dst; /* Path destination */
__be32 rt_src; /* Path source */
+ int rt_route_iif;
int rt_iif;
int rt_oif;
__u32 rt_mark;
@@ -80,12 +81,12 @@ struct rtable {
static inline bool rt_is_input_route(struct rtable *rt)
{
- return rt->rt_iif != 0;
+ return rt->rt_route_iif != 0;
}
static inline bool rt_is_output_route(struct rtable *rt)
{
- return rt->rt_iif == 0;
+ return rt->rt_route_iif == 0;
}
struct ip_rt_acct {
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47..6de9a8fc341 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config HIBERNATE_CALLBACKS
+ bool
+
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+ select HIBERNATE_CALLBACKS
select LZO_COMPRESS
select LZO_DECOMPRESS
---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
config PM_SLEEP
def_bool y
- depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+ depends on SUSPEND || HIBERNATE_CALLBACKS
config PM_SLEEP_SMP
def_bool y
diff --git a/kernel/sched.c b/kernel/sched.c
index 48013633d79..a187c3fe027 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4111,20 +4111,20 @@ need_resched:
try_to_wake_up_local(to_wakeup);
}
deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+ /*
+ * If we are going to sleep and we have plugged IO queued, make
+ * sure to submit it to avoid deadlocks.
+ */
+ if (blk_needs_flush_plug(prev)) {
+ raw_spin_unlock(&rq->lock);
+ blk_flush_plug(prev);
+ raw_spin_lock(&rq->lock);
+ }
}
switch_count = &prev->nvcsw;
}
- /*
- * If we are going to sleep and we have plugged IO queued, make
- * sure to submit it to avoid deadlocks.
- */
- if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
- raw_spin_unlock(&rq->lock);
- blk_flush_plug(prev);
- raw_spin_lock(&rq->lock);
- }
-
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
diff --git a/mm/memory.c b/mm/memory.c
index 9da8cab1b1b..b623a249918 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1410,6 +1410,13 @@ no_page_table:
return page;
}
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_stack_continue(vma->vm_prev, addr);
+}
+
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
- struct vm_area_struct *gate_vma = get_gate_vma(mm);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pte_unmap(pte);
return i ? : -EFAULT;
}
+ vma = get_gate_vma(mm);
if (pages) {
struct page *page;
- page = vm_normal_page(gate_vma, start, *pte);
+ page = vm_normal_page(vma, start, *pte);
if (!page) {
if (!(gup_flags & FOLL_DUMP) &&
is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
get_page(page);
}
pte_unmap(pte);
- if (vmas)
- vmas[i] = gate_vma;
- i++;
- start += PAGE_SIZE;
- nr_pages--;
- continue;
+ goto next_page;
}
if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
continue;
}
+ /*
+ * If we don't actually want the page itself,
+ * and it's the stack guard page, just skip it.
+ */
+ if (!pages && stack_guard_page(vma, start))
+ goto next_page;
+
do {
struct page *page;
unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
flush_anon_page(vma, page, start);
flush_dcache_page(page);
}
+next_page:
if (vmas)
vmas[i] = vma;
i++;
diff --git a/mm/mlock.c b/mm/mlock.c
index 2689a08c79a..6b55e3efe0d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
}
}
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_stack_continue(vma->vm_prev, addr);
-}
-
/**
* __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKED)
gup_flags |= FOLL_MLOCK;
- /* We don't try to access the guard page of a stack vma */
- if (stack_guard_page(vma, start)) {
- addr += PAGE_SIZE;
- nr_pages--;
- }
-
return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2ec8eb5a9cd..8c05e5b43b6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma,
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- perf_event_mmap(vma);
+ error = -ENOMEM;
+ if (grow <= vma->vm_pgoff) {
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ perf_event_mmap(vma);
+ }
}
}
vma_unlock_anon_vma(vma);
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index d951f93644b..3da418894ef 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
#include "dsa_priv.h"
#include "mv88e6xxx.h"
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
{
int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
ret &= 0xfff0;
- if (ret == 0x0950)
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == 0x1060)
+ if (ret == ID_6131)
return "Marvell 88E6131";
}
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
int addr = REG_PORT(p);
u16 val;
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
+ * (100 Mb/s on 6085) full duplex.
*/
if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
else
REG_WRITE(addr, 0x01, 0x0003);
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
mv88e6xxx_ppu_state_init(ds);
mutex_init(&ps->stats_mutex);
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
ret = mv88e6131_switch_reset(ds);
if (ret < 0)
return ret;
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1..61156ca26a0 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
* Hold this mutex over snapshot + dump sequences.
*/
struct mutex stats_mutex;
+
+ int id; /* switch product id */
};
struct mv88e6xxx_hw_stat {
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e..4614babdc45 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
return csum;
}
-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict __always_unused)
{
- struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+ struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
if (IS_ERR(rt))
return PTR_ERR(rt);
*dst = &rt->dst;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ea107515c53..c1acf69858f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = init_net.loopback_dev;
dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_key_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
+ rth->rt_route_iif = in_dev->dev->ifindex;
rth->rt_iif = in_dev->dev->ifindex;
rth->dst.dev = (out_dev)->dev;
dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = net->loopback_dev;
dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_mark = oldflp4->flowi4_mark;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
- rth->rt_iif = 0;
+ rth->rt_route_iif = 0;
+ rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
/* get references to the devices that are to be hold by the routing
cache entry */
rth->dst.dev = dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_key_dst = ort->rt_key_dst;
rt->rt_key_src = ort->rt_key_src;
rt->rt_tos = ort->rt_tos;
+ rt->rt_route_iif = ort->rt_route_iif;
rt->rt_iif = ort->rt_iif;
rt->rt_oif = ort->rt_oif;
rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_type = ort->rt_type;
rt->rt_dst = ort->rt_dst;
rt->rt_src = ort->rt_src;
- rt->rt_iif = ort->rt_iif;
rt->rt_gateway = ort->rt_gateway;
rt->rt_spec_dst = ort->rt_spec_dst;
rt->peer = ort->peer;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659f..d20a05e970d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
rt->rt_key_dst = fl4->daddr;
rt->rt_key_src = fl4->saddr;
rt->rt_tos = fl4->flowi4_tos;
+ rt->rt_route_iif = fl4->flowi4_iif;
rt->rt_iif = fl4->flowi4_iif;
rt->rt_oif = fl4->flowi4_oif;
rt->rt_mark = fl4->flowi4_mark;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd..28bc1f644b7 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
return 0;
}
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
{
- *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+ static const struct ipv6_pinfo fake_pinfo;
+ static const struct inet_sock fake_sk = {
+ /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+ .sk.sk_bound_dev_if = 1,
+ .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+ };
+ const void *sk = strict ? &fake_sk : NULL;
+
+ *dst = ip6_route_output(net, sk, &fl->u.ip6);
return (*dst)->error;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 56fa12538d4..4f49e5dd41b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
__kfree_skb(opt_skb);
return 0;
}
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e1..15c37746845 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
int rc;
int is_udplite = IS_UDPLITE(sk);
+ if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9d192d665ff..c5d4530d828 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
* same TID from the same station
*/
rx->skb = skb;
- rx->flags = 0;
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
.sdata = sta->sdata,
.local = sta->local,
.queue = tid,
+ .flags = 0,
};
struct tid_ampdu_rx *tid_agg_rx;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3f988aa115..32bff6d86cb 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -652,7 +652,6 @@ comment "Xtables matches"
config NETFILTER_XT_MATCH_ADDRTYPE
tristate '"addrtype" address type match support'
depends on NETFILTER_ADVANCED
- depends on (IPV6 || IPV6=n)
---help---
This option allows you to match what routing thinks of an address,
eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218..a113ff06692 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
if (map->netmask != 32)
NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172def..00a33242e90 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
goto nla_put_failure;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map)
+ (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9c..6b38eb8f6ed 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
goto nla_put_failure;
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 253326e8d99..9152e69a162 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
static LIST_HEAD(ip_set_type_list); /* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
static struct ip_set **ip_set_list; /* all individual sets */
static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
static inline void
__ip_set_get(ip_set_id_t index)
{
- atomic_inc(&ip_set_list[index]->ref);
+ write_lock_bh(&ip_set_ref_lock);
+ ip_set_list[index]->ref++;
+ write_unlock_bh(&ip_set_ref_lock);
}
static inline void
__ip_set_put(ip_set_id_t index)
{
- atomic_dec(&ip_set_list[index]->ref);
+ write_lock_bh(&ip_set_ref_lock);
+ BUG_ON(ip_set_list[index]->ref == 0);
+ ip_set_list[index]->ref--;
+ write_unlock_bh(&ip_set_ref_lock);
}
/*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret = 0;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret = 0;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
* Find set by name, reference it once. The reference makes sure the
* thing pointed to, does not go away under our feet.
*
- * The nfnl mutex must already be activated.
*/
ip_set_id_t
ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
* reference count by 1. The caller shall not assume the index
* to be valid, after calling this function.
*
- * The nfnl mutex must already be activated.
*/
void
ip_set_put_byindex(ip_set_id_t index)
{
- if (ip_set_list[index] != NULL) {
- BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ if (ip_set_list[index] != NULL)
__ip_set_put(index);
- }
}
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
* can't be destroyed. The set cannot be renamed due to
* the referencing either.
*
- * The nfnl mutex must already be activated.
*/
const char *
ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
const struct ip_set *set = ip_set_list[index];
BUG_ON(set == NULL);
- BUG_ON(atomic_read(&set->ref) == 0);
+ BUG_ON(set->ref == 0);
/* Referenced, so it's safe */
return set->name;
@@ -515,10 +516,7 @@ void
ip_set_nfnl_put(ip_set_id_t index)
{
nfnl_lock();
- if (ip_set_list[index] != NULL) {
- BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
- __ip_set_put(index);
- }
+ ip_set_put_byindex(index);
nfnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
/*
* Communication protocol with userspace over netlink.
*
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
*/
static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
return -ENOMEM;
rwlock_init(&set->lock);
strlcpy(set->name, name, IPSET_MAXNAMELEN);
- atomic_set(&set->ref, 0);
set->family = family;
/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
/*
* Here, we have a valid, constructed set and we are protected
- * by nfnl_lock. Find the first free index in ip_set_list and
- * check clashing.
+ * by the nfnl mutex. Find the first free index in ip_set_list
+ * and check clashing.
*/
if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
const struct nlattr * const attr[])
{
ip_set_id_t i;
+ int ret = 0;
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- /* References are protected by the nfnl mutex */
+ /* Commands are serialized and references are
+ * protected by the ip_set_ref_lock.
+ * External systems (i.e. xt_set) must call
+ * ip_set_put|get_nfnl_* functions, that way we
+ * can safely check references here.
+ *
+ * list:set timer can only decrement the reference
+ * counter, so if it's already zero, we can proceed
+ * without holding the lock.
+ */
+ read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++) {
- if (ip_set_list[i] != NULL &&
- (atomic_read(&ip_set_list[i]->ref)))
- return -IPSET_ERR_BUSY;
+ if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+ ret = IPSET_ERR_BUSY;
+ goto out;
+ }
}
+ read_unlock_bh(&ip_set_ref_lock);
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL)
ip_set_destroy_set(i);
}
} else {
i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
- if (i == IPSET_INVALID_ID)
- return -ENOENT;
- else if (atomic_read(&ip_set_list[i]->ref))
- return -IPSET_ERR_BUSY;
+ if (i == IPSET_INVALID_ID) {
+ ret = -ENOENT;
+ goto out;
+ } else if (ip_set_list[i]->ref) {
+ ret = -IPSET_ERR_BUSY;
+ goto out;
+ }
+ read_unlock_bh(&ip_set_ref_lock);
ip_set_destroy_set(i);
}
return 0;
+out:
+ read_unlock_bh(&ip_set_ref_lock);
+ return ret;
}
/* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set;
const char *name2;
ip_set_id_t i;
+ int ret = 0;
if (unlikely(protocol_failed(attr) ||
attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
- if (atomic_read(&set->ref) != 0)
- return -IPSET_ERR_REFERENCED;
+
+ read_lock_bh(&ip_set_ref_lock);
+ if (set->ref != 0) {
+ ret = -IPSET_ERR_REFERENCED;
+ goto out;
+ }
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL &&
- STREQ(ip_set_list[i]->name, name2))
- return -IPSET_ERR_EXIST_SETNAME2;
+ STREQ(ip_set_list[i]->name, name2)) {
+ ret = -IPSET_ERR_EXIST_SETNAME2;
+ goto out;
+ }
}
strncpy(set->name, name2, IPSET_MAXNAMELEN);
- return 0;
+out:
+ read_unlock_bh(&ip_set_ref_lock);
+ return ret;
}
/* Swap two sets so that name/index points to the other.
* References and set names are also swapped.
*
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
* do not hold the mutex but the pointer settings are atomic
* so the ip_set_list always contains valid pointers to the sets.
*/
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
struct ip_set *from, *to;
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
- u32 from_ref;
if (unlikely(protocol_failed(attr) ||
attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
from->type->family == to->type->family))
return -IPSET_ERR_TYPE_MISMATCH;
- /* No magic here: ref munging protected by the nfnl_lock */
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
- from_ref = atomic_read(&from->ref);
-
strncpy(from->name, to->name, IPSET_MAXNAMELEN);
- atomic_set(&from->ref, atomic_read(&to->ref));
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
- atomic_set(&to->ref, from_ref);
+ write_lock_bh(&ip_set_ref_lock);
+ swap(from->ref, to->ref);
ip_set_list[from_id] = to;
ip_set_list[to_id] = from;
+ write_unlock_bh(&ip_set_ref_lock);
return 0;
}
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
{
if (cb->args[2]) {
pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
- __ip_set_put((ip_set_id_t) cb->args[1]);
+ ip_set_put_byindex((ip_set_id_t) cb->args[1]);
}
return 0;
}
@@ -1068,7 +1091,7 @@ release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[2]) {
pr_debug("release set %s\n", ip_set_list[index]->name);
- __ip_set_put(index);
+ ip_set_put_byindex(index);
}
/* If we dump all sets, continue with dumping last ones */
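
The ip_set_core.c hunks above drop the per-set atomic_t reference counter in favour of a plain integer guarded by the new ip_set_ref_lock, so destroy, rename and swap can test the count and act on the result without it changing underneath them, while the nfnl mutex keeps the commands themselves serialized. What follows is a minimal userspace sketch of that pattern, not part of the patch: the names are hypothetical and a pthread rwlock stands in for the kernel's rwlock_t with its _bh variants.

/*
 * Sketch of a reference count kept as a plain integer under a
 * reader/writer lock.  Hypothetical names; pthread rwlock instead of the
 * kernel primitives, and no command serialization beyond what the
 * comments note.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct set {
	unsigned int ref;	/* plain counter, no longer atomic_t */
	bool alive;
};

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;

static void set_get(struct set *s)
{
	pthread_rwlock_wrlock(&ref_lock);	/* every counter change takes the write lock */
	s->ref++;
	pthread_rwlock_unlock(&ref_lock);
}

static void set_put(struct set *s)
{
	pthread_rwlock_wrlock(&ref_lock);
	assert(s->ref > 0);
	s->ref--;
	pthread_rwlock_unlock(&ref_lock);
}

/*
 * The read lock is enough for the busy check because every increment holds
 * the write lock.  Proceeding after the unlock mirrors the kernel comment
 * above: it is safe only while new references cannot appear (commands are
 * serialized) and the remaining users can only decrement.
 */
static bool set_destroy(struct set *s)
{
	pthread_rwlock_rdlock(&ref_lock);
	if (s->ref != 0) {
		pthread_rwlock_unlock(&ref_lock);
		return false;	/* "busy", as with -IPSET_ERR_BUSY above */
	}
	pthread_rwlock_unlock(&ref_lock);
	s->alive = false;	/* actual teardown would happen here */
	return true;
}

int main(void)
{
	struct set s = { .ref = 0, .alive = true };

	set_get(&s);
	printf("destroy while referenced: %s\n", set_destroy(&s) ? "ok" : "busy");
	set_put(&s);
	printf("destroy when idle:        %s\n", set_destroy(&s) ? "ok" : "busy");
	return 0;
}
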
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f0..e9159e99fc4 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
static inline struct set_elem *
list_set_elem(const struct list_set *map, u32 id)
{
- return (struct set_elem *)((char *)map->members + id * map->dsize);
+ return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+ return (struct set_telem *)((void *)map->members + id * map->dsize);
}
static inline bool
list_set_timeout(const struct list_set *map, u32 id)
{
- const struct set_telem *elem =
- (const struct set_telem *) list_set_elem(map, id);
+ const struct set_telem *elem = list_set_telem(map, id);
return ip_set_timeout_test(elem->timeout);
}
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
static inline bool
list_set_expired(const struct list_set *map, u32 id)
{
- const struct set_telem *elem =
- (const struct set_telem *) list_set_elem(map, id);
+ const struct set_telem *elem = list_set_telem(map, id);
return ip_set_timeout_expired(elem->timeout);
}
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
- return elem->id != IPSET_INVALID_ID &&
- !ip_set_timeout_expired(elem->timeout);
-}
-
/* Set list without and with timeout */
static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
struct set_telem *e;
for (; i < map->size; i++) {
- e = (struct set_telem *)list_set_elem(map, i);
+ e = list_set_telem(map, i);
swap(e->id, id);
+ swap(e->timeout, timeout);
if (e->id == IPSET_INVALID_ID)
break;
- swap(e->timeout, timeout);
}
}
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
/* Last element replaced: e.g. add new,before,last */
ip_set_put_byindex(e->id);
if (with_timeout(map->timeout))
- list_elem_tadd(map, i, id, timeout);
+ list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
else
list_elem_add(map, i, id);
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
}
static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
{
struct set_elem *a = list_set_elem(map, i), *b;
- ip_set_put_byindex(id);
+ ip_set_put_byindex(a->id);
for (; i < map->size - 1; i++) {
b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
(before == 0 ||
(before > 0 &&
next_id_eq(map, i, refid))))
- ret = list_set_del(map, id, i);
+ ret = list_set_del(map, i);
else if (before < 0 &&
elem->id == refid &&
next_id_eq(map, i, id))
- ret = list_set_del(map, id, i + 1);
+ ret = list_set_del(map, i + 1);
}
break;
default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
if (with_timeout(map->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->size * map->dsize));
ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
struct set_telem *e;
u32 i;
- /* We run parallel with other readers (test element)
- * but adding/deleting new entries is locked out */
- read_lock_bh(&set->lock);
- for (i = map->size - 1; i >= 0; i--) {
- e = (struct set_telem *) list_set_elem(map, i);
- if (e->id != IPSET_INVALID_ID &&
- list_set_expired(map, i))
- list_set_del(map, e->id, i);
+ write_lock_bh(&set->lock);
+ for (i = 0; i < map->size; i++) {
+ e = list_set_telem(map, i);
+ if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+ list_set_del(map, i);
}
- read_unlock_bh(&set->lock);
+ write_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
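
In the list:set garbage-collector hunk above, the removed loop counted down with an unsigned index, so its condition i >= 0 could never become false and the index would wrap past zero; the replacement walks the array forward and takes the write lock, since list_set_del() shifts the remaining entries. Below is a small stand-alone demonstration of the index problem only, with made-up sizes and a safety cap so it halts.

/* Why a countdown loop with an unsigned index cannot terminate on i >= 0. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 4;
	uint32_t i;
	unsigned int steps = 0;

	/* Broken pattern (capped here so the demo stops): for a uint32_t,
	 * i >= 0 is always true, so i wraps to 4294967295 after reaching 0. */
	for (i = size - 1; i >= 0 && steps < 8; i--, steps++)
		printf("down: i = %u\n", i);

	/* The replacement iterates forward and terminates naturally. */
	for (i = 0; i < size; i++)
		printf("up:   i = %u\n", i);
	return 0;
}
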
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 33733c8872e..ae47090bf45 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3120,7 +3120,7 @@ nla_put_failure:
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct net *net = skb_net(skb);
+ struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&__ip_vs_mutex);
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 867882313e4..bcd5ed6b713 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
CHECK_BOUND(bs, 2);
count = *bs->cur++;
count <<= 8;
- count = *bs->cur++;
+ count += *bs->cur++;
break;
case SEMI:
BYTE_ALIGN(bs);
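
The nf_conntrack_h323_asn1.c hunk above fixes the decoding of a two-byte count in decode_seqof(): the old code shifted the high byte into place and then overwrote it with the low byte instead of adding it, so only the low byte survived. A stand-alone demonstration with a made-up input buffer:

/* Assembling a 16-bit big-endian count from two bytes: overwrite vs. add. */
#include <stdio.h>

int main(void)
{
	const unsigned char buf[] = { 0x01, 0x02 };	/* encodes 0x0102 = 258 */
	const unsigned char *cur;
	unsigned int count;

	/* Old behaviour: the second assignment discards the shifted high byte. */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count = *cur++;
	printf("broken: %u\n", count);	/* prints 2 */

	/* Fixed behaviour: add the low byte into the shifted high byte. */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count += *cur++;
	printf("fixed:  %u\n", count);	/* prints 258 */
	return 0;
}
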
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 533a183e666..18b2ce5c8ce 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->ip;
- if (!afinfo->route((struct dst_entry **)&rt1,
- flowi4_to_flowi(&fl1))) {
- if (!afinfo->route((struct dst_entry **)&rt2,
- flowi4_to_flowi(&fl2))) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ flowi4_to_flowi(&fl1), false)) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ flowi4_to_flowi(&fl2), false)) {
if (rt1->rt_gateway == rt2->rt_gateway &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
ipv6_addr_copy(&fl2.daddr, &dst->in6);
- if (!afinfo->route((struct dst_entry **)&rt1,
- flowi6_to_flowi(&fl1))) {
- if (!afinfo->route((struct dst_entry **)&rt2,
- flowi6_to_flowi(&fl2))) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ flowi6_to_flowi(&fl1), false)) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ flowi6_to_flowi(&fl2), false)) {
if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
sizeof(rt1->rt6i_gateway)) &&
rt1->dst.dev == rt2->dst.dev)
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6e6b46cb1db..9e63b43faee 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
rcu_read_lock();
ai = nf_get_afinfo(family);
if (ai != NULL)
- ai->route((struct dst_entry **)&rt, &fl);
+ ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
rcu_read_unlock();
if (rt != NULL) {
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 2220b85e951..b77d383cec7 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
MODULE_ALIAS("ip6t_addrtype");
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+ const struct in6_addr *addr)
{
+ const struct nf_afinfo *afinfo;
+ struct flowi6 flow;
+ struct rt6_info *rt;
u32 ret;
+ int route_err;
- if (!rt)
+ memset(&flow, 0, sizeof(flow));
+ ipv6_addr_copy(&flow.daddr, addr);
+ if (dev)
+ flow.flowi6_oif = dev->ifindex;
+
+ rcu_read_lock();
+
+ afinfo = nf_get_afinfo(NFPROTO_IPV6);
+ if (afinfo != NULL)
+ route_err = afinfo->route(net, (struct dst_entry **)&rt,
+ flowi6_to_flowi(&flow), !!dev);
+ else
+ route_err = 1;
+
+ rcu_read_unlock();
+
+ if (route_err)
return XT_ADDRTYPE_UNREACHABLE;
if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
ret |= XT_ADDRTYPE_LOCAL;
if (rt->rt6i_flags & RTF_ANYCAST)
ret |= XT_ADDRTYPE_ANYCAST;
+
+
+ dst_release(&rt->dst);
return ret;
}
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
return false;
if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
- XT_ADDRTYPE_UNREACHABLE) & mask) {
- struct rt6_info *rt;
- u32 type;
- int ifindex = dev ? dev->ifindex : 0;
-
- rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
- type = xt_addrtype_rt6_to_type(rt);
-
- dst_release(&rt->dst);
- return !!(mask & type);
- }
+ XT_ADDRTYPE_UNREACHABLE) & mask)
+ return !!(mask & match_lookup_rt6(net, dev, addr));
return true;
}
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 2c0086a4751..481a86fdc40 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
return info->match_flags & XT_CONNTRACK_STATE;
if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
(CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
- !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+ !(info->invert_flags & XT_CONNTRACK_DIRECTION))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGSRC)